diff --git a/.gitignore b/.gitignore
index a7a61ed2a..dd82b4451 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,10 @@
# exclude built documentation, since readthedocs will build it for us anyway
/docs/_build
+# exclude kim's commonly used
+# test stdout file location
+test.out
+
# exclude coverage report
cp.out
diff --git a/cmd/gotosocial/action/server/server.go b/cmd/gotosocial/action/server/server.go
index 4caf44cad..6bc27a7c4 100644
--- a/cmd/gotosocial/action/server/server.go
+++ b/cmd/gotosocial/action/server/server.go
@@ -57,12 +57,10 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/observability"
"github.com/superseriousbusiness/gotosocial/internal/oidc"
"github.com/superseriousbusiness/gotosocial/internal/processing"
- tlprocessor "github.com/superseriousbusiness/gotosocial/internal/processing/timeline"
"github.com/superseriousbusiness/gotosocial/internal/router"
"github.com/superseriousbusiness/gotosocial/internal/state"
gtsstorage "github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/subscriptions"
- "github.com/superseriousbusiness/gotosocial/internal/timeline"
"github.com/superseriousbusiness/gotosocial/internal/transport"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/internal/web"
@@ -139,20 +137,6 @@ var Start action.GTSAction = func(ctx context.Context) error {
// Noop on unstarted workers.
state.Workers.Stop()
- if state.Timelines.Home != nil {
- // Home timeline mgr was setup, ensure it gets stopped.
- if err := state.Timelines.Home.Stop(); err != nil {
- log.Errorf(ctx, "error stopping home timeline: %v", err)
- }
- }
-
- if state.Timelines.List != nil {
- // List timeline mgr was setup, ensure it gets stopped.
- if err := state.Timelines.List.Stop(); err != nil {
- log.Errorf(ctx, "error stopping list timeline: %v", err)
- }
- }
-
if process != nil {
const timeout = time.Minute
@@ -323,26 +307,6 @@ var Start action.GTSAction = func(ctx context.Context) error {
// Create a Web Push notification sender.
webPushSender := webpush.NewSender(client, state, typeConverter)
- // Initialize both home / list timelines.
- state.Timelines.Home = timeline.NewManager(
- tlprocessor.HomeTimelineGrab(state),
- tlprocessor.HomeTimelineFilter(state, visFilter),
- tlprocessor.HomeTimelineStatusPrepare(state, typeConverter),
- tlprocessor.SkipInsert(),
- )
- if err := state.Timelines.Home.Start(); err != nil {
- return fmt.Errorf("error starting home timeline: %s", err)
- }
- state.Timelines.List = timeline.NewManager(
- tlprocessor.ListTimelineGrab(state),
- tlprocessor.ListTimelineFilter(state, visFilter),
- tlprocessor.ListTimelineStatusPrepare(state, typeConverter),
- tlprocessor.SkipInsert(),
- )
- if err := state.Timelines.List.Start(); err != nil {
- return fmt.Errorf("error starting list timeline: %s", err)
- }
-
// Start the job scheduler
// (this is required for cleaner).
state.Workers.StartScheduler()
diff --git a/cmd/gotosocial/action/testrig/testrig.go b/cmd/gotosocial/action/testrig/testrig.go
index c0c080e38..cca4ead22 100644
--- a/cmd/gotosocial/action/testrig/testrig.go
+++ b/cmd/gotosocial/action/testrig/testrig.go
@@ -42,12 +42,10 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/middleware"
"github.com/superseriousbusiness/gotosocial/internal/observability"
"github.com/superseriousbusiness/gotosocial/internal/oidc"
- tlprocessor "github.com/superseriousbusiness/gotosocial/internal/processing/timeline"
"github.com/superseriousbusiness/gotosocial/internal/router"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/subscriptions"
- "github.com/superseriousbusiness/gotosocial/internal/timeline"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/internal/web"
"github.com/superseriousbusiness/gotosocial/testrig"
@@ -89,20 +87,6 @@ var Start action.GTSAction = func(ctx context.Context) error {
// tasks from being executed.
testrig.StopWorkers(state)
- if state.Timelines.Home != nil {
- // Home timeline mgr was setup, ensure it gets stopped.
- if err := state.Timelines.Home.Stop(); err != nil {
- log.Errorf(ctx, "error stopping home timeline: %v", err)
- }
- }
-
- if state.Timelines.List != nil {
- // List timeline mgr was setup, ensure it gets stopped.
- if err := state.Timelines.List.Stop(); err != nil {
- log.Errorf(ctx, "error stopping list timeline: %v", err)
- }
- }
-
if state.Storage != nil {
// If storage was created, ensure torn down.
testrig.StandardStorageTeardown(state.Storage)
@@ -172,26 +156,6 @@ var Start action.GTSAction = func(ctx context.Context) error {
typeConverter := typeutils.NewConverter(state)
filter := visibility.NewFilter(state)
- // Initialize both home / list timelines.
- state.Timelines.Home = timeline.NewManager(
- tlprocessor.HomeTimelineGrab(state),
- tlprocessor.HomeTimelineFilter(state, filter),
- tlprocessor.HomeTimelineStatusPrepare(state, typeConverter),
- tlprocessor.SkipInsert(),
- )
- if err := state.Timelines.Home.Start(); err != nil {
- return fmt.Errorf("error starting home timeline: %s", err)
- }
- state.Timelines.List = timeline.NewManager(
- tlprocessor.ListTimelineGrab(state),
- tlprocessor.ListTimelineFilter(state, filter),
- tlprocessor.ListTimelineStatusPrepare(state, typeConverter),
- tlprocessor.SkipInsert(),
- )
- if err := state.Timelines.List.Start(); err != nil {
- return fmt.Errorf("error starting list timeline: %s", err)
- }
-
processor := testrig.NewTestProcessor(state, federator, emailSender, webPushSender, mediaManager)
// Initialize workers.
diff --git a/go.mod b/go.mod
index 143da6208..493ccdc50 100644
--- a/go.mod
+++ b/go.mod
@@ -15,7 +15,6 @@ require (
code.superseriousbusiness.org/exif-terminator v0.11.0
code.superseriousbusiness.org/httpsig v1.4.0
code.superseriousbusiness.org/oauth2/v4 v4.8.0
- codeberg.org/gruf/go-bytes v1.0.2
codeberg.org/gruf/go-bytesize v1.0.3
codeberg.org/gruf/go-byteutil v1.3.0
codeberg.org/gruf/go-cache/v3 v3.6.1
@@ -31,7 +30,7 @@ require (
codeberg.org/gruf/go-runners v1.6.3
codeberg.org/gruf/go-sched v1.2.4
codeberg.org/gruf/go-storage v0.2.0
- codeberg.org/gruf/go-structr v0.9.6
+ codeberg.org/gruf/go-structr v0.9.7
github.com/DmitriyVTitov/size v1.5.0
github.com/KimMachineGun/automemlimit v0.7.1
github.com/SherClockHolmes/webpush-go v1.4.0
diff --git a/go.sum b/go.sum
index 79c9df464..5153bcf3f 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,6 @@ code.superseriousbusiness.org/httpsig v1.4.0 h1:g9+KQMoTG0oR0II5gYb5pVVdNjbc7Cii
code.superseriousbusiness.org/httpsig v1.4.0/go.mod h1:i2AKpj/WbA/o/UTvia9TAREzt0jP1AH3T1Uxjyhdzlw=
code.superseriousbusiness.org/oauth2/v4 v4.8.0 h1:4LVXoPJXKgmDfwDegzBQPNpsdleMaL6YmDgFi6UDgEE=
code.superseriousbusiness.org/oauth2/v4 v4.8.0/go.mod h1:+RLRBXPkjP/VhIC/46dcZkx3t5IvBSJYOjVCPgeWors=
-codeberg.org/gruf/go-bytes v1.0.2 h1:malqE42Ni+h1nnYWBUAJaDDtEzF4aeN4uPN8DfMNNvo=
-codeberg.org/gruf/go-bytes v1.0.2/go.mod h1:1v/ibfaosfXSZtRdW2rWaVrDXMc9E3bsi/M9Ekx39cg=
codeberg.org/gruf/go-bytesize v1.0.3 h1:Tz8tCxhPLeyM5VryuBNjUHgKmLj4Bx9RbPaUSA3qg6g=
codeberg.org/gruf/go-bytesize v1.0.3/go.mod h1:n/GU8HzL9f3UNp/mUKyr1qVmTlj7+xacpp0OHfkvLPs=
codeberg.org/gruf/go-byteutil v1.3.0 h1:nRqJnCcRQ7xbfU6azw7zOzJrSMDIJHBqX6FL9vEMYmU=
@@ -50,8 +48,8 @@ codeberg.org/gruf/go-sched v1.2.4 h1:ddBB9o0D/2oU8NbQ0ldN5aWxogpXPRBATWi58+p++Hw
codeberg.org/gruf/go-sched v1.2.4/go.mod h1:wad6l+OcYGWMA2TzNLMmLObsrbBDxdJfEy5WvTgBjNk=
codeberg.org/gruf/go-storage v0.2.0 h1:mKj3Lx6AavEkuXXtxqPhdq+akW9YwrnP16yQBF7K5ZI=
codeberg.org/gruf/go-storage v0.2.0/go.mod h1:o3GzMDE5QNUaRnm/daUzFqvuAaC4utlgXDXYO79sWKU=
-codeberg.org/gruf/go-structr v0.9.6 h1:FSbJ1A0ubTQB82rC0K4o6qyiqrDGH1t9ivttm8Zy64o=
-codeberg.org/gruf/go-structr v0.9.6/go.mod h1:9k5hYztZ4PsBS+m1v5hUTeFiVUBTLF5VA7d9cd1OEMs=
+codeberg.org/gruf/go-structr v0.9.7 h1:yQeIxTjYb6reNdgESk915twyjolydYBqat/mlZrP7bg=
+codeberg.org/gruf/go-structr v0.9.7/go.mod h1:9k5hYztZ4PsBS+m1v5hUTeFiVUBTLF5VA7d9cd1OEMs=
codeberg.org/superseriousbusiness/go-swagger v0.31.0-gts-go1.23-fix h1:+JvBZqsQfdT+ROnk2DkvXsKQ9QBorKKKBk5fBqw62I8=
codeberg.org/superseriousbusiness/go-swagger v0.31.0-gts-go1.23-fix/go.mod h1:WSigRRWEig8zV6t6Sm8Y+EmUjlzA/HoaZJ5edupq7po=
github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW515g=
diff --git a/internal/api/activitypub/emoji/emojiget_test.go b/internal/api/activitypub/emoji/emojiget_test.go
index 7d3587fd8..11b66f5c2 100644
--- a/internal/api/activitypub/emoji/emojiget_test.go
+++ b/internal/api/activitypub/emoji/emojiget_test.go
@@ -30,7 +30,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/middleware"
@@ -79,12 +78,6 @@ func (suite *EmojiGetTestSuite) SetupTest() {
suite.state.Storage = suite.storage
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../testrig/media")), suite.mediaManager)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)
diff --git a/internal/api/activitypub/users/user_test.go b/internal/api/activitypub/users/user_test.go
index c57d9f8c4..f86890231 100644
--- a/internal/api/activitypub/users/user_test.go
+++ b/internal/api/activitypub/users/user_test.go
@@ -25,7 +25,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/middleware"
@@ -86,12 +85,6 @@ func (suite *UserStandardTestSuite) SetupTest() {
suite.state.AdminActions = admin.New(suite.state.DB, &suite.state.Workers)
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
diff --git a/internal/api/client/accounts/account_test.go b/internal/api/client/accounts/account_test.go
index 3daa71c91..27e09cab4 100644
--- a/internal/api/client/accounts/account_test.go
+++ b/internal/api/client/accounts/account_test.go
@@ -31,14 +31,12 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -88,12 +86,6 @@ func (suite *AccountStandardTestSuite) SetupTest() {
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../testrig/media")), suite.mediaManager)
suite.sentEmails = make(map[string]string)
diff --git a/internal/api/client/admin/admin_test.go b/internal/api/client/admin/admin_test.go
index 6bc777119..55249a1cd 100644
--- a/internal/api/client/admin/admin_test.go
+++ b/internal/api/client/admin/admin_test.go
@@ -31,14 +31,12 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -94,12 +92,6 @@ func (suite *AdminStandardTestSuite) SetupTest() {
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../testrig/media")), suite.mediaManager)
suite.sentEmails = make(map[string]string)
diff --git a/internal/api/client/bookmarks/bookmarks_test.go b/internal/api/client/bookmarks/bookmarks_test.go
index 3608078b9..7cb22d34e 100644
--- a/internal/api/client/bookmarks/bookmarks_test.go
+++ b/internal/api/client/bookmarks/bookmarks_test.go
@@ -36,7 +36,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
@@ -100,12 +99,6 @@ func (suite *BookmarkTestSuite) SetupTest() {
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
testrig.StandardDBSetup(suite.db, nil)
testrig.StandardStorageSetup(suite.storage, "../../../../testrig/media")
diff --git a/internal/api/client/exports/exports_test.go b/internal/api/client/exports/exports_test.go
index 6fbeb57d0..60d428d78 100644
--- a/internal/api/client/exports/exports_test.go
+++ b/internal/api/client/exports/exports_test.go
@@ -29,11 +29,9 @@ import (
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/exports"
apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/state"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -69,12 +67,6 @@ func (suite *ExportsTestSuite) SetupTest() {
suite.state.DB = testrig.NewTestDB(&suite.state)
suite.state.Storage = testrig.NewInMemoryStorage()
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
testrig.StandardDBSetup(suite.state.DB, nil)
testrig.StandardStorageSetup(suite.state.Storage, "../../../../testrig/media")
diff --git a/internal/api/client/favourites/favourites_test.go b/internal/api/client/favourites/favourites_test.go
index 7c65e4b97..a141b4aa1 100644
--- a/internal/api/client/favourites/favourites_test.go
+++ b/internal/api/client/favourites/favourites_test.go
@@ -24,7 +24,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/processing"
@@ -84,12 +83,6 @@ func (suite *FavouritesStandardTestSuite) SetupTest() {
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
testrig.StandardDBSetup(suite.db, nil)
testrig.StandardStorageSetup(suite.storage, "../../../../testrig/media")
diff --git a/internal/api/client/filters/v1/filter_test.go b/internal/api/client/filters/v1/filter_test.go
index e0bcf8731..ca3827e3c 100644
--- a/internal/api/client/filters/v1/filter_test.go
+++ b/internal/api/client/filters/v1/filter_test.go
@@ -29,14 +29,12 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/stream"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -93,12 +91,6 @@ func (suite *FiltersTestSuite) SetupTest() {
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../../testrig/media")), suite.mediaManager)
suite.sentEmails = make(map[string]string)
diff --git a/internal/api/client/filters/v2/filter_test.go b/internal/api/client/filters/v2/filter_test.go
index af212ac88..5c8fdf4ae 100644
--- a/internal/api/client/filters/v2/filter_test.go
+++ b/internal/api/client/filters/v2/filter_test.go
@@ -29,14 +29,12 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/stream"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -93,12 +91,6 @@ func (suite *FiltersTestSuite) SetupTest() {
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../../testrig/media")), suite.mediaManager)
suite.emailSender = testrig.NewEmailSender("../../../../../web/template/", suite.sentEmails)
diff --git a/internal/api/client/followrequests/followrequest_test.go b/internal/api/client/followrequests/followrequest_test.go
index fbaf9a560..e0f3bada7 100644
--- a/internal/api/client/followrequests/followrequest_test.go
+++ b/internal/api/client/followrequests/followrequest_test.go
@@ -30,14 +30,12 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -85,12 +83,6 @@ func (suite *FollowRequestStandardTestSuite) SetupTest() {
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../testrig/media")), suite.mediaManager)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)
diff --git a/internal/api/client/import/import_test.go b/internal/api/client/import/import_test.go
index 1edb54b64..b66749473 100644
--- a/internal/api/client/import/import_test.go
+++ b/internal/api/client/import/import_test.go
@@ -28,11 +28,9 @@ import (
"github.com/stretchr/testify/suite"
importdata "github.com/superseriousbusiness/gotosocial/internal/api/client/import"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/state"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -67,12 +65,6 @@ func (suite *ImportTestSuite) SetupTest() {
suite.state.DB = testrig.NewTestDB(&suite.state)
suite.state.Storage = testrig.NewInMemoryStorage()
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
testrig.StandardDBSetup(suite.state.DB, nil)
testrig.StandardStorageSetup(suite.state.Storage, "../../../../testrig/media")
diff --git a/internal/api/client/instance/instance_test.go b/internal/api/client/instance/instance_test.go
index 965d09609..1087e1a36 100644
--- a/internal/api/client/instance/instance_test.go
+++ b/internal/api/client/instance/instance_test.go
@@ -30,14 +30,12 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -87,12 +85,6 @@ func (suite *InstanceStandardTestSuite) SetupTest() {
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../testrig/media")), suite.mediaManager)
suite.sentEmails = make(map[string]string)
diff --git a/internal/api/client/lists/listaccountsadd_test.go b/internal/api/client/lists/listaccountsadd_test.go
index e71cf0992..7e5dd2809 100644
--- a/internal/api/client/lists/listaccountsadd_test.go
+++ b/internal/api/client/lists/listaccountsadd_test.go
@@ -24,8 +24,8 @@ import (
"net/http"
"net/http/httptest"
"testing"
+ "bytes"
- "codeberg.org/gruf/go-bytes"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/lists"
"github.com/superseriousbusiness/gotosocial/internal/config"
diff --git a/internal/api/client/lists/lists_test.go b/internal/api/client/lists/lists_test.go
index a4afa24bb..318bdc329 100644
--- a/internal/api/client/lists/lists_test.go
+++ b/internal/api/client/lists/lists_test.go
@@ -24,13 +24,11 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -90,12 +88,6 @@ func (suite *ListsStandardTestSuite) SetupTest() {
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../testrig/media")), suite.mediaManager)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)
diff --git a/internal/api/client/media/mediacreate_test.go b/internal/api/client/media/mediacreate_test.go
index 6f7bf781f..90acdf94e 100644
--- a/internal/api/client/media/mediacreate_test.go
+++ b/internal/api/client/media/mediacreate_test.go
@@ -36,7 +36,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
@@ -93,12 +92,6 @@ func (suite *MediaCreateTestSuite) SetupTest() {
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.oauthServer = testrig.NewTestOauthServer(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../testrig/media")), suite.mediaManager)
diff --git a/internal/api/client/media/mediaupdate_test.go b/internal/api/client/media/mediaupdate_test.go
index 8e033f367..6b00de2f1 100644
--- a/internal/api/client/media/mediaupdate_test.go
+++ b/internal/api/client/media/mediaupdate_test.go
@@ -34,7 +34,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
@@ -91,12 +90,6 @@ func (suite *MediaUpdateTestSuite) SetupTest() {
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.oauthServer = testrig.NewTestOauthServer(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../testrig/media")), suite.mediaManager)
diff --git a/internal/api/client/mutes/mutes_test.go b/internal/api/client/mutes/mutes_test.go
index fdfca4414..bafb57355 100644
--- a/internal/api/client/mutes/mutes_test.go
+++ b/internal/api/client/mutes/mutes_test.go
@@ -31,14 +31,12 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -84,12 +82,6 @@ func (suite *MutesTestSuite) SetupTest() {
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../testrig/media")), suite.mediaManager)
suite.sentEmails = make(map[string]string)
diff --git a/internal/api/client/notifications/notifications_test.go b/internal/api/client/notifications/notifications_test.go
index b84e7d768..8da10593d 100644
--- a/internal/api/client/notifications/notifications_test.go
+++ b/internal/api/client/notifications/notifications_test.go
@@ -24,7 +24,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/processing"
@@ -86,12 +85,6 @@ func (suite *NotificationsTestSuite) SetupTest() {
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
testrig.StandardDBSetup(suite.db, nil)
testrig.StandardStorageSetup(suite.storage, "../../../../testrig/media")
diff --git a/internal/api/client/polls/polls_test.go b/internal/api/client/polls/polls_test.go
index 5df5cf88d..fe6742ef5 100644
--- a/internal/api/client/polls/polls_test.go
+++ b/internal/api/client/polls/polls_test.go
@@ -24,13 +24,11 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -80,12 +78,6 @@ func (suite *PollsStandardTestSuite) SetupTest() {
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../testrig/media")), suite.mediaManager)
suite.sentEmails = make(map[string]string)
diff --git a/internal/api/client/reports/reports_test.go b/internal/api/client/reports/reports_test.go
index da39c78e1..5de4b04b1 100644
--- a/internal/api/client/reports/reports_test.go
+++ b/internal/api/client/reports/reports_test.go
@@ -24,13 +24,11 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -79,12 +77,6 @@ func (suite *ReportsStandardTestSuite) SetupTest() {
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../testrig/media")), suite.mediaManager)
suite.sentEmails = make(map[string]string)
diff --git a/internal/api/client/search/search_test.go b/internal/api/client/search/search_test.go
index 9eb7f08fe..98e7daeb3 100644
--- a/internal/api/client/search/search_test.go
+++ b/internal/api/client/search/search_test.go
@@ -30,14 +30,12 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -83,12 +81,6 @@ func (suite *SearchStandardTestSuite) SetupTest() {
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../../../testrig/media")), suite.mediaManager)
suite.sentEmails = make(map[string]string)
diff --git a/internal/api/client/statuses/status_test.go b/internal/api/client/statuses/status_test.go
index 2b916125e..1e6edbb17 100644
--- a/internal/api/client/statuses/status_test.go
+++ b/internal/api/client/statuses/status_test.go
@@ -30,7 +30,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/id"
"github.com/superseriousbusiness/gotosocial/internal/media"
@@ -197,12 +196,6 @@ func (suite *StatusStandardTestSuite) SetupTest() {
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
testrig.StandardDBSetup(suite.db, nil)
testrig.StandardStorageSetup(suite.storage, "../../../../testrig/media")
diff --git a/internal/api/client/streaming/streaming_test.go b/internal/api/client/streaming/streaming_test.go
index 4cc5dc1b2..f33a8efe2 100644
--- a/internal/api/client/streaming/streaming_test.go
+++ b/internal/api/client/streaming/streaming_test.go
@@ -36,7 +36,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
@@ -97,12 +96,6 @@ func (suite *StreamingTestSuite) SetupTest() {
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
testrig.StandardDBSetup(suite.db, nil)
testrig.StandardStorageSetup(suite.storage, "../../../../testrig/media")
diff --git a/internal/api/client/timelines/home.go b/internal/api/client/timelines/home.go
index 8e957d498..4cb0ae8aa 100644
--- a/internal/api/client/timelines/home.go
+++ b/internal/api/client/timelines/home.go
@@ -23,6 +23,7 @@ import (
"github.com/gin-gonic/gin"
apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
)
// HomeTimelineGETHandler swagger:operation GET /api/v1/timelines/home homeTimeline
@@ -127,13 +128,17 @@ func (m *Module) HomeTimelineGETHandler(c *gin.Context) {
return
}
- limit, errWithCode := apiutil.ParseLimit(c.Query(apiutil.LimitKey), 20, 40, 1)
+ local, errWithCode := apiutil.ParseLocal(c.Query(apiutil.LocalKey), false)
if errWithCode != nil {
apiutil.ErrorHandler(c, errWithCode, m.processor.InstanceGetV1)
return
}
- local, errWithCode := apiutil.ParseLocal(c.Query(apiutil.LocalKey), false)
+ page, errWithCode := paging.ParseIDPage(c,
+ 1, // min limit
+ 40, // max limit
+ 20, // default limit
+ )
if errWithCode != nil {
apiutil.ErrorHandler(c, errWithCode, m.processor.InstanceGetV1)
return
@@ -141,11 +146,8 @@ func (m *Module) HomeTimelineGETHandler(c *gin.Context) {
resp, errWithCode := m.processor.Timeline().HomeTimelineGet(
c.Request.Context(),
- authed,
- c.Query(apiutil.MaxIDKey),
- c.Query(apiutil.SinceIDKey),
- c.Query(apiutil.MinIDKey),
- limit,
+ authed.Account,
+ page,
local,
)
if errWithCode != nil {
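Editor's note: the change above replaces the separate ParseLimit / max_id / since_id / min_id query handling with a single paging.ParseIDPage(c, 1, 40, 20) call whose page result is handed straight to the processor; the list and public timeline handlers below follow the same pattern. As a rough, self-contained illustration of what ID-keyed paging means (this is not GoToSocial's paging package, and pageIDs is a hypothetical helper), the sketch below selects a window of IDs bounded by exclusive max_id / since_id values and capped by a limit:

```go
// Conceptual sketch only: pageIDs is a hypothetical helper showing
// how ID-keyed paging selects a window from an ID-ordered timeline.
// It is not GoToSocial's paging implementation.
package main

import (
	"fmt"
	"sort"
)

// pageIDs returns up to 'limit' IDs strictly between sinceID and
// maxID (both exclusive), newest first. With ULID-style IDs, a
// lexically higher ID is a newer status.
func pageIDs(ids []string, maxID, sinceID string, limit int) []string {
	// Work on a copy sorted newest first.
	sorted := append([]string(nil), ids...)
	sort.Sort(sort.Reverse(sort.StringSlice(sorted)))

	out := make([]string, 0, limit)
	for _, id := range sorted {
		if maxID != "" && id >= maxID {
			continue // as new or newer than the upper bound
		}
		if sinceID != "" && id <= sinceID {
			break // as old or older than the lower bound
		}
		out = append(out, id)
		if len(out) == limit {
			break
		}
	}
	return out
}

func main() {
	ids := []string{"01A", "01B", "01C", "01D", "01E"}
	// Up to 2 statuses older than 01E and newer than 01A.
	fmt.Println(pageIDs(ids, "01E", "01A", 2)) // [01D 01C]
}
```

Running main prints [01D 01C]: the two newest IDs strictly between the bounds.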
diff --git a/internal/api/client/timelines/list.go b/internal/api/client/timelines/list.go
index b02489d6c..2e89f16ea 100644
--- a/internal/api/client/timelines/list.go
+++ b/internal/api/client/timelines/list.go
@@ -23,6 +23,7 @@ import (
"github.com/gin-gonic/gin"
apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
)
// ListTimelineGETHandler swagger:operation GET /api/v1/timelines/list/{id} listTimeline
@@ -131,7 +132,11 @@ func (m *Module) ListTimelineGETHandler(c *gin.Context) {
apiutil.ErrorHandler(c, errWithCode, m.processor.InstanceGetV1)
}
- limit, errWithCode := apiutil.ParseLimit(c.Query(apiutil.LimitKey), 20, 40, 1)
+ page, errWithCode := paging.ParseIDPage(c,
+ 1, // min limit
+ 40, // max limit
+ 20, // default limit
+ )
if errWithCode != nil {
apiutil.ErrorHandler(c, errWithCode, m.processor.InstanceGetV1)
return
@@ -139,12 +144,9 @@ func (m *Module) ListTimelineGETHandler(c *gin.Context) {
resp, errWithCode := m.processor.Timeline().ListTimelineGet(
c.Request.Context(),
- authed,
+ authed.Account,
targetListID,
- c.Query(apiutil.MaxIDKey),
- c.Query(apiutil.SinceIDKey),
- c.Query(apiutil.MinIDKey),
- limit,
+ page,
)
if errWithCode != nil {
apiutil.ErrorHandler(c, errWithCode, m.processor.InstanceGetV1)
diff --git a/internal/api/client/timelines/public.go b/internal/api/client/timelines/public.go
index d6df36f09..7a4a68b77 100644
--- a/internal/api/client/timelines/public.go
+++ b/internal/api/client/timelines/public.go
@@ -24,6 +24,7 @@ import (
apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
)
// PublicTimelineGETHandler swagger:operation GET /api/v1/timelines/public publicTimeline
@@ -141,7 +142,11 @@ func (m *Module) PublicTimelineGETHandler(c *gin.Context) {
return
}
- limit, errWithCode := apiutil.ParseLimit(c.Query(apiutil.LimitKey), 20, 40, 1)
+ page, errWithCode := paging.ParseIDPage(c,
+ 1, // min limit
+ 40, // max limit
+ 20, // default limit
+ )
if errWithCode != nil {
apiutil.ErrorHandler(c, errWithCode, m.processor.InstanceGetV1)
return
@@ -156,10 +161,7 @@ func (m *Module) PublicTimelineGETHandler(c *gin.Context) {
resp, errWithCode := m.processor.Timeline().PublicTimelineGet(
c.Request.Context(),
authed.Account,
- c.Query(apiutil.MaxIDKey),
- c.Query(apiutil.SinceIDKey),
- c.Query(apiutil.MinIDKey),
- limit,
+ page,
local,
)
if errWithCode != nil {
diff --git a/internal/api/client/user/user_test.go b/internal/api/client/user/user_test.go
index 8f54c82a0..a7891d8b1 100644
--- a/internal/api/client/user/user_test.go
+++ b/internal/api/client/user/user_test.go
@@ -28,7 +28,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/api/client/user"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
@@ -77,12 +76,6 @@ func (suite *UserStandardTestSuite) SetupTest() {
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(
&suite.state,
diff --git a/internal/api/fileserver/fileserver_test.go b/internal/api/fileserver/fileserver_test.go
index 9ba647ff3..b650cb7be 100644
--- a/internal/api/fileserver/fileserver_test.go
+++ b/internal/api/fileserver/fileserver_test.go
@@ -24,7 +24,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/log"
"github.com/superseriousbusiness/gotosocial/internal/media"
@@ -92,12 +91,6 @@ func (suite *FileserverTestSuite) SetupSuite() {
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.oauthServer = testrig.NewTestOauthServer(&suite.state)
suite.emailSender = testrig.NewEmailSender("../../../web/template/", nil)
diff --git a/internal/api/wellknown/webfinger/webfinger_test.go b/internal/api/wellknown/webfinger/webfinger_test.go
index d6521aff0..11f64ae9b 100644
--- a/internal/api/wellknown/webfinger/webfinger_test.go
+++ b/internal/api/wellknown/webfinger/webfinger_test.go
@@ -24,7 +24,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
@@ -81,12 +80,6 @@ func (suite *WebfingerStandardTestSuite) SetupTest() {
suite.state.AdminActions = admin.New(suite.state.DB, &suite.state.Workers)
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
diff --git a/internal/cache/cache.go b/internal/cache/cache.go
index 7844c03f8..e3fd0d1fe 100644
--- a/internal/cache/cache.go
+++ b/internal/cache/cache.go
@@ -46,6 +46,9 @@ type Caches struct {
// `[status.ID][status.UpdatedAt.Unix()]`
StatusesFilterableFields *ttl.Cache[string, []string]
+ // Timelines provides access to the
+ // status timeline caches (home / list).
+ Timelines TimelineCaches
+
// Visibility provides access to the item visibility
// cache. (used by the visibility filter).
Visibility VisibilityCache
@@ -87,12 +90,14 @@ func (c *Caches) Init() {
c.initFollowRequest()
c.initFollowRequestIDs()
c.initFollowingTagIDs()
+ c.initHomeTimelines()
c.initInReplyToIDs()
c.initInstance()
c.initInteractionRequest()
c.initList()
c.initListIDs()
c.initListedIDs()
+ c.initListTimelines()
c.initMarker()
c.initMedia()
c.initMention()
@@ -109,6 +114,7 @@ func (c *Caches) Init() {
c.initStatusEdit()
c.initStatusFave()
c.initStatusFaveIDs()
+ c.initStatusesFilterableFields()
c.initTag()
c.initThreadMute()
c.initToken()
@@ -120,7 +126,6 @@ func (c *Caches) Init() {
c.initWebPushSubscription()
c.initWebPushSubscriptionIDs()
c.initVisibility()
- c.initStatusesFilterableFields()
}
// Start will start any caches that require a background
@@ -207,6 +212,8 @@ func (c *Caches) Sweep(threshold float64) {
c.DB.User.Trim(threshold)
c.DB.UserMute.Trim(threshold)
c.DB.UserMuteIDs.Trim(threshold)
+ c.Timelines.Home.Trim()
+ c.Timelines.List.Trim()
c.Visibility.Trim(threshold)
}
diff --git a/internal/timeline/unprepare.go b/internal/cache/timeline.go
similarity index 52%
rename from internal/timeline/unprepare.go
rename to internal/cache/timeline.go
index 67a990287..3e6a68558 100644
--- a/internal/timeline/unprepare.go
+++ b/internal/cache/timeline.go
@@ -15,36 +15,37 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
-package timeline
+package cache
import (
- "context"
+ "github.com/superseriousbusiness/gotosocial/internal/cache/timeline"
+ "github.com/superseriousbusiness/gotosocial/internal/log"
)
-func (t *timeline) Unprepare(ctx context.Context, itemID string) error {
- t.Lock()
- defer t.Unlock()
+type TimelineCaches struct {
+ // Home provides a concurrency-safe map of status timeline
+ // caches for home timelines, keyed by home's account ID.
+ Home timeline.StatusTimelines
- if t.items == nil || t.items.data == nil {
- // Nothing to do.
- return nil
- }
-
- for e := t.items.data.Front(); e != nil; e = e.Next() {
- entry := e.Value.(*indexedItemsEntry)
-
- if entry.itemID != itemID && entry.boostOfID != itemID {
- // Not relevant.
- continue
- }
-
- if entry.prepared == nil {
- // It's already unprepared (mood).
- continue
- }
-
- entry.prepared = nil // <- eat this up please garbage collector nom nom nom
- }
-
- return nil
+ // List provides a concurrency-safe map of status
+ // timeline caches for lists, keyed by list ID.
+ List timeline.StatusTimelines
+}
+
+func (c *Caches) initHomeTimelines() {
+ // TODO: configurable
+ cap := 800
+
+ log.Infof(nil, "cache size = %d", cap)
+
+ c.Timelines.Home.Init(cap)
+}
+
+func (c *Caches) initListTimelines() {
+ // TODO: configurable
+ cap := 800
+
+ log.Infof(nil, "cache size = %d", cap)
+
+ c.Timelines.List.Init(cap)
}
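Editor's note: the timeline.StatusTimelines type referenced for Home and List is defined elsewhere; from its use in this file it is initialized once with a capacity (Init(cap)) and trimmed per-timeline during Caches.Sweep(). Below is a minimal conceptual sketch of such a keyed collection of per-account / per-list timeline caches; the names StatusTimelinesSketch, timelineCache, and MustGet are hypothetical and make no claim to match the real API:

```go
// Conceptual sketch only; not the real timeline.StatusTimelines.
package sketch

import "sync"

// timelineCache stands in for a single status timeline cache.
type timelineCache struct{}

func (c *timelineCache) Init(cap int) { /* prepare indices, record capacity */ }
func (c *timelineCache) Trim()        { /* drop oldest entries past the cutoff */ }

// StatusTimelinesSketch is a hypothetical, concurrency-safe
// collection of timeline caches keyed by account or list ID.
type StatusTimelinesSketch struct {
	mu  sync.Mutex
	cap int
	m   map[string]*timelineCache
}

// Init stores the per-timeline capacity used when
// individual timelines are lazily created later.
func (t *StatusTimelinesSketch) Init(cap int) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.cap = cap
	t.m = make(map[string]*timelineCache)
}

// MustGet returns the timeline for the given key,
// creating and initializing it if necessary.
func (t *StatusTimelinesSketch) MustGet(key string) *timelineCache {
	t.mu.Lock()
	defer t.mu.Unlock()
	tl, ok := t.m[key]
	if !ok {
		tl = new(timelineCache)
		tl.Init(t.cap)
		t.m[key] = tl
	}
	return tl
}

// Trim trims every known timeline, mirroring the
// Timelines.Home.Trim() / List.Trim() calls in Sweep().
func (t *StatusTimelinesSketch) Trim() {
	t.mu.Lock()
	defer t.mu.Unlock()
	for _, tl := range t.m {
		tl.Trim()
	}
}
```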
diff --git a/internal/cache/timeline/preload.go b/internal/cache/timeline/preload.go
new file mode 100644
index 000000000..b941a8b0c
--- /dev/null
+++ b/internal/cache/timeline/preload.go
@@ -0,0 +1,152 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package timeline
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "github.com/superseriousbusiness/gotosocial/internal/log"
+)
+
+// preloader provides a means of synchronising the
+// initial fill, or "preload", of a timeline cache.
+// it has 4 possible states in the atomic pointer:
+// - preloading = &(interface{}(*sync.WaitGroup))
+// - preloaded = &(interface{}(nil))
+// - needs preload = &(interface{}(false))
+// - brand-new = nil (functionally same as 'needs preload')
+type preloader struct{ p atomic.Pointer[any] }
+
+// Check will return the current preload state,
+// waiting if a preload is currently in progress.
+func (p *preloader) Check() bool {
+ for {
+ // Get state ptr.
+ ptr := p.p.Load()
+
+ // Check if requires preloading.
+ if ptr == nil || *ptr == false {
+ return false
+ }
+
+ // Check for a preload currently in progress.
+ if wg, _ := (*ptr).(*sync.WaitGroup); wg != nil {
+ wg.Wait()
+ continue
+ }
+
+ // Anything else
+ // means success.
+ return true
+ }
+}
+
+// CheckPreload will safely check the preload state,
+// and if needed call the provided function. if a
+// preload is in progress, it will wait until complete.
+func (p *preloader) CheckPreload(preload func(*any)) {
+ for {
+ // Get state ptr.
+ ptr := p.p.Load()
+
+ if ptr == nil || *ptr == false {
+ // Needs preloading, start it.
+ ok := p.start(ptr, preload)
+
+ if !ok {
+ // Failed to acquire start,
+ // other thread beat us to it.
+ continue
+ }
+
+ // Success!
+ return
+ }
+
+ // Check for a preload currently in progress.
+ if wg, _ := (*ptr).(*sync.WaitGroup); wg != nil {
+ wg.Wait()
+ continue
+ }
+
+ // Anything else
+ // means success.
+ return
+ }
+}
+
+// start attempts to start the given preload function, by performing
+// a compare-and-swap operation with 'old'. The return value indicates success.
+func (p *preloader) start(old *any, preload func(*any)) bool {
+
+ // Optimistically setup a
+ // new waitgroup to set as
+ // the preload waiter.
+ var wg sync.WaitGroup
+ wg.Add(1)
+ defer wg.Done()
+
+ // Wrap waitgroup in
+ // 'any' for pointer.
+ new := any(&wg)
+ ptr := &new
+
+ // Attempt CAS operation to claim start.
+ started := p.p.CompareAndSwap(old, ptr)
+ if !started {
+ return false
+ }
+
+ // Start.
+ preload(ptr)
+ return true
+}
+
+// Done marks state as preloaded,
+// i.e. no more preload required.
+func (p *preloader) Done(ptr *any) {
+ if !p.p.CompareAndSwap(ptr, new(any)) {
+ log.Errorf(nil, "BUG: invalid preloader state: %#v", (*p.p.Load()))
+ }
+}
+
+// Clear will clear the state, marking a "preload" as required,
+// i.e. the next call to CheckPreload() will call the provided preload func.
+func (p *preloader) Clear() {
+ b := false
+ a := any(b)
+ for {
+ // Load current ptr.
+ ptr := p.p.Load()
+ if ptr == nil {
+ return // was brand-new
+ }
+
+ // Check for a preload currently in progress.
+ if wg, _ := (*ptr).(*sync.WaitGroup); wg != nil {
+ wg.Wait()
+ continue
+ }
+
+ // Try mark as needing preload.
+ if p.p.CompareAndSwap(ptr, &a) {
+ return
+ }
+ }
+}
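Editor's note: the preloader above is a small state machine over an atomic.Pointer[any]: brand-new / needs-preload, preloading (a *sync.WaitGroup to wait on), and preloaded. A hedged usage sketch follows, under the assumption (not shown in this hunk) that the callback passed to CheckPreload is responsible for calling Done(ptr) once it has filled the cache:

```go
package timeline

// preloadExample is a usage sketch only, under the assumption
// stated above; it is not code taken from this pull request.
func preloadExample(fillFromDB func()) {
	var p preloader

	// The first caller wins the CAS and performs the fill;
	// concurrent callers block inside CheckPreload on the
	// waitgroup until the fill has completed.
	p.CheckPreload(func(ptr *any) {
		// Load the freshest window of statuses
		// into the cache (elided in this sketch).
		fillFromDB()

		// Mark the preload complete so later Check() /
		// CheckPreload() calls return without waiting.
		p.Done(ptr)
	})

	// A later invalidation can force a fresh preload on
	// the next access by clearing the state again.
	p.Clear()
}
```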
diff --git a/internal/cache/timeline/status.go b/internal/cache/timeline/status.go
new file mode 100644
index 000000000..071fc5a36
--- /dev/null
+++ b/internal/cache/timeline/status.go
@@ -0,0 +1,842 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package timeline
+
+import (
+ "context"
+ "slices"
+
+ "codeberg.org/gruf/go-structr"
+
+ apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
+ "github.com/superseriousbusiness/gotosocial/internal/gtserror"
+ "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+ "github.com/superseriousbusiness/gotosocial/internal/log"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
+ "github.com/superseriousbusiness/gotosocial/internal/util"
+ "github.com/superseriousbusiness/gotosocial/internal/util/xslices"
+)
+
+// repeatBoostDepth determines the minimum count
+// of statuses after which repeat boosts, or boosts
+// of the original, may appear. This may not end
+// up *exact*, as small races between insert and the
+// repeatBoost calculation may allow 1 or so extra
+// to sneak in ahead of time, but it mostly works!
+const repeatBoostDepth = 40
+
+// StatusMeta contains minimum viable metadata
+// about a Status in order to cache a timeline.
+type StatusMeta struct {
+ ID string
+ AccountID string
+ BoostOfID string
+ BoostOfAccountID string
+
+ // is an internal flag that may be set on
+ // a StatusMeta object that will prevent
+ // preparation of its apimodel.Status, due
+ // to it being a recently repeated boost.
+ repeatBoost bool
+
+ // prepared contains prepared frontend API
+ // model for the referenced status. This may
+ // or may-not be nil depending on whether the
+ // status has been "unprepared" since the last
+ // call to "prepare" the frontend model.
+ prepared *apimodel.Status
+
+ // loaded is a temporary field that may be
+ // set for a newly loaded timeline status
+ // so that statuses don't need to be loaded
+ // from the database twice in succession.
+ //
+ // i.e. this will only be set if the status
+ // was newly inserted into the timeline cache.
+ // for existing cache items this will be nil.
+ loaded *gtsmodel.Status
+}
+
+// StatusTimeline provides a concurrency-safe sliding-window
+// cache of the freshest statuses in a timeline. Internally,
+// only StatusMeta{} objects themselves are stored, loading
+// the actual statuses when necessary, but caching prepared
+// frontend API models where possible.
+//
+// Notes on design:
+//
+// Previously, and initially when designing this newer type,
+// we had status timeline caches that would dynamically fill
+// themselves with statuses on call to Load() with statuses
+// at *any* location in the timeline, while simultaneously
+// accepting new input of statuses from the background workers.
+// This unfortunately can lead to situations where posts need
+// to be fetched from the database, but the cache isn't aware
+// they exist and instead returns an incomplete selection.
+// This problem is best outlined by the following simple example:
+//
+// "what if my timeline cache contains posts 0-to-6 and 8-to-12,
+// and i make a request for posts between 4-and-10 with no limit,
+// how is it to know that it's missing post 7?"
+//
+// The solution is to unfortunately remove a lot of the caching
+// of "older areas" of the timeline, and instead just have it
+// be a sliding window of the freshest posts of that timeline.
+// It gets preloaded initially on start / first-call, and kept
+// up-to-date with new posts by streamed inserts from background
+// workers. Any requests for posts outside this window must
+// therefore hit the database (and those results we *don't* cache).
+type StatusTimeline struct {
+
+ // underlying timeline cache of *StatusMeta{},
+ // primary-keyed by ID, with extra indices below.
+ cache structr.Timeline[*StatusMeta, string]
+
+ // preloader synchronizes preload
+ // state of the timeline cache.
+ preloader preloader
+
+ // fast-access cache indices.
+ idx_ID *structr.Index //nolint:revive
+ idx_AccountID *structr.Index //nolint:revive
+ idx_BoostOfID *structr.Index //nolint:revive
+ idx_BoostOfAccountID *structr.Index //nolint:revive
+
+ // cutoff and maximum item lengths.
+ // the timeline is trimmed back to
+ // cutoff on each call to Trim(),
+ // and maximum len triggers a Trim().
+ //
+ // the timeline itself does not
+ // limit items due to complexities
+ // it would introduce, so we apply
+ // a 'cut-off' at regular intervals.
+ cut, max int
+}
+
+// Init will initialize the timeline for usage,
+// by preparing internal indices etc. This also
+// sets the given max capacity for Trim() operations.
+func (t *StatusTimeline) Init(cap int) {
+ t.cache.Init(structr.TimelineConfig[*StatusMeta, string]{
+
+ // Timeline item primary key field.
+ PKey: structr.IndexConfig{Fields: "ID"},
+
+ // Additional indexed fields.
+ Indices: []structr.IndexConfig{
+ {Fields: "AccountID", Multiple: true},
+ {Fields: "BoostOfAccountID", Multiple: true},
+ {Fields: "BoostOfID", Multiple: true},
+ },
+
+ // Timeline item copy function.
+ Copy: func(s *StatusMeta) *StatusMeta {
+ var prepared *apimodel.Status
+ if s.prepared != nil {
+ prepared = new(apimodel.Status)
+ *prepared = *s.prepared
+ }
+ return &StatusMeta{
+ ID: s.ID,
+ AccountID: s.AccountID,
+ BoostOfID: s.BoostOfID,
+ BoostOfAccountID: s.BoostOfAccountID,
+ repeatBoost: s.repeatBoost,
+ loaded: nil, // NEVER stored
+ prepared: prepared,
+ }
+ },
+ })
+
+ // Get fast index lookup ptrs.
+ t.idx_ID = t.cache.Index("ID")
+ t.idx_AccountID = t.cache.Index("AccountID")
+ t.idx_BoostOfID = t.cache.Index("BoostOfID")
+ t.idx_BoostOfAccountID = t.cache.Index("BoostOfAccountID")
+
+ // Set maximum capacity and
+ // cutoff threshold we trim to.
+ t.cut = int(0.60 * float64(cap))
+ t.max = cap
+}
+
+// Preload will fill the StatusTimeline{} cache with
+// the latest sliding window of status metadata for the
+// timeline, as returned by the database 'loadPage' function.
+//
+// This function is concurrency-safe, and repeated calls to
+// it when already preloaded will be no-ops. To mark the
+// timeline as requiring a preload again, call .Clear().
+func (t *StatusTimeline) Preload(
+
+ // loadPage should load the timeline of given page for cache hydration.
+ loadPage func(page *paging.Page) (statuses []*gtsmodel.Status, err error),
+
+ // filter can be used to perform filtering of returned
+ // statuses BEFORE insert into cache. i.e. this will effect
+ // what actually gets stored in the timeline cache.
+ filter func(each *gtsmodel.Status) (delete bool),
+) (
+ n int,
+ err error,
+) {
+ t.preloader.CheckPreload(func(ptr *any) {
+ n, err = t.preload(loadPage, filter)
+ if err != nil {
+ return
+ }
+
+ // Mark as preloaded.
+ t.preloader.Done(ptr)
+ })
+ return
+}
+
+// preload contains the core logic of
+// Preload(), without t.preloader checks.
+func (t *StatusTimeline) preload(
+
+ // loadPage should load the timeline of given page for cache hydration.
+ loadPage func(page *paging.Page) (statuses []*gtsmodel.Status, err error),
+
+ // filter can be used to perform filtering of returned
+ // statuses BEFORE insert into cache. i.e. this will effect
+ // what actually gets stored in the timeline cache.
+ filter func(each *gtsmodel.Status) (delete bool),
+) (int, error) {
+ if loadPage == nil {
+ panic("nil load page func")
+ }
+
+ // Clear timeline
+ // before preload.
+ t.cache.Clear()
+
+ // Our starting page, at the top
+ // of the possible timeline.
+ page := new(paging.Page)
+ order := paging.OrderDescending
+ page.Max.Order = order
+ page.Max.Value = plus1hULID()
+ page.Min.Order = order
+ page.Min.Value = ""
+ page.Limit = 100
+
+ // Prepare a slice for gathering status meta.
+ metas := make([]*StatusMeta, 0, page.Limit)
+
+ var n int
+ for n < t.cut {
+ // Load page of timeline statuses.
+ statuses, err := loadPage(page)
+ if err != nil {
+ return n, gtserror.Newf("error loading statuses: %w", err)
+ }
+
+ // No more statuses from
+ // load function = at end.
+ if len(statuses) == 0 {
+ break
+ }
+
+ // Update our next page cursor from statuses.
+ page.Max.Value = statuses[len(statuses)-1].ID
+
+ // Perform any filtering on newly loaded statuses.
+ statuses = doStatusFilter(statuses, filter)
+
+ // After filtering no more
+ // statuses remain, retry.
+ if len(statuses) == 0 {
+ continue
+ }
+
+ // Convert statuses to meta and insert.
+ metas = toStatusMeta(metas[:0], statuses)
+ n = t.cache.Insert(metas...)
+ }
+
+ // This map may potentially reach 100s-1000s in size,
+ // but that's still easily manageable memory-wise.
+ recentBoosts := make(map[string]int, t.cut)
+
+ // Iterate timeline ascending (i.e. oldest -> newest), tracking
+ // entry IDs and noting whether boosts have been seen recently.
+ for idx, value := range t.cache.RangeUnsafe(structr.Asc) {
+
+ // Store current ID in map.
+ recentBoosts[value.ID] = idx
+
+ // If it's a boost, check if the original,
+ // or a boost of it has been seen recently.
+ if id := value.BoostOfID; id != "" {
+
+ // Check if seen recently.
+ last, ok := recentBoosts[id]
+ repeat := ok && (idx-last) < repeatBoostDepth
+ value.repeatBoost = repeat
+
+ // Update last-seen idx.
+ recentBoosts[id] = idx
+ }
+ }
+
+ return n, nil
+}
+
+// Load will load the given page of timeline statuses. It will
+// first prioritize fetching statuses from the sliding window
+// that is the timeline cache of latest statuses, else it will
+// fall back to loading from the database using callback funcs.
+// The returned string values are the low / high status ID
+// paging values, used in calculating next / prev page links.
+func (t *StatusTimeline) Load(
+ ctx context.Context,
+ page *paging.Page,
+
+ // loadPage should load the timeline of given page for cache hydration.
+ loadPage func(page *paging.Page) (statuses []*gtsmodel.Status, err error),
+
+ // loadIDs should load status models with the given IDs; this is used
+ // to load status models for already-cached entries in the timeline.
+ loadIDs func(ids []string) (statuses []*gtsmodel.Status, err error),
+
+ // filter performs filtering of returned statuses.
+ filter func(each *gtsmodel.Status) (delete bool),
+
+ // prepareAPI should prepare internal status model to frontend API model.
+ prepareAPI func(status *gtsmodel.Status) (apiStatus *apimodel.Status, err error),
+) (
+ []*apimodel.Status,
+ string, // lo
+ string, // hi
+ error,
+) {
+ var err error
+
+ // Get paging details.
+ lo := page.Min.Value
+ hi := page.Max.Value
+ limit := page.Limit
+ order := page.Order()
+ dir := toDirection(order)
+
+ // Use a copy of current page so
+ // we can repeatedly update it.
+ nextPg := new(paging.Page)
+ *nextPg = *page
+ nextPg.Min.Value = lo
+ nextPg.Max.Value = hi
+
+ // Interstitial meta objects.
+ var metas []*StatusMeta
+
+ // Returned frontend API statuses.
+ var apiStatuses []*apimodel.Status
+
+ // TODO: we can remove this nil
+ // check when we've updated all
+ // our timeline endpoints to have
+ // streamed timeline caches.
+ if t != nil {
+
+ // Ensure timeline has been preloaded.
+ _, err = t.Preload(loadPage, filter)
+ if err != nil {
+ return nil, "", "", err
+ }
+
+ // First we attempt to load status
+ // metadata entries from the timeline
+ // cache, up to given limit.
+ metas = t.cache.Select(
+ util.PtrIf(lo),
+ util.PtrIf(hi),
+ util.PtrIf(limit),
+ dir,
+ )
+
+ if len(metas) > 0 {
+ // Before we can do any filtering, we need
+ // to load status models for cached entries.
+ err = loadStatuses(metas, loadIDs)
+ if err != nil {
+ return nil, "", "", gtserror.Newf("error loading statuses: %w", err)
+ }
+
+ // Set returned lo, hi values.
+ lo = metas[len(metas)-1].ID
+ hi = metas[0].ID
+
+ // Allocate slice of expected required API models.
+ apiStatuses = make([]*apimodel.Status, 0, len(metas))
+
+ // Prepare frontend API models for
+ // the cached statuses. For now this
+ // also does its own extra filtering.
+ apiStatuses = prepareStatuses(ctx,
+ metas,
+ prepareAPI,
+ apiStatuses,
+ limit,
+ )
+ }
+ }
+
+ // If no cached timeline statuses
+ // were found for page, we need to
+ // call through to the database.
+ if len(apiStatuses) == 0 {
+
+ // Pass through to main timeline db load function.
+ apiStatuses, lo, hi, err = loadStatusTimeline(ctx,
+ nextPg,
+ metas,
+ apiStatuses,
+ loadPage,
+ filter,
+ prepareAPI,
+ )
+ if err != nil {
+ return nil, "", "", err
+ }
+ }
+
+ if order.Ascending() {
+ // The caller always expects the statuses
+ // to be returned in DESC order, but we
+ // build the status slice in paging order.
+ // If paging ASC, we need to reverse the
+ // returned statuses and paging values.
+ slices.Reverse(apiStatuses)
+ lo, hi = hi, lo
+ }
+
+ return apiStatuses, lo, hi, nil
+}
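+
+// A rough usage sketch of the wiring above: loading one page of a home
+// timeline. The db* and convertToAPI arguments are hypothetical stand-ins
+// for whatever database / converter callbacks the caller has available.
+func exampleLoadHomePage(
+ ctx context.Context,
+ tl *StatusTimeline,
+ page *paging.Page,
+ dbLoadHomePage func(*paging.Page) ([]*gtsmodel.Status, error),
+ dbGetStatusesByIDs func([]string) ([]*gtsmodel.Status, error),
+ convertToAPI func(*gtsmodel.Status) (*apimodel.Status, error),
+) (
+ []*apimodel.Status,
+ string, // lo
+ string, // hi
+ error,
+) {
+ return tl.Load(ctx, page,
+
+ // Hydrates the cache window, and pages
+ // past it when a request falls outside.
+ dbLoadHomePage,
+
+ // Loads full status models for entries
+ // already cached as bare StatusMeta.
+ dbGetStatusesByIDs,
+
+ // Drops statuses the requester should
+ // not see (none, in this sketch).
+ func(*gtsmodel.Status) bool { return false },
+
+ // Prepares frontend API models.
+ convertToAPI,
+ )
+}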
+
+// loadStatusTimeline encapsulates the logic of iteratively
+// attempting to load a status timeline page from the database,
+// via the given callback functions, with the results then
+// prepared as frontend API models for return.
+//
+// in time it may make sense to move this logic
+// into the StatusTimeline{}.Load() function.
+func loadStatusTimeline(
+ ctx context.Context,
+ nextPg *paging.Page,
+ metas []*StatusMeta,
+ apiStatuses []*apimodel.Status,
+ loadPage func(page *paging.Page) (statuses []*gtsmodel.Status, err error),
+ filter func(each *gtsmodel.Status) (delete bool),
+ prepareAPI func(status *gtsmodel.Status) (apiStatus *apimodel.Status, err error),
+) (
+ []*apimodel.Status,
+ string, // lo
+ string, // hi
+ error,
+) {
+ if loadPage == nil {
+ panic("nil load page func")
+ }
+
+ // Lowest and highest ID
+ // vals of loaded statuses.
+ var lo, hi string
+
+ // Extract paging params.
+ order := nextPg.Order()
+ limit := nextPg.Limit
+
+ // Load a little more than
+ // limit to reduce db calls.
+ nextPg.Limit += 10
+
+ // Ensure we have a slice of meta objects to
+ // use in later preparation of the API models.
+ metas = xslices.GrowJust(metas[:0], nextPg.Limit)
+
+ // Ensure we have a slice of required frontend API models.
+ apiStatuses = xslices.GrowJust(apiStatuses[:0], nextPg.Limit)
+
+ // Perform maximum of 5 load
+ // attempts fetching statuses.
+ for i := 0; i < 5; i++ {
+
+ // Load next timeline statuses.
+ statuses, err := loadPage(nextPg)
+ if err != nil {
+ return nil, "", "", gtserror.Newf("error loading timeline: %w", err)
+ }
+
+ // No more statuses from
+ // load function = at end.
+ if len(statuses) == 0 {
+ break
+ }
+
+ if hi == "" {
+ // Set hi returned paging
+ // value if not already set.
+ hi = statuses[0].ID
+ }
+
+ // Update nextPg cursor parameter for next database query.
+ nextPageParams(nextPg, statuses[len(statuses)-1].ID, order)
+
+ // Perform any filtering on newly loaded statuses.
+ statuses = doStatusFilter(statuses, filter)
+
+ // After filtering no more
+ // statuses remain, retry.
+ if len(statuses) == 0 {
+ continue
+ }
+
+ // Convert to our interstitial meta type.
+ metas = toStatusMeta(metas[:0], statuses)
+
+ // Prepare frontend API models for
+ // the loaded statuses. For now this
+ // also does its own extra filtering.
+ apiStatuses = prepareStatuses(ctx,
+ metas,
+ prepareAPI,
+ apiStatuses,
+ limit,
+ )
+
+ // If we have anything, return
+ // here. Even if below limit.
+ if len(apiStatuses) > 0 {
+
+ // Set returned lo status paging value.
+ lo = apiStatuses[len(apiStatuses)-1].ID
+ break
+ }
+ }
+
+ return apiStatuses, lo, hi, nil
+}
+
+// InsertOne allows you to insert a single status into the timeline, with optional prepared API model.
+// The return value indicates whether the status should be skipped from streams, e.g. if it was already boosted recently.
+func (t *StatusTimeline) InsertOne(status *gtsmodel.Status, prepared *apimodel.Status) (skip bool) {
+
+ // If timeline is not preloaded, i.e.
+ // no-one is using it, don't insert.
+ if !t.preloader.Check() {
+ return false
+ }
+
+ if status.BoostOfID != "" {
+ // Check through top $repeatBoostDepth number of items.
+ for i, value := range t.cache.RangeUnsafe(structr.Desc) {
+ if i >= repeatBoostDepth {
+ break
+ }
+
+ // We don't care about values that have
+ // already been hidden as repeat boosts.
+ if value.repeatBoost {
+ continue
+ }
+
+ // If the inserted status has already been boosted, or the original was
+ // posted within the last $repeatBoostDepth entries, mark it as a repeat boost.
+ if value.ID == status.BoostOfID || value.BoostOfID == status.BoostOfID {
+ skip = true
+ break
+ }
+ }
+ }
+
+ // Insert new timeline status.
+ t.cache.Insert(&StatusMeta{
+ ID: status.ID,
+ AccountID: status.AccountID,
+ BoostOfID: status.BoostOfID,
+ BoostOfAccountID: status.BoostOfAccountID,
+ repeatBoost: skip,
+ loaded: nil,
+ prepared: prepared,
+ })
+
+ return
+}
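+
+// A rough sketch of how a streaming-side caller might use the skip
+// return value above; pushToStreams is a hypothetical stand-in for
+// whatever stream delivery the caller actually performs.
+func exampleInsertAndStream(
+ tl *StatusTimeline,
+ status *gtsmodel.Status,
+ apiStatus *apimodel.Status,
+ pushToStreams func(*apimodel.Status),
+) {
+ // Insert into the timeline cache; skip is true when
+ // this is a boost repeated within repeatBoostDepth.
+ if skip := tl.InsertOne(status, apiStatus); skip {
+ return
+ }
+
+ // Not a recent repeat
+ // boost, stream it out.
+ pushToStreams(apiStatus)
+}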
+
+// RemoveByStatusIDs removes all cached timeline entries pertaining to
+// the given status IDs, including those that may be boosts of them.
+func (t *StatusTimeline) RemoveByStatusIDs(statusIDs ...string) {
+ keys := make([]structr.Key, len(statusIDs))
+
+ // Nil check indices outside loops.
+ if t.idx_ID == nil ||
+ t.idx_BoostOfID == nil {
+ panic("indices are nil")
+ }
+
+ // Convert statusIDs to index keys.
+ for i, id := range statusIDs {
+ keys[i] = t.idx_ID.Key(id)
+ }
+
+ // Invalidate all cached entries with IDs.
+ t.cache.Invalidate(t.idx_ID, keys...)
+
+ // Convert statusIDs to index keys.
+ for i, id := range statusIDs {
+ keys[i] = t.idx_BoostOfID.Key(id)
+ }
+
+ // Invalidate all cached entries as boost of IDs.
+ t.cache.Invalidate(t.idx_BoostOfID, keys...)
+}
+
+// RemoveByAccountIDs removes all cached timeline entries authored by
+// the given account IDs, including those that may be boosted by them.
+func (t *StatusTimeline) RemoveByAccountIDs(accountIDs ...string) {
+ keys := make([]structr.Key, len(accountIDs))
+
+ // Nil check indices outside loops.
+ if t.idx_AccountID == nil ||
+ t.idx_BoostOfAccountID == nil {
+ panic("indices are nil")
+ }
+
+ // Convert accountIDs to index keys.
+ for i, id := range accountIDs {
+ keys[i] = t.idx_AccountID.Key(id)
+ }
+
+ // Invalidate all cached entries as by IDs.
+ t.cache.Invalidate(t.idx_AccountID, keys...)
+
+ // Convert accountIDs to index keys.
+ for i, id := range accountIDs {
+ keys[i] = t.idx_BoostOfAccountID.Key(id)
+ }
+
+ // Invalidate all cached entries as boosted by IDs.
+ t.cache.Invalidate(t.idx_BoostOfAccountID, keys...)
+}
+
+// UnprepareByStatusIDs removes cached frontend API models for all cached
+// timeline entries pertaining to status ID, including boosts of given status.
+func (t *StatusTimeline) UnprepareByStatusIDs(statusIDs ...string) {
+ keys := make([]structr.Key, len(statusIDs))
+
+ // Nil check indices outside loops.
+ if t.idx_ID == nil ||
+ t.idx_BoostOfID == nil {
+ panic("indices are nil")
+ }
+
+ // Convert statusIDs to index keys.
+ for i, id := range statusIDs {
+ keys[i] = t.idx_ID.Key(id)
+ }
+
+ // Unprepare all statuses stored under StatusMeta.ID.
+ for meta := range t.cache.RangeKeysUnsafe(t.idx_ID, keys...) {
+ meta.prepared = nil
+ }
+
+ // Convert statusIDs to index keys.
+ for i, id := range statusIDs {
+ keys[i] = t.idx_BoostOfID.Key(id)
+ }
+
+ // Unprepare all statuses stored under StatusMeta.BoostOfID.
+ for meta := range t.cache.RangeKeysUnsafe(t.idx_BoostOfID, keys...) {
+ meta.prepared = nil
+ }
+}
+
+// UnprepareByAccountIDs removes cached frontend API models for all cached
+// timeline entries authored by account ID, including boosts by account ID.
+func (t *StatusTimeline) UnprepareByAccountIDs(accountIDs ...string) {
+ keys := make([]structr.Key, len(accountIDs))
+
+ // Nil check indices outside loops.
+ if t.idx_AccountID == nil ||
+ t.idx_BoostOfAccountID == nil {
+ panic("indices are nil")
+ }
+
+ // Convert accountIDs to index keys.
+ for i, id := range accountIDs {
+ keys[i] = t.idx_AccountID.Key(id)
+ }
+
+ // Unprepare all statuses stored under StatusMeta.AccountID.
+ for meta := range t.cache.RangeKeysUnsafe(t.idx_AccountID, keys...) {
+ meta.prepared = nil
+ }
+
+ // Convert accountIDs to index keys.
+ for i, id := range accountIDs {
+ keys[i] = t.idx_BoostOfAccountID.Key(id)
+ }
+
+ // Unprepare all statuses stored under StatusMeta.BoostOfAccountID.
+ for meta := range t.cache.RangeKeysUnsafe(t.idx_BoostOfAccountID, keys...) {
+ meta.prepared = nil
+ }
+}
+
+// UnprepareAll removes cached frontend API
+// models for all cached timeline entries.
+func (t *StatusTimeline) UnprepareAll() {
+ for _, value := range t.cache.RangeUnsafe(structr.Asc) {
+ value.prepared = nil
+ }
+}
+
+// Trim will ensure that the receiving timeline is less than or
+// equal in length to the configured cutoff threshold, itself a
+// percentage of the timeline's preconfigured maximum capacity.
+// This will always trim from the bottom-up (i.e. oldest first)
+// to prioritize recently streamed inserts.
+func (t *StatusTimeline) Trim() { t.cache.Trim(t.cut, structr.Asc) }
+
+// Clear will mark the entire timeline as requiring preload,
+// which will trigger a clear and reload of the entire thing.
+func (t *StatusTimeline) Clear() { t.preloader.Clear() }
+
+// prepareStatuses takes a slice of cached (or, freshly loaded!) StatusMeta{}
+// models, and uses the given function to return prepared frontend API models.
+func prepareStatuses(
+ ctx context.Context,
+ meta []*StatusMeta,
+ prepareAPI func(*gtsmodel.Status) (*apimodel.Status, error),
+ apiStatuses []*apimodel.Status,
+ limit int,
+) []*apimodel.Status {
+ if prepareAPI == nil {
+ panic("nil prepare fn")
+ }
+
+ // Iterate the given StatusMeta objects for pre-prepared
+ // frontend models, otherwise attempting to prepare them.
+ for _, meta := range meta {
+
+ // Check if we have prepared enough
+ // API statuses for caller to return.
+ if len(apiStatuses) >= limit {
+ break
+ }
+
+ if meta.loaded == nil {
+ // We failed loading this
+ // status, skip preparing.
+ continue
+ }
+
+ if meta.repeatBoost {
+ // This is a repeat boost in
+ // short timespan, skip it.
+ continue
+ }
+
+ if meta.prepared == nil {
+ var err error
+
+ // Prepare the provided status to frontend.
+ meta.prepared, err = prepareAPI(meta.loaded)
+ if err != nil {
+ log.Errorf(ctx, "error preparing status %s: %v", meta.loaded.URI, err)
+ continue
+ }
+ }
+
+ // Append to return slice.
+ if meta.prepared != nil {
+ apiStatuses = append(apiStatuses, meta.prepared)
+ }
+ }
+
+ return apiStatuses
+}
+
+// loadStatuses uses the provided callback to load status
+// models for those entries in the metas slice that aren't
+// yet loaded. The amount to load very much depends on whether
+// the meta objects are yet-to-be-cached (i.e. newly loaded,
+// status attached), or from the timeline cache (status unloaded).
+func loadStatuses(
+ metas []*StatusMeta,
+ loadIDs func([]string) ([]*gtsmodel.Status, error),
+) error {
+
+ // Determine which of our passed status
+ // meta objects still need statuses loading.
+ toLoadIDs := make([]string, len(metas))
+ loadedMap := make(map[string]*StatusMeta, len(metas))
+ for i, meta := range metas {
+ if meta.loaded == nil {
+ toLoadIDs[i] = meta.ID
+ loadedMap[meta.ID] = meta
+ }
+ }
+
+ // Load statuses with given IDs.
+ loaded, err := loadIDs(toLoadIDs)
+ if err != nil {
+ return gtserror.Newf("error loading statuses: %w", err)
+ }
+
+ // Update returned StatusMeta objects
+ // with newly loaded statuses by IDs.
+ for i := range loaded {
+ status := loaded[i]
+ meta := loadedMap[status.ID]
+ meta.loaded = status
+ }
+
+ return nil
+}
+
+// toStatusMeta converts a slice of database model statuses
+// into our cache wrapper type, a slice of *StatusMeta{}.
+func toStatusMeta(in []*StatusMeta, statuses []*gtsmodel.Status) []*StatusMeta {
+ return xslices.Gather(in, statuses, func(s *gtsmodel.Status) *StatusMeta {
+ return &StatusMeta{
+ ID: s.ID,
+ AccountID: s.AccountID,
+ BoostOfID: s.BoostOfID,
+ BoostOfAccountID: s.BoostOfAccountID,
+ loaded: s,
+ prepared: nil,
+ }
+ })
+}
+
+// doStatusFilter performs the given filter function on the provided statuses.
+func doStatusFilter(statuses []*gtsmodel.Status, filter func(*gtsmodel.Status) bool) []*gtsmodel.Status {
+
+ // Check for provided
+ // filter function.
+ if filter == nil {
+ return statuses
+ }
+
+ // Filter the provided input statuses.
+ return slices.DeleteFunc(statuses, filter)
+}
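+
+// A rough sketch of a filter callback as accepted by Preload() / Load()
+// above: returning true drops a status before it reaches the cache. The
+// muted map used here is a purely illustrative input.
+func exampleMuteFilter(muted map[string]struct{}) func(*gtsmodel.Status) bool {
+ return func(s *gtsmodel.Status) (delete bool) {
+ // Drop statuses authored by muted accounts.
+ if _, ok := muted[s.AccountID]; ok {
+ return true
+ }
+
+ // Drop boosts of statuses
+ // by muted accounts.
+ _, ok := muted[s.BoostOfAccountID]
+ return ok
+ }
+}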
diff --git a/internal/cache/timeline/status_map.go b/internal/cache/timeline/status_map.go
new file mode 100644
index 000000000..e402883af
--- /dev/null
+++ b/internal/cache/timeline/status_map.go
@@ -0,0 +1,198 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package timeline
+
+import (
+ "maps"
+ "sync/atomic"
+)
+
+// StatusTimelines is a concurrency-safe map of StatusTimeline{}
+// objects, optimizing *very heavily* for reads over writes.
+type StatusTimelines struct {
+ ptr atomic.Pointer[map[string]*StatusTimeline] // ronly except by CAS
+ cap int
+}
+
+// Init stores the given argument(s) such that any StatusTimeline{}
+// objects created by MustGet() will be initialized with them.
+func (t *StatusTimelines) Init(cap int) { t.cap = cap }
+
+// MustGet will attempt to fetch the StatusTimeline{} stored under key, else create one.
+func (t *StatusTimelines) MustGet(key string) *StatusTimeline {
+ var tt *StatusTimeline
+
+ for {
+ // Load current ptr.
+ cur := t.ptr.Load()
+
+ // Get timeline map to work on.
+ var m map[string]*StatusTimeline
+
+ if cur != nil {
+ // Look for existing
+ // timeline in cache.
+ tt = (*cur)[key]
+ if tt != nil {
+ return tt
+ }
+
+ // Get clone of current
+ // before modifications.
+ m = maps.Clone(*cur)
+ } else {
+ // Allocate new timeline map for below.
+ m = make(map[string]*StatusTimeline)
+ }
+
+ if tt == nil {
+ // Allocate new timeline.
+ tt = new(StatusTimeline)
+ tt.Init(t.cap)
+ }
+
+ // Store timeline
+ // in new map.
+ m[key] = tt
+
+ // Attempt to update the map ptr.
+ if !t.ptr.CompareAndSwap(cur, &m) {
+
+ // We failed the
+ // CAS, reloop.
+ continue
+ }
+
+ // Successfully inserted
+ // new timeline model.
+ return tt
+ }
+}
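+
+// A rough usage sketch: one cached timeline per list, keyed by list ID.
+// The key and capacity values here are purely illustrative.
+func exampleMustGet(timelines *StatusTimelines) *StatusTimeline {
+ // Set the capacity given to each
+ // newly created timeline below.
+ timelines.Init(800)
+
+ // First call for a key allocates + initializes its
+ // timeline via a copy-on-write swap of the map ...
+ tl := timelines.MustGet("01EXAMPLELISTID")
+
+ // ... while repeated calls are just an atomic
+ // pointer load and a read-only map lookup.
+ _ = timelines.MustGet("01EXAMPLELISTID")
+
+ return tl
+}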
+
+// Delete will delete the stored StatusTimeline{} under key, if any.
+func (t *StatusTimelines) Delete(key string) {
+ for {
+ // Load current ptr.
+ cur := t.ptr.Load()
+
+ // Check for empty map / not in map.
+ if cur == nil || (*cur)[key] == nil {
+ return
+ }
+
+ // Get clone of current
+ // before modifications.
+ m := maps.Clone(*cur)
+
+ // Delete ID.
+ delete(m, key)
+
+ // Attempt to update the map ptr.
+ if !t.ptr.CompareAndSwap(cur, &m) {
+
+ // We failed the
+ // CAS, reloop.
+ continue
+ }
+
+ // Successfully
+ // deleted ID.
+ return
+ }
+}
+
+// RemoveByStatusIDs calls RemoveByStatusIDs() for each of the stored StatusTimeline{}s.
+func (t *StatusTimelines) RemoveByStatusIDs(statusIDs ...string) {
+ if p := t.ptr.Load(); p != nil {
+ for _, tt := range *p {
+ tt.RemoveByStatusIDs(statusIDs...)
+ }
+ }
+}
+
+// RemoveByAccountIDs calls RemoveByAccountIDs() for each of the stored StatusTimeline{}s.
+func (t *StatusTimelines) RemoveByAccountIDs(accountIDs ...string) {
+ if p := t.ptr.Load(); p != nil {
+ for _, tt := range *p {
+ tt.RemoveByAccountIDs(accountIDs...)
+ }
+ }
+}
+
+// UnprepareByStatusIDs calls UnprepareByStatusIDs() for each of the stored StatusTimeline{}s.
+func (t *StatusTimelines) UnprepareByStatusIDs(statusIDs ...string) {
+ if p := t.ptr.Load(); p != nil {
+ for _, tt := range *p {
+ tt.UnprepareByStatusIDs(statusIDs...)
+ }
+ }
+}
+
+// UnprepareByAccountIDs calls UnprepareByAccountIDs() for each of the stored StatusTimeline{}s.
+func (t *StatusTimelines) UnprepareByAccountIDs(accountIDs ...string) {
+ if p := t.ptr.Load(); p != nil {
+ for _, tt := range *p {
+ tt.UnprepareByAccountIDs(accountIDs...)
+ }
+ }
+}
+
+// Unprepare attempts to call UnprepareAll() for StatusTimeline{} under key.
+func (t *StatusTimelines) Unprepare(key string) {
+ if p := t.ptr.Load(); p != nil {
+ if tt := (*p)[key]; tt != nil {
+ tt.UnprepareAll()
+ }
+ }
+}
+
+// UnprepareAll calls UnprepareAll() for each of the stored StatusTimeline{}s.
+func (t *StatusTimelines) UnprepareAll() {
+ if p := t.ptr.Load(); p != nil {
+ for _, tt := range *p {
+ tt.UnprepareAll()
+ }
+ }
+}
+
+// Trim calls Trim() for each of the stored StatusTimeline{}s.
+func (t *StatusTimelines) Trim() {
+ if p := t.ptr.Load(); p != nil {
+ for _, tt := range *p {
+ tt.Trim()
+ }
+ }
+}
+
+// Clear attempts to call Clear() for StatusTimeline{} under key.
+func (t *StatusTimelines) Clear(key string) {
+ if p := t.ptr.Load(); p != nil {
+ if tt := (*p)[key]; tt != nil {
+ tt.Clear()
+ }
+ }
+}
+
+// ClearAll calls Clear() for each of the stored StatusTimeline{}s.
+func (t *StatusTimelines) ClearAll() {
+ if p := t.ptr.Load(); p != nil {
+ for _, tt := range *p {
+ tt.Clear()
+ }
+ }
+}
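+
+// A rough sketch of the intended split between the Remove* and Unprepare*
+// calls above, as seen from cache invalidation hooks; the statusID and
+// accountID parameters are illustrative.
+func exampleInvalidationHooks(timelines *StatusTimelines, statusID string, accountID string) {
+ // A deleted status (and any boosts of it) must
+ // disappear entirely from every cached timeline.
+ timelines.RemoveByStatusIDs(statusID)
+
+ // An updated account (e.g. a new display name) only needs
+ // its prepared frontend models dropping; the underlying
+ // timeline entries themselves stay cached.
+ timelines.UnprepareByAccountIDs(accountID)
+}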
diff --git a/internal/cache/timeline/status_test.go b/internal/cache/timeline/status_test.go
new file mode 100644
index 000000000..3e53d8256
--- /dev/null
+++ b/internal/cache/timeline/status_test.go
@@ -0,0 +1,361 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package timeline
+
+import (
+ "slices"
+ "testing"
+
+ "codeberg.org/gruf/go-structr"
+ "github.com/stretchr/testify/assert"
+ apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
+ "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+)
+
+var testStatusMeta = []*StatusMeta{
+ {
+ ID: "06B19VYTHEG01F3YW13RQE0QM8",
+ AccountID: "06B1A61MZEBBVDSNPRJAA8F2C4",
+ BoostOfID: "06B1A5KQWGQ1ABM3FA7TDX1PK8",
+ BoostOfAccountID: "06B1A6707818050PCK8SJAEC6G",
+ },
+ {
+ ID: "06B19VYTJFT0KDWT5C1CPY0XNC",
+ AccountID: "06B1A61MZN3ZQPZVNGEFBNYBJW",
+ BoostOfID: "06B1A5KQWSGFN4NNRV34KV5S9R",
+ BoostOfAccountID: "06B1A6707HY8RAXG7JPCWR7XD4",
+ },
+ {
+ ID: "06B19VYTJ6WZQPRVNJHPEZH04W",
+ AccountID: "06B1A61MZY7E0YB6G01VJX8ERR",
+ BoostOfID: "06B1A5KQX5NPGSYGH8NC7HR1GR",
+ BoostOfAccountID: "06B1A6707XCSAF0MVCGGYF9160",
+ },
+ {
+ ID: "06B19VYTJPKGG8JYCR1ENAV7KC",
+ AccountID: "06B1A61N07K1GC35PJ3CZ4M020",
+ BoostOfID: "06B1A5KQXG6ZCWE1R7C7KR7RYW",
+ BoostOfAccountID: "06B1A67084W6SB6P6HJB7K5DSG",
+ },
+ {
+ ID: "06B19VYTHRR8S35QXC5A6VE2YW",
+ AccountID: "06B1A61N0P1TGQDVKANNG4AKP4",
+ BoostOfID: "06B1A5KQY3K839Z6S5HHAJKSWW",
+ BoostOfAccountID: "06B1A6708SPJC3X3ZG3SGG8BN8",
+ },
+}
+
+func TestStatusTimelineUnprepare(t *testing.T) {
+ var tt StatusTimeline
+ tt.Init(1000)
+
+ // Clone the input test status data.
+ data := slices.Clone(testStatusMeta)
+
+ // Bodge some 'prepared'
+ // models on test data.
+ for _, meta := range data {
+ meta.prepared = &apimodel.Status{}
+ }
+
+ // Insert test data into timeline.
+ _ = tt.cache.Insert(data...)
+
+ for _, meta := range data {
+ // Unprepare this status with ID.
+ tt.UnprepareByStatusIDs(meta.ID)
+
+ // Check the item is unprepared.
+ value := getStatusByID(&tt, meta.ID)
+ assert.Nil(t, value.prepared)
+ }
+
+ // Clear and reinsert.
+ tt.cache.Clear()
+ tt.cache.Insert(data...)
+
+ for _, meta := range data {
+ // Unprepare this status with boost ID.
+ tt.UnprepareByStatusIDs(meta.BoostOfID)
+
+ // Check the item is unprepared.
+ value := getStatusByID(&tt, meta.ID)
+ assert.Nil(t, value.prepared)
+ }
+
+ // Clear and reinsert.
+ tt.cache.Clear()
+ tt.cache.Insert(data...)
+
+ for _, meta := range data {
+ // Unprepare this status with account ID.
+ tt.UnprepareByAccountIDs(meta.AccountID)
+
+ // Check the item is unprepared.
+ value := getStatusByID(&tt, meta.ID)
+ assert.Nil(t, value.prepared)
+ }
+
+ // Clear and reinsert.
+ tt.cache.Clear()
+ tt.cache.Insert(data...)
+
+ for _, meta := range data {
+ // Unprepare this status with boost account ID.
+ tt.UnprepareByAccountIDs(meta.BoostOfAccountID)
+
+ // Check the item is unprepared.
+ value := getStatusByID(&tt, meta.ID)
+ assert.Nil(t, value.prepared)
+ }
+}
+
+func TestStatusTimelineRemove(t *testing.T) {
+ var tt StatusTimeline
+ tt.Init(1000)
+
+ // Clone the input test status data.
+ data := slices.Clone(testStatusMeta)
+
+ // Insert test data into timeline.
+ _ = tt.cache.Insert(data...)
+
+ for _, meta := range data {
+ // Remove this status with ID.
+ tt.RemoveByStatusIDs(meta.ID)
+
+ // Check the item is now gone.
+ value := getStatusByID(&tt, meta.ID)
+ assert.Nil(t, value)
+ }
+
+ // Clear and reinsert.
+ tt.cache.Clear()
+ tt.cache.Insert(data...)
+
+ for _, meta := range data {
+ // Remove this status with boost ID.
+ tt.RemoveByStatusIDs(meta.BoostOfID)
+
+ // Check the item is now gone.
+ value := getStatusByID(&tt, meta.ID)
+ assert.Nil(t, value)
+ }
+
+ // Clear and reinsert.
+ tt.cache.Clear()
+ tt.cache.Insert(data...)
+
+ for _, meta := range data {
+ // Remove this status with account ID.
+ tt.RemoveByAccountIDs(meta.AccountID)
+
+ // Check the item is now gone.
+ value := getStatusByID(&tt, meta.ID)
+ assert.Nil(t, value)
+ }
+
+ // Clear and reinsert.
+ tt.cache.Clear()
+ tt.cache.Insert(data...)
+
+ for _, meta := range data {
+ // Remove this status with boost account ID.
+ tt.RemoveByAccountIDs(meta.BoostOfAccountID)
+
+ // Check the item is now gone.
+ value := getStatusByID(&tt, meta.ID)
+ assert.Nil(t, value)
+ }
+}
+
+func TestStatusTimelineInserts(t *testing.T) {
+ var tt StatusTimeline
+ tt.Init(1000)
+
+ // Clone the input test status data.
+ data := slices.Clone(testStatusMeta)
+
+ // Insert test data into timeline.
+ l := tt.cache.Insert(data...)
+ assert.Equal(t, len(data), l)
+
+ // Ensure 'min' value status
+ // in the timeline is expected.
+ minID := minStatusID(data)
+ assert.Equal(t, minID, minStatus(&tt).ID)
+
+ // Ensure 'max' value status
+ // in the timeline is expected.
+ maxID := maxStatusID(data)
+ assert.Equal(t, maxID, maxStatus(&tt).ID)
+
+ // Manually mark timeline as 'preloaded'.
+ tt.preloader.CheckPreload(tt.preloader.Done)
+
+ // Specifically craft a boost of latest (i.e. max) status in timeline.
+ boost := &gtsmodel.Status{ID: "06B1A00PQWDZZH9WK9P5VND35C", BoostOfID: maxID}
+
+ // Insert boost into the timeline
+ // checking for 'repeatBoost' notifier.
+ repeatBoost := tt.InsertOne(boost, nil)
+ assert.True(t, repeatBoost)
+
+ // This should be the new 'max'
+ // and have 'repeatBoost' set.
+ newMax := maxStatus(&tt)
+ assert.Equal(t, boost.ID, newMax.ID)
+ assert.True(t, newMax.repeatBoost)
+
+ // Specifically craft 2 boosts of some unseen status in the timeline.
+ boost1 := &gtsmodel.Status{ID: "06B1A121YEX02S0AY48X93JMDW", BoostOfID: "unseen"}
+ boost2 := &gtsmodel.Status{ID: "06B1A12TG2NTJC9P270EQXS08M", BoostOfID: "unseen"}
+
+ // Insert boosts into the timeline, ensuring
+ // first is not 'repeat', but second one is.
+ repeatBoost1 := tt.InsertOne(boost1, nil)
+ repeatBoost2 := tt.InsertOne(boost2, nil)
+ assert.False(t, repeatBoost1)
+ assert.True(t, repeatBoost2)
+}
+
+func TestStatusTimelineTrim(t *testing.T) {
+ var tt StatusTimeline
+ tt.Init(1000)
+
+ // Clone the input test status data.
+ data := slices.Clone(testStatusMeta)
+
+ // Insert test data into timeline.
+ _ = tt.cache.Insert(data...)
+
+ // From here it'll be easier to have DESC sorted
+ // test data for reslicing and checking against.
+ slices.SortFunc(data, func(a, b *StatusMeta) int {
+ const k = +1
+ switch {
+ case a.ID < b.ID:
+ return +k
+ case b.ID < a.ID:
+ return -k
+ default:
+ return 0
+ }
+ })
+
+ // Set manual cutoff for trim.
+ tt.cut = len(data) - 1
+
+ // Perform trim.
+ tt.Trim()
+
+ // The post trim length should be tt.cut
+ assert.Equal(t, tt.cut, tt.cache.Len())
+
+ // It specifically should have removed
+ // the oldest (i.e. min) status element.
+ minID := data[len(data)-1].ID
+ assert.NotEqual(t, minID, minStatus(&tt).ID)
+ assert.False(t, containsStatusID(&tt, minID))
+
+ // Drop trimmed status.
+ data = data[:len(data)-1]
+
+ // Set smaller cutoff for trim.
+ tt.cut = len(data) - 2
+
+ // Perform trim.
+ tt.Trim()
+
+ // The post trim length should be tt.cut
+ assert.Equal(t, tt.cut, tt.cache.Len())
+
+ // It specifically should have removed
+ // the oldest 2 (i.e. min) status elements.
+ minID1 := data[len(data)-1].ID
+ minID2 := data[len(data)-2].ID
+ assert.NotEqual(t, minID1, minStatus(&tt).ID)
+ assert.NotEqual(t, minID2, minStatus(&tt).ID)
+ assert.False(t, containsStatusID(&tt, minID1))
+ assert.False(t, containsStatusID(&tt, minID2))
+
+ // Trim at desired length
+ // should cause no change.
+ before := tt.cache.Len()
+ tt.Trim()
+ assert.Equal(t, before, tt.cache.Len())
+}
+
+// containsStatusID returns whether timeline contains a status with ID.
+func containsStatusID(t *StatusTimeline, id string) bool {
+ return getStatusByID(t, id) != nil
+}
+
+// getStatusByID attempts to fetch status with given ID from timeline.
+func getStatusByID(t *StatusTimeline, id string) *StatusMeta {
+ for _, value := range t.cache.Range(structr.Desc) {
+ if value.ID == id {
+ return value
+ }
+ }
+ return nil
+}
+
+// maxStatus returns the newest (i.e. highest value ID) status in timeline.
+func maxStatus(t *StatusTimeline) *StatusMeta {
+ var meta *StatusMeta
+ for _, value := range t.cache.Range(structr.Desc) {
+ meta = value
+ break
+ }
+ return meta
+}
+
+// minStatus returns the oldest (i.e. lowest value ID) status in timeline.
+func minStatus(t *StatusTimeline) *StatusMeta {
+ var meta *StatusMeta
+ for _, value := range t.cache.Range(structr.Asc) {
+ meta = value
+ break
+ }
+ return meta
+}
+
+// minStatusID returns the oldest (i.e. lowest value ID) status in metas.
+func minStatusID(metas []*StatusMeta) string {
+ var min string
+ min = metas[0].ID
+ for i := 1; i < len(metas); i++ {
+ if metas[i].ID < min {
+ min = metas[i].ID
+ }
+ }
+ return min
+}
+
+// maxStatusID returns the newest (i.e. highest value ID) status in metas.
+func maxStatusID(metas []*StatusMeta) string {
+ var max string
+ max = metas[0].ID
+ for i := 1; i < len(metas); i++ {
+ if metas[i].ID > max {
+ max = metas[i].ID
+ }
+ }
+ return max
+}
diff --git a/internal/cache/timeline/timeline.go b/internal/cache/timeline/timeline.go
new file mode 100644
index 000000000..4f8797e82
--- /dev/null
+++ b/internal/cache/timeline/timeline.go
@@ -0,0 +1,59 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package timeline
+
+import (
+ "time"
+
+ "codeberg.org/gruf/go-structr"
+ "github.com/superseriousbusiness/gotosocial/internal/id"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
+)
+
+// plus1hULID returns a ULID for now+1h.
+func plus1hULID() string {
+ t := time.Now().Add(time.Hour)
+ return id.NewULIDFromTime(t)
+}
+
+// nextPageParams updates the given paging parameters
+// in-place with the last index value seen, ready for
+// the next query. It correctly handles, depending on
+// the paging order, which cursor value gets updated
+// while the boundary value is maintained.
+func nextPageParams(
+ page *paging.Page,
+ lastIdx string,
+ order paging.Order,
+) {
+ if order.Ascending() {
+ page.Min.Value = lastIdx
+ } else /* i.e. descending */ { //nolint:revive
+ page.Max.Value = lastIdx
+ }
+}
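+
+// A rough sketch of how this is used to walk successive pages of a
+// descending timeline query; the loadIDs callback and the page limit
+// are illustrative stand-ins.
+func exampleWalkPages(loadIDs func(*paging.Page) ([]string, error)) error {
+ // Start at the very top of the
+ // timeline, paging downwards.
+ page := new(paging.Page)
+ page.Max.Order = paging.OrderDescending
+ page.Max.Value = plus1hULID()
+ page.Min.Order = paging.OrderDescending
+ page.Limit = 100
+
+ for {
+ // Load next page of status IDs.
+ ids, err := loadIDs(page)
+ if err != nil {
+ return err
+ }
+
+ // Reached the end.
+ if len(ids) == 0 {
+ return nil
+ }
+
+ // Descending order: move the Max cursor down to the
+ // oldest ID just seen, leaving the Min boundary as-is.
+ nextPageParams(page, ids[len(ids)-1], page.Order())
+ }
+}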
+
+// toDirection converts page order to timeline direction.
+func toDirection(order paging.Order) structr.Direction {
+ if order.Ascending() {
+ return structr.Asc
+ } else /* i.e. descending */ { //nolint:revive
+ return structr.Desc
+ }
+}
diff --git a/internal/cache/wrappers.go b/internal/cache/wrappers.go
index 9cb4fca98..34d7cb8db 100644
--- a/internal/cache/wrappers.go
+++ b/internal/cache/wrappers.go
@@ -27,19 +27,19 @@ import (
// SliceCache wraps a simple.Cache to provide simple loader-callback
// functions for fetching + caching slices of objects (e.g. IDs).
type SliceCache[T any] struct {
- cache simple.Cache[string, []T]
+ simple.Cache[string, []T]
}
// Init initializes the cache with given length + capacity.
func (c *SliceCache[T]) Init(len, cap int) {
- c.cache = simple.Cache[string, []T]{}
- c.cache.Init(len, cap)
+ c.Cache = simple.Cache[string, []T]{}
+ c.Cache.Init(len, cap)
}
// Load will attempt to load an existing slice from cache for key, else calling load function and caching the result.
func (c *SliceCache[T]) Load(key string, load func() ([]T, error)) ([]T, error) {
// Look for cached values.
- data, ok := c.cache.Get(key)
+ data, ok := c.Cache.Get(key)
if !ok {
var err error
@@ -51,7 +51,7 @@ func (c *SliceCache[T]) Load(key string, load func() ([]T, error)) ([]T, error)
}
// Store the data.
- c.cache.Set(key, data)
+ c.Cache.Set(key, data)
}
// Return data clone for safety.
@@ -60,27 +60,7 @@ func (c *SliceCache[T]) Load(key string, load func() ([]T, error)) ([]T, error)
// Invalidate: see simple.Cache{}.InvalidateAll().
func (c *SliceCache[T]) Invalidate(keys ...string) {
- _ = c.cache.InvalidateAll(keys...)
-}
-
-// Trim: see simple.Cache{}.Trim().
-func (c *SliceCache[T]) Trim(perc float64) {
- c.cache.Trim(perc)
-}
-
-// Clear: see simple.Cache{}.Clear().
-func (c *SliceCache[T]) Clear() {
- c.cache.Clear()
-}
-
-// Len: see simple.Cache{}.Len().
-func (c *SliceCache[T]) Len() int {
- return c.cache.Len()
-}
-
-// Cap: see simple.Cache{}.Cap().
-func (c *SliceCache[T]) Cap() int {
- return c.cache.Cap()
+ _ = c.Cache.InvalidateAll(keys...)
}
// StructCache wraps a structr.Cache{} to simple index caching
@@ -89,17 +69,17 @@ func (c *SliceCache[T]) Cap() int {
// name under the main database caches struct which would reduce
// time required to access cached values).
type StructCache[StructType any] struct {
- cache structr.Cache[StructType]
+ structr.Cache[StructType]
index map[string]*structr.Index
}
// Init initializes the cache with given structr.CacheConfig{}.
func (c *StructCache[T]) Init(config structr.CacheConfig[T]) {
c.index = make(map[string]*structr.Index, len(config.Indices))
- c.cache = structr.Cache[T]{}
- c.cache.Init(config)
+ c.Cache = structr.Cache[T]{}
+ c.Cache.Init(config)
for _, cfg := range config.Indices {
- c.index[cfg.Fields] = c.cache.Index(cfg.Fields)
+ c.index[cfg.Fields] = c.Cache.Index(cfg.Fields)
}
}
@@ -107,26 +87,21 @@ func (c *StructCache[T]) Init(config structr.CacheConfig[T]) {
// Note: this also handles conversion of the untyped (any) keys to structr.Key{} via structr.Index{}.
func (c *StructCache[T]) GetOne(index string, key ...any) (T, bool) {
i := c.index[index]
- return c.cache.GetOne(i, i.Key(key...))
+ return c.Cache.GetOne(i, i.Key(key...))
}
// Get calls structr.Cache{}.Get(), using a cached structr.Index{} by 'index' name.
// Note: this also handles conversion of the untyped (any) keys to structr.Key{} via structr.Index{}.
func (c *StructCache[T]) Get(index string, keys ...[]any) []T {
i := c.index[index]
- return c.cache.Get(i, i.Keys(keys...)...)
-}
-
-// Put: see structr.Cache{}.Put().
-func (c *StructCache[T]) Put(values ...T) {
- c.cache.Put(values...)
+ return c.Cache.Get(i, i.Keys(keys...)...)
}
// LoadOne calls structr.Cache{}.LoadOne(), using a cached structr.Index{} by 'index' name.
// Note: this also handles conversion of the untyped (any) keys to structr.Key{} via structr.Index{}.
func (c *StructCache[T]) LoadOne(index string, load func() (T, error), key ...any) (T, error) {
i := c.index[index]
- return c.cache.LoadOne(i, i.Key(key...), load)
+ return c.Cache.LoadOne(i, i.Key(key...), load)
}
// LoadIDs calls structr.Cache{}.Load(), using a cached structr.Index{} by 'index' name. Note: this also handles
@@ -149,7 +124,7 @@ func (c *StructCache[T]) LoadIDs(index string, ids []string, load func([]string)
}
// Pass loader callback with wrapper onto main cache load function.
- return c.cache.Load(i, keys, func(uncached []structr.Key) ([]T, error) {
+ return c.Cache.Load(i, keys, func(uncached []structr.Key) ([]T, error) {
uncachedIDs := make([]string, len(uncached))
for i := range uncached {
uncachedIDs[i] = uncached[i].Values()[0].(string)
@@ -177,7 +152,7 @@ func (c *StructCache[T]) LoadIDs2Part(index string, id1 string, id2s []string, l
}
// Pass loader callback with wrapper onto main cache load function.
- return c.cache.Load(i, keys, func(uncached []structr.Key) ([]T, error) {
+ return c.Cache.Load(i, keys, func(uncached []structr.Key) ([]T, error) {
uncachedIDs := make([]string, len(uncached))
for i := range uncached {
uncachedIDs[i] = uncached[i].Values()[1].(string)
@@ -186,16 +161,11 @@ func (c *StructCache[T]) LoadIDs2Part(index string, id1 string, id2s []string, l
})
}
-// Store: see structr.Cache{}.Store().
-func (c *StructCache[T]) Store(value T, store func() error) error {
- return c.cache.Store(value, store)
-}
-
// Invalidate calls structr.Cache{}.Invalidate(), using a cached structr.Index{} by 'index' name.
// Note: this also handles conversion of the untyped (any) keys to structr.Key{} via structr.Index{}.
func (c *StructCache[T]) Invalidate(index string, key ...any) {
i := c.index[index]
- c.cache.Invalidate(i, i.Key(key...))
+ c.Cache.Invalidate(i, i.Key(key...))
}
// InvalidateIDs calls structr.Cache{}.Invalidate(), using a cached structr.Index{} by 'index' name. Note: this also
@@ -218,25 +188,5 @@ func (c *StructCache[T]) InvalidateIDs(index string, ids []string) {
}
// Pass to main invalidate func.
- c.cache.Invalidate(i, keys...)
-}
-
-// Trim: see structr.Cache{}.Trim().
-func (c *StructCache[T]) Trim(perc float64) {
- c.cache.Trim(perc)
-}
-
-// Clear: see structr.Cache{}.Clear().
-func (c *StructCache[T]) Clear() {
- c.cache.Clear()
-}
-
-// Len: see structr.Cache{}.Len().
-func (c *StructCache[T]) Len() int {
- return c.cache.Len()
-}
-
-// Cap: see structr.Cache{}.Cap().
-func (c *StructCache[T]) Cap() int {
- return c.cache.Cap()
+ c.Cache.Invalidate(i, keys...)
}
diff --git a/internal/cleaner/media_test.go b/internal/cleaner/media_test.go
index afa015783..54afc1c0b 100644
--- a/internal/cleaner/media_test.go
+++ b/internal/cleaner/media_test.go
@@ -29,14 +29,12 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/admin"
"github.com/superseriousbusiness/gotosocial/internal/cleaner"
"github.com/superseriousbusiness/gotosocial/internal/db"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/transport"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -74,12 +72,6 @@ func (suite *MediaTestSuite) SetupTest() {
testrig.StandardStorageSetup(suite.storage, "../../testrig/media")
testrig.StandardDBSetup(suite.db, nil)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.testAttachments = testrig.NewTestAttachments()
suite.testAccounts = testrig.NewTestAccounts()
suite.testEmojis = testrig.NewTestEmojis()
diff --git a/internal/db/bundb/bundb_test.go b/internal/db/bundb/bundb_test.go
index c128eca27..dc22dbcf4 100644
--- a/internal/db/bundb/bundb_test.go
+++ b/internal/db/bundb/bundb_test.go
@@ -20,10 +20,8 @@ package bundb_test
import (
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/db"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/state"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -90,8 +88,6 @@ func (suite *BunDBStandardTestSuite) SetupTest() {
testrig.InitTestLog()
suite.state.Caches.Init()
suite.db = testrig.NewTestDB(&suite.state)
- converter := typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(&suite.state, visibility.NewFilter(&suite.state), converter)
testrig.StandardDBSetup(suite.db, suite.testAccounts)
}
diff --git a/internal/db/bundb/list.go b/internal/db/bundb/list.go
index f81c59c42..75296bc57 100644
--- a/internal/db/bundb/list.go
+++ b/internal/db/bundb/list.go
@@ -87,7 +87,7 @@ func (l *listDB) getList(ctx context.Context, lookup string, dbQuery func(*gtsmo
}
func (l *listDB) GetListsByAccountID(ctx context.Context, accountID string) ([]*gtsmodel.List, error) {
- listIDs, err := l.getListIDsByAccountID(ctx, accountID)
+ listIDs, err := l.GetListIDsByAccountID(ctx, accountID)
if err != nil {
return nil, err
}
@@ -95,7 +95,7 @@ func (l *listDB) GetListsByAccountID(ctx context.Context, accountID string) ([]*
}
func (l *listDB) CountListsByAccountID(ctx context.Context, accountID string) (int, error) {
- listIDs, err := l.getListIDsByAccountID(ctx, accountID)
+ listIDs, err := l.GetListIDsByAccountID(ctx, accountID)
return len(listIDs), err
}
@@ -176,10 +176,8 @@ func (l *listDB) UpdateList(ctx context.Context, list *gtsmodel.List, columns ..
return err
}
- // Invalidate this entire list's timeline.
- if err := l.state.Timelines.List.RemoveTimeline(ctx, list.ID); err != nil {
- log.Errorf(ctx, "error invalidating list timeline: %q", err)
- }
+ // Clear cached timeline associated with list ID.
+ l.state.Caches.Timelines.List.Clear(list.ID)
return nil
}
@@ -221,10 +219,13 @@ func (l *listDB) DeleteListByID(ctx context.Context, id string) error {
// Invalidate all related entry caches for this list.
l.invalidateEntryCaches(ctx, []string{id}, followIDs)
+ // Delete the cached timeline of list.
+ l.state.Caches.Timelines.List.Delete(id)
+
return nil
}
-func (l *listDB) getListIDsByAccountID(ctx context.Context, accountID string) ([]string, error) {
+func (l *listDB) GetListIDsByAccountID(ctx context.Context, accountID string) ([]string, error) {
return l.state.Caches.DB.ListIDs.Load("a"+accountID, func() ([]string, error) {
var listIDs []string
@@ -461,10 +462,8 @@ func (l *listDB) invalidateEntryCaches(ctx context.Context, listIDs, followIDs [
"f"+listID,
)
- // Invalidate the timeline for the list this entry belongs to.
- if err := l.state.Timelines.List.RemoveTimeline(ctx, listID); err != nil {
- log.Errorf(ctx, "error invalidating list timeline: %q", err)
- }
+ // Invalidate list timeline cache by ID.
+ l.state.Caches.Timelines.List.Clear(listID)
}
// Invalidate ListedID slice cache entries.
diff --git a/internal/db/bundb/timeline.go b/internal/db/bundb/timeline.go
index 404cb6601..8278de647 100644
--- a/internal/db/bundb/timeline.go
+++ b/internal/db/bundb/timeline.go
@@ -20,15 +20,13 @@ package bundb
import (
"context"
"errors"
- "fmt"
"slices"
- "time"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
- "github.com/superseriousbusiness/gotosocial/internal/id"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/uptrace/bun"
)
@@ -38,346 +36,147 @@ type timelineDB struct {
state *state.State
}
-func (t *timelineDB) GetHomeTimeline(ctx context.Context, accountID string, maxID string, sinceID string, minID string, limit int, local bool) ([]*gtsmodel.Status, error) {
- // Ensure reasonable
- if limit < 0 {
- limit = 0
- }
+func (t *timelineDB) GetHomeTimeline(ctx context.Context, accountID string, page *paging.Page) ([]*gtsmodel.Status, error) {
+ return loadStatusTimelinePage(ctx, t.db, t.state,
- // Make educated guess for slice size
- var (
- statusIDs = make([]string, 0, limit)
- frontToBack = true
- )
+ // Paging
+ // params.
+ page,
- // As this is the home timeline, it should be
- // populated by statuses from accounts followed
- // by accountID, and posts from accountID itself.
- //
- // So, begin by seeing who accountID follows.
- // It should be a little cheaper to do this in
- // a separate query like this, rather than using
- // a join, since followIDs are cached in memory.
- follows, err := t.state.DB.GetAccountFollows(
- gtscontext.SetBarebones(ctx),
- accountID,
- nil, // select all
- )
- if err != nil && !errors.Is(err, db.ErrNoEntries) {
- return nil, gtserror.Newf("db error getting follows for account %s: %w", accountID, err)
- }
+ // The actual meat of the home-timeline query, outside of
+ // any paging parameters: select only statuses by followed accounts.
+ func(q *bun.SelectQuery) (*bun.SelectQuery, error) {
- // To take account of exclusive lists, get all of
- // this account's lists, so we can filter out follows
- // that are in contained in exclusive lists.
- lists, err := t.state.DB.GetListsByAccountID(ctx, accountID)
- if err != nil && !errors.Is(err, db.ErrNoEntries) {
- return nil, gtserror.Newf("db error getting lists for account %s: %w", accountID, err)
- }
-
- // Index all follow IDs that fall in exclusive lists.
- ignoreFollowIDs := make(map[string]struct{})
- for _, list := range lists {
- if !*list.Exclusive {
- // Not exclusive,
- // we don't care.
- continue
- }
-
- // Fetch all follow IDs of the entries ccontained in this list.
- listFollowIDs, err := t.state.DB.GetFollowIDsInList(ctx, list.ID, nil)
- if err != nil && !errors.Is(err, db.ErrNoEntries) {
- return nil, gtserror.Newf("db error getting list entry follow ids: %w", err)
- }
-
- // Exclusive list, index all its follow IDs.
- for _, followID := range listFollowIDs {
- ignoreFollowIDs[followID] = struct{}{}
- }
- }
-
- // Extract just the accountID from each follow,
- // ignoring follows that are in exclusive lists.
- targetAccountIDs := make([]string, 0, len(follows)+1)
- for _, f := range follows {
- _, ignore := ignoreFollowIDs[f.ID]
- if !ignore {
- targetAccountIDs = append(
- targetAccountIDs,
- f.TargetAccountID,
+ // As this is the home timeline, it should be
+ // populated by statuses from accounts followed
+ // by accountID, and posts from accountID itself.
+ //
+ // So, begin by seeing who accountID follows.
+ // It should be a little cheaper to do this in
+ // a separate query like this, rather than using
+ // a join, since followIDs are cached in memory.
+ follows, err := t.state.DB.GetAccountFollows(
+ gtscontext.SetBarebones(ctx),
+ accountID,
+ nil, // select all
)
- }
- }
+ if err != nil && !errors.Is(err, db.ErrNoEntries) {
+ return nil, gtserror.Newf("db error getting follows for account %s: %w", accountID, err)
+ }
- // Add accountID itself as a pseudo follow so that
- // accountID can see its own posts in the timeline.
- targetAccountIDs = append(targetAccountIDs, accountID)
+ // To take account of exclusive lists, get all of
+ // this account's lists, so we can filter out follows
+ // that are contained in exclusive lists.
+ lists, err := t.state.DB.GetListsByAccountID(ctx, accountID)
+ if err != nil && !errors.Is(err, db.ErrNoEntries) {
+ return nil, gtserror.Newf("db error getting lists for account %s: %w", accountID, err)
+ }
- // Now start building the database query.
- q := t.db.
- NewSelect().
- TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
- // Select only IDs from table
- Column("status.id")
+ // Index all follow IDs that fall in exclusive lists.
+ ignoreFollowIDs := make(map[string]struct{})
+ for _, list := range lists {
+ if !*list.Exclusive {
+ // Not exclusive,
+ // we don't care.
+ continue
+ }
- if maxID == "" || maxID >= id.Highest {
- const future = 24 * time.Hour
+ // Fetch all follow IDs of the entries contained in this list.
+ listFollowIDs, err := t.state.DB.GetFollowIDsInList(ctx, list.ID, nil)
+ if err != nil && !errors.Is(err, db.ErrNoEntries) {
+ return nil, gtserror.Newf("db error getting list entry follow ids: %w", err)
+ }
- // don't return statuses more than 24hr in the future
- maxID = id.NewULIDFromTime(time.Now().Add(future))
- }
+ // Exclusive list, index all its follow IDs.
+ for _, followID := range listFollowIDs {
+ ignoreFollowIDs[followID] = struct{}{}
+ }
+ }
- // return only statuses LOWER (ie., older) than maxID
- q = q.Where("? < ?", bun.Ident("status.id"), maxID)
+ // Extract just the accountID from each follow,
+ // ignoring follows that are in exclusive lists.
+ targetAccountIDs := make([]string, 0, len(follows)+1)
+ for _, f := range follows {
+ _, ignore := ignoreFollowIDs[f.ID]
+ if !ignore {
+ targetAccountIDs = append(
+ targetAccountIDs,
+ f.TargetAccountID,
+ )
+ }
+ }
- if sinceID != "" {
- // return only statuses HIGHER (ie., newer) than sinceID
- q = q.Where("? > ?", bun.Ident("status.id"), sinceID)
- }
+ // Add accountID itself as a pseudo follow so that
+ // accountID can see its own posts in the timeline.
+ targetAccountIDs = append(targetAccountIDs, accountID)
- if minID != "" {
- // return only statuses HIGHER (ie., newer) than minID
- q = q.Where("? > ?", bun.Ident("status.id"), minID)
+ // Select only statuses authored by
+ // accounts with IDs in the slice.
+ q = q.Where(
+ "? IN (?)",
+ bun.Ident("account_id"),
+ bun.In(targetAccountIDs),
+ )
- // page up
- frontToBack = false
- }
+ // Only include statuses that aren't pending approval.
+ q = q.Where("NOT ? = ?", bun.Ident("pending_approval"), true)
- if local {
- // return only statuses posted by local account havers
- q = q.Where("? = ?", bun.Ident("status.local"), local)
- }
-
- // Select only statuses authored by
- // accounts with IDs in the slice.
- q = q.Where(
- "? IN (?)",
- bun.Ident("status.account_id"),
- bun.In(targetAccountIDs),
+ return q, nil
+ },
)
-
- // Only include statuses that aren't pending approval.
- q = q.Where("NOT ? = ?", bun.Ident("status.pending_approval"), true)
-
- if limit > 0 {
- // limit amount of statuses returned
- q = q.Limit(limit)
- }
-
- if frontToBack {
- // Page down.
- q = q.Order("status.id DESC")
- } else {
- // Page up.
- q = q.Order("status.id ASC")
- }
-
- if err := q.Scan(ctx, &statusIDs); err != nil {
- return nil, err
- }
-
- if len(statusIDs) == 0 {
- return nil, nil
- }
-
- // If we're paging up, we still want statuses
- // to be sorted by ID desc, so reverse ids slice.
- // https://zchee.github.io/golang-wiki/SliceTricks/#reversing
- if !frontToBack {
- for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
- statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
- }
- }
-
- // Return status IDs loaded from cache + db.
- return t.state.DB.GetStatusesByIDs(ctx, statusIDs)
}
-func (t *timelineDB) GetPublicTimeline(
- ctx context.Context,
- maxID string,
- sinceID string,
- minID string,
- limit int,
- local bool,
-) ([]*gtsmodel.Status, error) {
- // Ensure reasonable
- if limit < 0 {
- limit = 0
- }
+func (t *timelineDB) GetPublicTimeline(ctx context.Context, page *paging.Page) ([]*gtsmodel.Status, error) {
+ return loadStatusTimelinePage(ctx, t.db, t.state,
- if local {
- return t.getLocalTimeline(
- ctx,
- maxID,
- sinceID,
- minID,
- limit,
- )
- }
+ // Paging
+ // params.
+ page,
- // Make educated guess for slice size
- var (
- statusIDs = make([]string, 0, limit)
- frontToBack = true
+ func(q *bun.SelectQuery) (*bun.SelectQuery, error) {
+ // Public only.
+ q = q.Where("? = ?", bun.Ident("visibility"), gtsmodel.VisibilityPublic)
+
+ // Ignore boosts.
+ q = q.Where("? IS NULL", bun.Ident("boost_of_id"))
+
+ // Only include statuses that aren't pending approval.
+ q = q.Where("? = ?", bun.Ident("pending_approval"), false)
+
+ return q, nil
+ },
)
-
- q := t.db.
- NewSelect().
- TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
- // Public only.
- Where("? = ?", bun.Ident("status.visibility"), gtsmodel.VisibilityPublic).
- // Ignore boosts.
- Where("? IS NULL", bun.Ident("status.boost_of_id")).
- // Only include statuses that aren't pending approval.
- Where("? = ?", bun.Ident("status.pending_approval"), false).
- // Select only IDs from table
- Column("status.id")
-
- if maxID == "" || maxID >= id.Highest {
- const future = 24 * time.Hour
-
- // don't return statuses more than 24hr in the future
- maxID = id.NewULIDFromTime(time.Now().Add(future))
- }
-
- // return only statuses LOWER (ie., older) than maxID
- q = q.Where("? < ?", bun.Ident("status.id"), maxID)
-
- if sinceID != "" {
- // return only statuses HIGHER (ie., newer) than sinceID
- q = q.Where("? > ?", bun.Ident("status.id"), sinceID)
- }
-
- if minID != "" {
- // return only statuses HIGHER (ie., newer) than minID
- q = q.Where("? > ?", bun.Ident("status.id"), minID)
-
- // page up
- frontToBack = false
- }
-
- if limit > 0 {
- // limit amount of statuses returned
- q = q.Limit(limit)
- }
-
- if frontToBack {
- // Page down.
- q = q.Order("status.id DESC")
- } else {
- // Page up.
- q = q.Order("status.id ASC")
- }
-
- if err := q.Scan(ctx, &statusIDs); err != nil {
- return nil, err
- }
-
- if len(statusIDs) == 0 {
- return nil, nil
- }
-
- // If we're paging up, we still want statuses
- // to be sorted by ID desc, so reverse ids slice.
- // https://zchee.github.io/golang-wiki/SliceTricks/#reversing
- if !frontToBack {
- for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
- statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
- }
- }
-
- // Return status IDs loaded from cache + db.
- return t.state.DB.GetStatusesByIDs(ctx, statusIDs)
}
-func (t *timelineDB) getLocalTimeline(
- ctx context.Context,
- maxID string,
- sinceID string,
- minID string,
- limit int,
-) ([]*gtsmodel.Status, error) {
- // Make educated guess for slice size
- var (
- statusIDs = make([]string, 0, limit)
- frontToBack = true
+func (t *timelineDB) GetLocalTimeline(ctx context.Context, page *paging.Page) ([]*gtsmodel.Status, error) {
+ return loadStatusTimelinePage(ctx, t.db, t.state,
+
+ // Paging
+ // params.
+ page,
+
+ func(q *bun.SelectQuery) (*bun.SelectQuery, error) {
+ // Local only.
+ q = q.Where("? = ?", bun.Ident("local"), true)
+
+ // Public only.
+ q = q.Where("? = ?", bun.Ident("visibility"), gtsmodel.VisibilityPublic)
+
+ // Only include statuses that aren't pending approval.
+ q = q.Where("? = ?", bun.Ident("pending_approval"), false)
+
+ // Ignore boosts.
+ q = q.Where("? IS NULL", bun.Ident("boost_of_id"))
+
+ return q, nil
+ },
)
-
- q := t.db.
- NewSelect().
- TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
- // Local only.
- Where("? = ?", bun.Ident("status.local"), true).
- // Public only.
- Where("? = ?", bun.Ident("status.visibility"), gtsmodel.VisibilityPublic).
- // Only include statuses that aren't pending approval.
- Where("? = ?", bun.Ident("status.pending_approval"), false).
- // Ignore boosts.
- Where("? IS NULL", bun.Ident("status.boost_of_id")).
- // Select only IDs from table
- Column("status.id")
-
- if maxID == "" || maxID >= id.Highest {
- const future = 24 * time.Hour
-
- // don't return statuses more than 24hr in the future
- maxID = id.NewULIDFromTime(time.Now().Add(future))
- }
-
- // return only statuses LOWER (ie., older) than maxID
- q = q.Where("? < ?", bun.Ident("status.id"), maxID)
-
- if sinceID != "" {
- // return only statuses HIGHER (ie., newer) than sinceID
- q = q.Where("? > ?", bun.Ident("status.id"), sinceID)
- }
-
- if minID != "" {
- // return only statuses HIGHER (ie., newer) than minID
- q = q.Where("? > ?", bun.Ident("status.id"), minID)
-
- // page up
- frontToBack = false
- }
-
- if limit > 0 {
- // limit amount of statuses returned
- q = q.Limit(limit)
- }
-
- if frontToBack {
- // Page down.
- q = q.Order("status.id DESC")
- } else {
- // Page up.
- q = q.Order("status.id ASC")
- }
-
- if err := q.Scan(ctx, &statusIDs); err != nil {
- return nil, err
- }
-
- if len(statusIDs) == 0 {
- return nil, nil
- }
-
- // If we're paging up, we still want statuses
- // to be sorted by ID desc, so reverse ids slice.
- // https://zchee.github.io/golang-wiki/SliceTricks/#reversing
- if !frontToBack {
- for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
- statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
- }
- }
-
- // Return status IDs loaded from cache + db.
- return t.state.DB.GetStatusesByIDs(ctx, statusIDs)
}
// TODO optimize this query and the logic here, because it's slow as balls -- it takes like a literal second to return with a limit of 20!
// It might be worth serving it through a timeline instead of raw DB queries, like we do for Home feeds.
func (t *timelineDB) GetFavedTimeline(ctx context.Context, accountID string, maxID string, minID string, limit int) ([]*gtsmodel.Status, string, string, error) {
+
// Ensure reasonable
if limit < 0 {
limit = 0
@@ -442,205 +241,130 @@ func (t *timelineDB) GetFavedTimeline(ctx context.Context, accountID string, max
return statuses, nextMaxID, prevMinID, nil
}
-func (t *timelineDB) GetListTimeline(
+func (t *timelineDB) GetListTimeline(ctx context.Context, listID string, page *paging.Page) ([]*gtsmodel.Status, error) {
+ return loadStatusTimelinePage(ctx, t.db, t.state,
+
+ // Paging
+ // params.
+ page,
+
+ // The actual meat of the list-timeline query: outside of
+ // any paging parameters, it selects by list entries.
+ func(q *bun.SelectQuery) (*bun.SelectQuery, error) {
+
+ // Fetch all follow IDs contained in list from DB.
+ followIDs, err := t.state.DB.GetFollowIDsInList(
+ ctx, listID, nil,
+ )
+ if err != nil {
+ return nil, gtserror.Newf("error getting follows in list: %w", err)
+ }
+
+ // Select target account
+ // IDs from list follows.
+ subQ := t.db.NewSelect().
+ Table("follows").
+ Column("follows.target_account_id").
+ Where("? IN (?)", bun.Ident("follows.id"), bun.In(followIDs))
+ q = q.Where("? IN (?)", bun.Ident("statuses.account_id"), subQ)
+
+ // Only include statuses that aren't pending approval.
+ q = q.Where("NOT ? = ?", bun.Ident("pending_approval"), true)
+
+ return q, nil
+ },
+ )
+}
+
+func (t *timelineDB) GetTagTimeline(ctx context.Context, tagID string, page *paging.Page) ([]*gtsmodel.Status, error) {
+ return loadStatusTimelinePage(ctx, t.db, t.state,
+
+ // Paging
+ // params.
+ page,
+
+ // The actual meat of the tag-timeline query: outside of any
+ // paging params, it selects by status tags with public visibility.
+ func(q *bun.SelectQuery) (*bun.SelectQuery, error) {
+
+ // Join the status_to_tags table so we can filter by tag.
+ q = q.Join(
+ "INNER JOIN ? ON ? = ?",
+ bun.Ident("status_to_tags"),
+ bun.Ident("statuses.id"), bun.Ident("status_to_tags.status_id"),
+ )
+
+ // This tag only.
+ q = q.Where("? = ?", bun.Ident("status_to_tags.tag_id"), tagID)
+
+ // Public only.
+ q = q.Where("? = ?", bun.Ident("visibility"), gtsmodel.VisibilityPublic)
+
+ return q, nil
+ },
+ )
+}
+
+func loadStatusTimelinePage(
ctx context.Context,
- listID string,
- maxID string,
- sinceID string,
- minID string,
- limit int,
-) ([]*gtsmodel.Status, error) {
- // Ensure reasonable
- if limit < 0 {
- limit = 0
- }
+ db *bun.DB,
+ state *state.State,
+ page *paging.Page,
+ query func(*bun.SelectQuery) (*bun.SelectQuery, error),
+) (
+ []*gtsmodel.Status,
+ error,
+) {
+ // Extract page params.
+ minID := page.Min.Value
+ maxID := page.Max.Value
+ limit := page.Limit
+ order := page.Order()
- // Make educated guess for slice size
- var (
- statusIDs = make([]string, 0, limit)
- frontToBack = true
- )
+ // Pre-allocate slice of IDs as dest.
+ statusIDs := make([]string, 0, limit)
- // Fetch all follow IDs contained in list from DB.
- followIDs, err := t.state.DB.GetFollowIDsInList(
- ctx, listID, nil,
- )
+ // Now start building the database query.
+ //
+ // Select the following:
+ // - status ID
+ q := db.NewSelect().
+ Table("statuses").
+ Column("id")
+
+ // Append caller
+ // query details.
+ q, err := query(q)
if err != nil {
- return nil, fmt.Errorf("error getting follows in list: %w", err)
+ return nil, err
}
- // If there's no list follows we can't
- // possibly return anything for this list.
- if len(followIDs) == 0 {
- return make([]*gtsmodel.Status, 0), nil
- }
-
- // Select target account IDs from follows.
- subQ := t.db.
- NewSelect().
- TableExpr("? AS ?", bun.Ident("follows"), bun.Ident("follow")).
- Column("follow.target_account_id").
- Where("? IN (?)", bun.Ident("follow.id"), bun.In(followIDs))
-
- // Select only status IDs created
- // by one of the followed accounts.
- q := t.db.
- NewSelect().
- TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
- // Select only IDs from table
- Column("status.id").
- Where("? IN (?)", bun.Ident("status.account_id"), subQ)
-
- if maxID == "" || maxID >= id.Highest {
- const future = 24 * time.Hour
-
- // don't return statuses more than 24hr in the future
- maxID = id.NewULIDFromTime(time.Now().Add(future))
- }
-
- // return only statuses LOWER (ie., older) than maxID
- q = q.Where("? < ?", bun.Ident("status.id"), maxID)
-
- if sinceID != "" {
- // return only statuses HIGHER (ie., newer) than sinceID
- q = q.Where("? > ?", bun.Ident("status.id"), sinceID)
+ if maxID != "" {
+ // Set a maximum ID boundary if one was given.
+ q = q.Where("? < ?", bun.Ident("id"), maxID)
}
if minID != "" {
- // return only statuses HIGHER (ie., newer) than minID
- q = q.Where("? > ?", bun.Ident("status.id"), minID)
-
- // page up
- frontToBack = false
+ // Set a minimum ID boundary if one was given.
+ q = q.Where("? > ?", bun.Ident("id"), minID)
}
- // Only include statuses that aren't pending approval.
- q = q.Where("NOT ? = ?", bun.Ident("status.pending_approval"), true)
-
- if limit > 0 {
- // limit amount of statuses returned
- q = q.Limit(limit)
+ // Set query ordering.
+ if order.Ascending() {
+ q = q.OrderExpr("? ASC", bun.Ident("id"))
+ } else /* i.e. descending */ {
+ q = q.OrderExpr("? DESC", bun.Ident("id"))
}
- if frontToBack {
- // Page down.
- q = q.Order("status.id DESC")
- } else {
- // Page up.
- q = q.Order("status.id ASC")
- }
+ // A limit should always
+ // be supplied for this.
+ q = q.Limit(limit)
+ // Finally, perform query into status ID slice.
if err := q.Scan(ctx, &statusIDs); err != nil {
return nil, err
}
- if len(statusIDs) == 0 {
- return nil, nil
- }
-
- // If we're paging up, we still want statuses
- // to be sorted by ID desc, so reverse ids slice.
- // https://zchee.github.io/golang-wiki/SliceTricks/#reversing
- if !frontToBack {
- for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
- statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
- }
- }
-
- // Return status IDs loaded from cache + db.
- return t.state.DB.GetStatusesByIDs(ctx, statusIDs)
-}
-
-func (t *timelineDB) GetTagTimeline(
- ctx context.Context,
- tagID string,
- maxID string,
- sinceID string,
- minID string,
- limit int,
-) ([]*gtsmodel.Status, error) {
- // Ensure reasonable
- if limit < 0 {
- limit = 0
- }
-
- // Make educated guess for slice size
- var (
- statusIDs = make([]string, 0, limit)
- frontToBack = true
- )
-
- q := t.db.
- NewSelect().
- TableExpr("? AS ?", bun.Ident("status_to_tags"), bun.Ident("status_to_tag")).
- Column("status_to_tag.status_id").
- // Join with statuses for filtering.
- Join(
- "INNER JOIN ? AS ? ON ? = ?",
- bun.Ident("statuses"), bun.Ident("status"),
- bun.Ident("status.id"), bun.Ident("status_to_tag.status_id"),
- ).
- // Public only.
- Where("? = ?", bun.Ident("status.visibility"), gtsmodel.VisibilityPublic).
- // This tag only.
- Where("? = ?", bun.Ident("status_to_tag.tag_id"), tagID)
-
- if maxID == "" || maxID >= id.Highest {
- const future = 24 * time.Hour
-
- // don't return statuses more than 24hr in the future
- maxID = id.NewULIDFromTime(time.Now().Add(future))
- }
-
- // return only statuses LOWER (ie., older) than maxID
- q = q.Where("? < ?", bun.Ident("status_to_tag.status_id"), maxID)
-
- if sinceID != "" {
- // return only statuses HIGHER (ie., newer) than sinceID
- q = q.Where("? > ?", bun.Ident("status_to_tag.status_id"), sinceID)
- }
-
- if minID != "" {
- // return only statuses HIGHER (ie., newer) than minID
- q = q.Where("? > ?", bun.Ident("status_to_tag.status_id"), minID)
-
- // page up
- frontToBack = false
- }
-
- // Only include statuses that aren't pending approval.
- q = q.Where("NOT ? = ?", bun.Ident("status.pending_approval"), true)
-
- if limit > 0 {
- // limit amount of statuses returned
- q = q.Limit(limit)
- }
-
- if frontToBack {
- // Page down.
- q = q.Order("status_to_tag.status_id DESC")
- } else {
- // Page up.
- q = q.Order("status_to_tag.status_id ASC")
- }
-
- if err := q.Scan(ctx, &statusIDs); err != nil {
- return nil, err
- }
-
- if len(statusIDs) == 0 {
- return nil, nil
- }
-
- // If we're paging up, we still want statuses
- // to be sorted by ID desc, so reverse ids slice.
- // https://zchee.github.io/golang-wiki/SliceTricks/#reversing
- if !frontToBack {
- for l, r := 0, len(statusIDs)-1; l < r; l, r = l+1, r-1 {
- statusIDs[l], statusIDs[r] = statusIDs[r], statusIDs[l]
- }
- }
-
- // Return status IDs loaded from cache + db.
- return t.state.DB.GetStatusesByIDs(ctx, statusIDs)
+ // Fetch statuses from DB / cache with given IDs.
+ return state.DB.GetStatusesByIDs(ctx, statusIDs)
}
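
With the shared loader above, each timeline method now only contributes its own WHERE clauses, while cursoring, ordering, and the limit live in one place. The following is a minimal sketch of how a hypothetical new timeline flavour could plug in; the method name and the single-account clause are illustrative only, not part of this change:

func (t *timelineDB) getStatusesByAccountPaged(ctx context.Context, accountID string, page *paging.Page) ([]*gtsmodel.Status, error) {
	return loadStatusTimelinePage(ctx, t.db, t.state, page,
		func(q *bun.SelectQuery) (*bun.SelectQuery, error) {
			// Only statuses authored by the given account
			// (illustrative clause; paging is inherited).
			q = q.Where("? = ?", bun.Ident("account_id"), accountID)
			return q, nil
		},
	)
}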
diff --git a/internal/db/bundb/timeline_test.go b/internal/db/bundb/timeline_test.go
index 4988ab362..9652df33a 100644
--- a/internal/db/bundb/timeline_test.go
+++ b/internal/db/bundb/timeline_test.go
@@ -20,14 +20,12 @@ package bundb_test
import (
"context"
"testing"
- "time"
- "codeberg.org/gruf/go-kv"
"github.com/stretchr/testify/suite"
- "github.com/superseriousbusiness/gotosocial/internal/ap"
"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/id"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
"github.com/superseriousbusiness/gotosocial/internal/util"
)
@@ -35,38 +33,6 @@ type TimelineTestSuite struct {
BunDBStandardTestSuite
}
-func getFutureStatus() *gtsmodel.Status {
- theDistantFuture := time.Now().Add(876600 * time.Hour)
- id := id.NewULIDFromTime(theDistantFuture)
-
- return &gtsmodel.Status{
- ID: id,
- URI: "http://localhost:8080/users/admin/statuses/" + id,
- URL: "http://localhost:8080/@admin/statuses/" + id,
- Content: "it's the future, wooooooooooooooooooooooooooooooooo",
- Text: "it's the future, wooooooooooooooooooooooooooooooooo",
- ContentType: gtsmodel.StatusContentTypePlain,
- AttachmentIDs: []string{},
- TagIDs: []string{},
- MentionIDs: []string{},
- EmojiIDs: []string{},
- CreatedAt: theDistantFuture,
- Local: util.Ptr(true),
- AccountURI: "http://localhost:8080/users/admin",
- AccountID: "01F8MH17FWEB39HZJ76B6VXSKF",
- InReplyToID: "",
- BoostOfID: "",
- ContentWarning: "",
- Visibility: gtsmodel.VisibilityPublic,
- Sensitive: util.Ptr(false),
- Language: "en",
- CreatedWithApplicationID: "01F8MGXQRHYF5QPMTMXP78QC2F",
- Federated: util.Ptr(true),
- InteractionPolicy: gtsmodel.DefaultInteractionPolicyPublic(),
- ActivityStreamsType: ap.ObjectNote,
- }
-}
-
func (suite *TimelineTestSuite) publicCount() int {
var publicCount int
for _, status := range suite.testStatuses {
@@ -92,7 +58,7 @@ func (suite *TimelineTestSuite) localCount() int {
return localCount
}
-func (suite *TimelineTestSuite) checkStatuses(statuses []*gtsmodel.Status, maxID string, minID string, expectedLength int) {
+func (suite *TimelineTestSuite) checkStatuses(statuses []*gtsmodel.Status, maxID string, minID string, expectedOrder paging.Order, expectedLength int) {
if l := len(statuses); l != expectedLength {
suite.FailNowf("", "expected %d statuses in slice, got %d", expectedLength, l)
} else if l == 0 {
@@ -100,74 +66,73 @@ func (suite *TimelineTestSuite) checkStatuses(statuses []*gtsmodel.Status, maxID
return
}
- // Check ordering + bounds of statuses.
- highest := statuses[0].ID
- for _, status := range statuses {
- id := status.ID
+ if expectedOrder.Ascending() {
+ // Check ordering + bounds of statuses.
+ lowest := statuses[0].ID
+ for _, status := range statuses {
+ id := status.ID
- if id >= maxID {
- suite.FailNowf("", "%s greater than maxID %s", id, maxID)
+ if id >= maxID {
+ suite.FailNowf("", "%s greater than maxID %s", id, maxID)
+ }
+
+ if id <= minID {
+ suite.FailNowf("", "%s smaller than minID %s", id, minID)
+ }
+
+ if id < lowest {
+ suite.FailNowf("", "statuses in slice were not ordered lowest -> highest ID")
+ }
+
+ lowest = id
}
+ } else {
+ // Check ordering + bounds of statuses.
+ highest := statuses[0].ID
+ for _, status := range statuses {
+ id := status.ID
- if id <= minID {
- suite.FailNowf("", "%s smaller than minID %s", id, minID)
+ if id >= maxID {
+ suite.FailNowf("", "%s greater than maxID %s", id, maxID)
+ }
+
+ if id <= minID {
+ suite.FailNowf("", "%s smaller than minID %s", id, minID)
+ }
+
+ if id > highest {
+ suite.FailNowf("", "statuses in slice were not ordered highest -> lowest ID")
+ }
+
+ highest = id
}
-
- if id > highest {
- suite.FailNowf("", "statuses in slice were not ordered highest -> lowest ID")
- }
-
- highest = id
}
}
func (suite *TimelineTestSuite) TestGetPublicTimeline() {
ctx := context.Background()
- s, err := suite.db.GetPublicTimeline(ctx, "", "", "", 20, false)
+ page := toPage("", "", "", 20)
+
+ s, err := suite.db.GetPublicTimeline(ctx, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.T().Log(kv.Field{
- K: "statuses", V: s,
- })
-
- suite.checkStatuses(s, id.Highest, id.Lowest, suite.publicCount())
+ suite.checkStatuses(s, id.Highest, id.Lowest, page.Order(), suite.publicCount())
}
func (suite *TimelineTestSuite) TestGetPublicTimelineLocal() {
ctx := context.Background()
- s, err := suite.db.GetPublicTimeline(ctx, "", "", "", 20, true)
+ page := toPage("", "", "", 20)
+
+ s, err := suite.db.GetLocalTimeline(ctx, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.T().Log(kv.Field{
- K: "statuses", V: s,
- })
-
- suite.checkStatuses(s, id.Highest, id.Lowest, suite.localCount())
-}
-
-func (suite *TimelineTestSuite) TestGetPublicTimelineWithFutureStatus() {
- ctx := context.Background()
-
- // Insert a status set far in the
- // future, it shouldn't be retrieved.
- futureStatus := getFutureStatus()
- if err := suite.db.PutStatus(ctx, futureStatus); err != nil {
- suite.FailNow(err.Error())
- }
-
- s, err := suite.db.GetPublicTimeline(ctx, "", "", "", 20, false)
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- suite.NotContains(s, futureStatus)
- suite.checkStatuses(s, id.Highest, id.Lowest, suite.publicCount())
+ suite.checkStatuses(s, id.Highest, id.Lowest, page.Order(), suite.localCount())
}
func (suite *TimelineTestSuite) TestGetHomeTimeline() {
@@ -176,12 +141,14 @@ func (suite *TimelineTestSuite) TestGetHomeTimeline() {
viewingAccount = suite.testAccounts["local_account_1"]
)
- s, err := suite.db.GetHomeTimeline(ctx, viewingAccount.ID, "", "", "", 20, false)
+ page := toPage("", "", "", 20)
+
+ s, err := suite.db.GetHomeTimeline(ctx, viewingAccount.ID, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.checkStatuses(s, id.Highest, id.Lowest, 20)
+ suite.checkStatuses(s, id.Highest, id.Lowest, page.Order(), 20)
}
func (suite *TimelineTestSuite) TestGetHomeTimelineIgnoreExclusive() {
@@ -201,13 +168,15 @@ func (suite *TimelineTestSuite) TestGetHomeTimelineIgnoreExclusive() {
suite.FailNow(err.Error())
}
+ page := toPage("", "", "", 20)
+
// First try with list just set to exclusive.
// We should only get zork's own statuses.
- s, err := suite.db.GetHomeTimeline(ctx, viewingAccount.ID, "", "", "", 20, false)
+ s, err := suite.db.GetHomeTimeline(ctx, viewingAccount.ID, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.checkStatuses(s, id.Highest, id.Lowest, 9)
+ suite.checkStatuses(s, id.Highest, id.Lowest, page.Order(), 9)
// Remove admin account from the exclusive list.
listEntry := suite.testListEntries["local_account_1_list_1_entry_2"]
@@ -217,11 +186,11 @@ func (suite *TimelineTestSuite) TestGetHomeTimelineIgnoreExclusive() {
// Zork should only see their own
// statuses and admin's statuses now.
- s, err = suite.db.GetHomeTimeline(ctx, viewingAccount.ID, "", "", "", 20, false)
+ s, err = suite.db.GetHomeTimeline(ctx, viewingAccount.ID, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.checkStatuses(s, id.Highest, id.Lowest, 13)
+ suite.checkStatuses(s, id.Highest, id.Lowest, page.Order(), 13)
}
func (suite *TimelineTestSuite) TestGetHomeTimelineNoFollowing() {
@@ -246,36 +215,16 @@ func (suite *TimelineTestSuite) TestGetHomeTimelineNoFollowing() {
}
}
+ page := toPage("", "", "", 20)
+
// Query should work fine; though far
// fewer statuses will be returned ofc.
- s, err := suite.db.GetHomeTimeline(ctx, viewingAccount.ID, "", "", "", 20, false)
+ s, err := suite.db.GetHomeTimeline(ctx, viewingAccount.ID, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.checkStatuses(s, id.Highest, id.Lowest, 9)
-}
-
-func (suite *TimelineTestSuite) TestGetHomeTimelineWithFutureStatus() {
- var (
- ctx = context.Background()
- viewingAccount = suite.testAccounts["local_account_1"]
- )
-
- // Insert a status set far in the
- // future, it shouldn't be retrieved.
- futureStatus := getFutureStatus()
- if err := suite.db.PutStatus(ctx, futureStatus); err != nil {
- suite.FailNow(err.Error())
- }
-
- s, err := suite.db.GetHomeTimeline(ctx, viewingAccount.ID, "", "", "", 20, false)
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- suite.NotContains(s, futureStatus)
- suite.checkStatuses(s, id.Highest, id.Lowest, 20)
+ suite.checkStatuses(s, id.Highest, id.Lowest, page.Order(), 9)
}
func (suite *TimelineTestSuite) TestGetHomeTimelineBackToFront() {
@@ -284,14 +233,16 @@ func (suite *TimelineTestSuite) TestGetHomeTimelineBackToFront() {
viewingAccount = suite.testAccounts["local_account_1"]
)
- s, err := suite.db.GetHomeTimeline(ctx, viewingAccount.ID, "", "", id.Lowest, 5, false)
+ page := toPage("", "", id.Lowest, 5)
+
+ s, err := suite.db.GetHomeTimeline(ctx, viewingAccount.ID, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.checkStatuses(s, id.Highest, id.Lowest, 5)
- suite.Equal("01F8MHAYFKS4KMXF8K5Y1C0KRN", s[0].ID)
- suite.Equal("01F8MH75CBF9JFX4ZAD54N0W0R", s[len(s)-1].ID)
+ suite.checkStatuses(s, id.Highest, id.Lowest, page.Order(), 5)
+ suite.Equal("01F8MHAYFKS4KMXF8K5Y1C0KRN", s[len(s)-1].ID)
+ suite.Equal("01F8MH75CBF9JFX4ZAD54N0W0R", s[0].ID)
}
func (suite *TimelineTestSuite) TestGetHomeTimelineFromHighest() {
@@ -300,12 +251,14 @@ func (suite *TimelineTestSuite) TestGetHomeTimelineFromHighest() {
viewingAccount = suite.testAccounts["local_account_1"]
)
- s, err := suite.db.GetHomeTimeline(ctx, viewingAccount.ID, id.Highest, "", "", 5, false)
+ page := toPage(id.Highest, "", "", 5)
+
+ s, err := suite.db.GetHomeTimeline(ctx, viewingAccount.ID, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.checkStatuses(s, id.Highest, id.Lowest, 5)
+ suite.checkStatuses(s, id.Highest, id.Lowest, page.Order(), 5)
suite.Equal("01JDPZEZ77X1NX0TY9M10BK1HM", s[0].ID)
suite.Equal("01HEN2RZ8BG29Y5Z9VJC73HZW7", s[len(s)-1].ID)
}
@@ -316,12 +269,14 @@ func (suite *TimelineTestSuite) TestGetListTimelineNoParams() {
list = suite.testLists["local_account_1_list_1"]
)
- s, err := suite.db.GetListTimeline(ctx, list.ID, "", "", "", 20)
+ page := toPage("", "", "", 20)
+
+ s, err := suite.db.GetListTimeline(ctx, list.ID, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.checkStatuses(s, id.Highest, id.Lowest, 13)
+ suite.checkStatuses(s, id.Highest, id.Lowest, page.Order(), 13)
}
func (suite *TimelineTestSuite) TestGetListTimelineMaxID() {
@@ -330,12 +285,14 @@ func (suite *TimelineTestSuite) TestGetListTimelineMaxID() {
list = suite.testLists["local_account_1_list_1"]
)
- s, err := suite.db.GetListTimeline(ctx, list.ID, id.Highest, "", "", 5)
+ page := toPage(id.Highest, "", "", 5)
+
+ s, err := suite.db.GetListTimeline(ctx, list.ID, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.checkStatuses(s, id.Highest, id.Lowest, 5)
+ suite.checkStatuses(s, id.Highest, id.Lowest, page.Order(), 5)
suite.Equal("01JDPZEZ77X1NX0TY9M10BK1HM", s[0].ID)
suite.Equal("01FN3VJGFH10KR7S2PB0GFJZYG", s[len(s)-1].ID)
}
@@ -346,14 +303,16 @@ func (suite *TimelineTestSuite) TestGetListTimelineMinID() {
list = suite.testLists["local_account_1_list_1"]
)
- s, err := suite.db.GetListTimeline(ctx, list.ID, "", "", id.Lowest, 5)
+ page := toPage("", "", id.Lowest, 5)
+
+ s, err := suite.db.GetListTimeline(ctx, list.ID, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.checkStatuses(s, id.Highest, id.Lowest, 5)
- suite.Equal("01F8MHC8VWDRBQR0N1BATDDEM5", s[0].ID)
- suite.Equal("01F8MH75CBF9JFX4ZAD54N0W0R", s[len(s)-1].ID)
+ suite.checkStatuses(s, id.Highest, id.Lowest, page.Order(), 5)
+ suite.Equal("01F8MHC8VWDRBQR0N1BATDDEM5", s[len(s)-1].ID)
+ suite.Equal("01F8MH75CBF9JFX4ZAD54N0W0R", s[0].ID)
}
func (suite *TimelineTestSuite) TestGetListTimelineMinIDPagingUp() {
@@ -362,14 +321,16 @@ func (suite *TimelineTestSuite) TestGetListTimelineMinIDPagingUp() {
list = suite.testLists["local_account_1_list_1"]
)
- s, err := suite.db.GetListTimeline(ctx, list.ID, "", "", "01F8MHC8VWDRBQR0N1BATDDEM5", 5)
+ page := toPage("", "", "01F8MHC8VWDRBQR0N1BATDDEM5", 5)
+
+ s, err := suite.db.GetListTimeline(ctx, list.ID, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.checkStatuses(s, id.Highest, "01F8MHC8VWDRBQR0N1BATDDEM5", 5)
- suite.Equal("01G20ZM733MGN8J344T4ZDDFY1", s[0].ID)
- suite.Equal("01F8MHCP5P2NWYQ416SBA0XSEV", s[len(s)-1].ID)
+ suite.checkStatuses(s, id.Highest, "01F8MHC8VWDRBQR0N1BATDDEM5", page.Order(), 5)
+ suite.Equal("01G20ZM733MGN8J344T4ZDDFY1", s[len(s)-1].ID)
+ suite.Equal("01F8MHCP5P2NWYQ416SBA0XSEV", s[0].ID)
}
func (suite *TimelineTestSuite) TestGetTagTimelineNoParams() {
@@ -378,15 +339,33 @@ func (suite *TimelineTestSuite) TestGetTagTimelineNoParams() {
tag = suite.testTags["welcome"]
)
- s, err := suite.db.GetTagTimeline(ctx, tag.ID, "", "", "", 1)
+ page := toPage("", "", "", 1)
+
+ s, err := suite.db.GetTagTimeline(ctx, tag.ID, page)
if err != nil {
suite.FailNow(err.Error())
}
- suite.checkStatuses(s, id.Highest, id.Lowest, 1)
+ suite.checkStatuses(s, id.Highest, id.Lowest, page.Order(), 1)
suite.Equal("01F8MH75CBF9JFX4ZAD54N0W0R", s[0].ID)
}
func TestTimelineTestSuite(t *testing.T) {
suite.Run(t, new(TimelineTestSuite))
}
+
+// toPage is a helper function to wrap a series of paging arguments in paging.Page{}.
+func toPage(maxID, sinceID, minID string, limit int) *paging.Page {
+ var pg paging.Page
+ pg.Limit = limit
+
+ if maxID != "" {
+ pg.Max = paging.MaxID(maxID)
+ }
+
+ if sinceID != "" || minID != "" {
+ pg.Min = paging.EitherMinID(minID, sinceID)
+ }
+
+ return &pg
+}
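
The swapped first/last assertions in the minID-based tests above follow from the new paging semantics: a page with only a minimum cursor is ascending (paging "up"), while a page with only a maximum cursor stays descending. A small sketch of that behaviour, assuming it sits in the same bundb_test package as the toPage helper:

func (suite *TimelineTestSuite) TestPageOrderSketch() {
	asc := toPage("", "", id.Lowest, 5)   // minID only -> pages up
	desc := toPage(id.Highest, "", "", 5) // maxID only -> pages down
	suite.True(asc.Order().Ascending())   // results ordered lowest -> highest ID
	suite.False(desc.Order().Ascending()) // results ordered highest -> lowest ID
}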
diff --git a/internal/db/list.go b/internal/db/list.go
index 4ce0ff988..2e74329f1 100644
--- a/internal/db/list.go
+++ b/internal/db/list.go
@@ -34,6 +34,9 @@ type List interface {
// GetListsByAccountID gets all lists owned by the given accountID.
GetListsByAccountID(ctx context.Context, accountID string) ([]*gtsmodel.List, error)
+ // GetListIDsByAccountID gets the IDs of all lists owned by the given accountID.
+ GetListIDsByAccountID(ctx context.Context, accountID string) ([]string, error)
+
// CountListsByAccountID counts the number of lists owned by the given accountID.
CountListsByAccountID(ctx context.Context, accountID string) (int, error)
diff --git a/internal/db/timeline.go b/internal/db/timeline.go
index 43ac655d0..b97095246 100644
--- a/internal/db/timeline.go
+++ b/internal/db/timeline.go
@@ -21,20 +21,20 @@ import (
"context"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
)
// Timeline contains functionality for retrieving home/public/faved etc timelines for an account.
type Timeline interface {
// GetHomeTimeline returns a slice of statuses from accounts that are followed by the given account id.
- //
- // Statuses should be returned in descending order of when they were created (newest first).
- GetHomeTimeline(ctx context.Context, accountID string, maxID string, sinceID string, minID string, limit int, local bool) ([]*gtsmodel.Status, error)
+ GetHomeTimeline(ctx context.Context, accountID string, page *paging.Page) ([]*gtsmodel.Status, error)
// GetPublicTimeline fetches the account's PUBLIC timeline -- ie., posts and replies that are public.
// It will use the given filters and try to return as many statuses as possible up to the limit.
- //
- // Statuses should be returned in descending order of when they were created (newest first).
- GetPublicTimeline(ctx context.Context, maxID string, sinceID string, minID string, limit int, local bool) ([]*gtsmodel.Status, error)
+ GetPublicTimeline(ctx context.Context, page *paging.Page) ([]*gtsmodel.Status, error)
+
+ // GetLocalTimeline fetches the instance's LOCAL timeline -- i.e. PUBLIC posts by LOCAL users.
+ GetLocalTimeline(ctx context.Context, page *paging.Page) ([]*gtsmodel.Status, error)
// GetFavedTimeline fetches the account's FAVED timeline -- ie., posts and replies that the requesting account has faved.
// It will use the given filters and try to return as many statuses as possible up to the limit.
@@ -46,10 +46,8 @@ type Timeline interface {
GetFavedTimeline(ctx context.Context, accountID string, maxID string, minID string, limit int) ([]*gtsmodel.Status, string, string, error)
// GetListTimeline returns a slice of statuses from followed accounts collected within the list with the given listID.
- // Statuses should be returned in descending order of when they were created (newest first).
- GetListTimeline(ctx context.Context, listID string, maxID string, sinceID string, minID string, limit int) ([]*gtsmodel.Status, error)
+ GetListTimeline(ctx context.Context, listID string, page *paging.Page) ([]*gtsmodel.Status, error)
// GetTagTimeline returns a slice of public-visibility statuses that use the given tagID.
- // Statuses should be returned in descending order of when they were created (newest first).
- GetTagTimeline(ctx context.Context, tagID string, maxID string, sinceID string, minID string, limit int) ([]*gtsmodel.Status, error)
+ GetTagTimeline(ctx context.Context, tagID string, page *paging.Page) ([]*gtsmodel.Status, error)
}
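
For reference, a minimal sketch of how a caller might drive the new page-based methods; the function name, the maxID parameter, the limit of 20, and the error handling are illustrative, with only GetPublicTimeline and the paging helpers taken from this change:

func examplePublicTimeline(ctx context.Context, st *state.State, maxID string) ([]*gtsmodel.Status, error) {
	var page paging.Page
	page.Limit = 20

	if maxID != "" {
		// Only set an upper bound if the client supplied one.
		page.Max = paging.MaxID(maxID)
	}

	statuses, err := st.DB.GetPublicTimeline(ctx, &page)
	if err != nil && !errors.Is(err, db.ErrNoEntries) {
		return nil, gtserror.Newf("db error getting public timeline: %w", err)
	}

	return statuses, nil
}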
diff --git a/internal/federation/dereferencing/dereferencer_test.go b/internal/federation/dereferencing/dereferencer_test.go
index 95c879920..bd1bf50f1 100644
--- a/internal/federation/dereferencing/dereferencer_test.go
+++ b/internal/federation/dereferencing/dereferencer_test.go
@@ -77,12 +77,6 @@ func (suite *DereferencerStandardTestSuite) SetupTest() {
suite.intFilter = interaction.NewFilter(&suite.state)
suite.media = testrig.NewTestMediaManager(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- suite.visFilter,
- suite.converter,
- )
-
suite.client = testrig.NewMockHTTPClient(nil, "../../../testrig/media")
suite.storage = testrig.NewInMemoryStorage()
suite.state.DB = suite.db
diff --git a/internal/federation/federatingdb/federatingdb_test.go b/internal/federation/federatingdb/federatingdb_test.go
index ee8f84e55..a67abe762 100644
--- a/internal/federation/federatingdb/federatingdb_test.go
+++ b/internal/federation/federatingdb/federatingdb_test.go
@@ -25,7 +25,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/admin"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/federation/federatingdb"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/messages"
@@ -80,12 +79,6 @@ func (suite *FederatingDBTestSuite) SetupTest() {
suite.testActivities = testrig.NewTestActivities(suite.testAccounts)
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
suite.federatingDB = testrig.NewTestFederatingDB(&suite.state)
testrig.StandardDBSetup(suite.db, suite.testAccounts)
diff --git a/internal/federation/federatingdb/undo.go b/internal/federation/federatingdb/undo.go
index a40b6c31a..6233c9de3 100644
--- a/internal/federation/federatingdb/undo.go
+++ b/internal/federation/federatingdb/undo.go
@@ -136,10 +136,7 @@ func (f *federatingDB) undoFollow(
// Convert AS Follow to barebones *gtsmodel.Follow,
// retrieving origin + target accts from the db.
- follow, err := f.converter.ASFollowToFollow(
- gtscontext.SetBarebones(ctx),
- asFollow,
- )
+ follow, err := f.converter.ASFollowToFollow(ctx, asFollow)
if err != nil && !errors.Is(err, db.ErrNoEntries) {
err := gtserror.Newf("error converting AS Follow to follow: %w", err)
return err
@@ -152,6 +149,11 @@ func (f *federatingDB) undoFollow(
return nil
}
+ // Lock on the Follow URI
+ // as we may be updating it.
+ unlock := f.state.FedLocks.Lock(follow.URI)
+ defer unlock()
+
// Ensure addressee is follow target.
if follow.TargetAccountID != receivingAcct.ID {
const text = "receivingAcct was not Follow target"
@@ -178,7 +180,16 @@ func (f *federatingDB) undoFollow(
return err
}
- log.Debug(ctx, "Follow undone")
+ // Send the deleted follow through to
+ // the fedi worker to process side effects.
+ f.state.Workers.Federator.Queue.Push(&messages.FromFediAPI{
+ APObjectType: ap.ActivityFollow,
+ APActivityType: ap.ActivityUndo,
+ GTSModel: follow,
+ Receiving: receivingAcct,
+ Requesting: requestingAcct,
+ })
+
return nil
}
@@ -269,7 +280,16 @@ func (f *federatingDB) undoLike(
return err
}
- log.Debug(ctx, "Like undone")
+ // Send the deleted fave through to
+ // the fedi worker to process side effects.
+ f.state.Workers.Federator.Queue.Push(&messages.FromFediAPI{
+ APObjectType: ap.ActivityLike,
+ APActivityType: ap.ActivityUndo,
+ GTSModel: fave,
+ Receiving: receivingAcct,
+ Requesting: requestingAcct,
+ })
+
return nil
}
@@ -298,10 +318,7 @@ func (f *federatingDB) undoBlock(
// Convert AS Block to barebones *gtsmodel.Block,
// retrieving origin + target accts from the DB.
- block, err := f.converter.ASBlockToBlock(
- gtscontext.SetBarebones(ctx),
- asBlock,
- )
+ block, err := f.converter.ASBlockToBlock(ctx, asBlock)
if err != nil && !errors.Is(err, db.ErrNoEntries) {
err := gtserror.Newf("error converting AS Block to block: %w", err)
return err
@@ -333,7 +350,16 @@ func (f *federatingDB) undoBlock(
return err
}
- log.Debug(ctx, "Block undone")
+ // Send the deleted block through to
+ // the fedi worker to process side effects.
+ f.state.Workers.Federator.Queue.Push(&messages.FromFediAPI{
+ APObjectType: ap.ActivityBlock,
+ APActivityType: ap.ActivityUndo,
+ GTSModel: block,
+ Receiving: receivingAcct,
+ Requesting: requestingAcct,
+ })
+
return nil
}
diff --git a/internal/federation/federator_test.go b/internal/federation/federator_test.go
index a4f8c4683..7ea0432a1 100644
--- a/internal/federation/federator_test.go
+++ b/internal/federation/federator_test.go
@@ -23,7 +23,6 @@ import (
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
@@ -67,12 +66,6 @@ func (suite *FederatorStandardTestSuite) SetupTest() {
suite.state.Storage = suite.storage
suite.typeconverter = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.typeconverter,
- )
-
// Ensure it's possible to deref
// main key of foss satan.
fossSatanAS, err := suite.typeconverter.AccountToAS(context.Background(), suite.testAccounts["remote_account_1"])
diff --git a/internal/id/page.go b/internal/id/page.go
new file mode 100644
index 000000000..b43ccd4e2
--- /dev/null
+++ b/internal/id/page.go
@@ -0,0 +1,51 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+package id
+
+import (
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
+)
+
+// ValidatePage ensures that the passed page has valid paging
+// values for the currently defined ordering. That is, it
+// ensures a valid page *cursor* value, using id.Highest
+// or id.Lowest where appropriate when none given.
+func ValidatePage(page *paging.Page) {
+ if page == nil {
+ // unpaged
+ return
+ }
+
+ switch page.Order() {
+ // If the page order is ascending,
+ // ensure that a minimum value is set.
+ // This will be used as the cursor.
+ case paging.OrderAscending:
+ if page.Min.Value == "" {
+ page.Min.Value = Lowest
+ }
+
+ // If the page order is descending,
+ // ensure that a maximum value is set.
+ // This will be used as the cursor.
+ case paging.OrderDescending:
+ if page.Max.Value == "" {
+ page.Max.Value = Highest
+ }
+ }
+}
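
A short usage sketch for the new helper: a page parsed from client query params may arrive with an empty cursor, and ValidatePage fills in a concrete one before the DB query. (Assumption here: paging.MaxID still marks the boundary as descending even when given an empty value.)

page := &paging.Page{
	Max:   paging.MaxID(""), // client sent no max_id
	Limit: 40,
}
id.ValidatePage(page)
// page.Max.Value is now id.Highest, giving the timeline
// queries a concrete "id < ?" upper bound to page down from.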
diff --git a/internal/media/media_test.go b/internal/media/media_test.go
index f46f837da..e6175642b 100644
--- a/internal/media/media_test.go
+++ b/internal/media/media_test.go
@@ -21,13 +21,11 @@ import (
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/admin"
"github.com/superseriousbusiness/gotosocial/internal/db"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
gtsmodel "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/transport"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -60,12 +58,6 @@ func (suite *MediaStandardTestSuite) SetupTest() {
testrig.StandardStorageSetup(suite.storage, "../../testrig/media")
testrig.StandardDBSetup(suite.db, nil)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.testAttachments = testrig.NewTestAttachments()
suite.testAccounts = testrig.NewTestAccounts()
suite.testEmojis = testrig.NewTestEmojis()
diff --git a/internal/paging/page.go b/internal/paging/page.go
index 082012879..6c91da6b2 100644
--- a/internal/paging/page.go
+++ b/internal/paging/page.go
@@ -64,10 +64,11 @@ func (p *Page) GetOrder() Order {
if p == nil {
return 0
}
- return p.order()
+ return p.Order()
}
-func (p *Page) order() Order {
+// Order is a small helper function to return page sort ordering.
+func (p *Page) Order() Order {
switch {
case p.Min.Order != 0:
return p.Min.Order
@@ -90,7 +91,7 @@ func (p *Page) Page(in []string) []string {
return in
}
- if p.order().Ascending() {
+ if p.Order().Ascending() {
// Sort type is ascending, input
// data is assumed to be ascending.
@@ -150,7 +151,7 @@ func Page_PageFunc[WithID any](p *Page, in []WithID, get func(WithID) string) []
return in
}
- if p.order().Ascending() {
+ if p.Order().Ascending() {
// Sort type is ascending, input
// data is assumed to be ascending.
diff --git a/internal/processing/account/account_test.go b/internal/processing/account/account_test.go
index 4173162cc..5b0c5f01e 100644
--- a/internal/processing/account/account_test.go
+++ b/internal/processing/account/account_test.go
@@ -95,12 +95,6 @@ func (suite *AccountStandardTestSuite) SetupTest() {
suite.state.AdminActions = admin.New(suite.state.DB, &suite.state.Workers)
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
diff --git a/internal/processing/admin/admin_test.go b/internal/processing/admin/admin_test.go
index 804abbc62..93f20d5e6 100644
--- a/internal/processing/admin/admin_test.go
+++ b/internal/processing/admin/admin_test.go
@@ -92,12 +92,6 @@ func (suite *AdminStandardTestSuite) SetupTest() {
suite.state.AdminActions = adminactions.New(suite.state.DB, &suite.state.Workers)
suite.tc = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.tc,
- )
-
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
diff --git a/internal/processing/common/status.go b/internal/processing/common/status.go
index 01f2ab72d..532b531e5 100644
--- a/internal/processing/common/status.go
+++ b/internal/processing/common/status.go
@@ -306,25 +306,10 @@ func (p *Processor) InvalidateTimelinedStatus(ctx context.Context, accountID str
return gtserror.Newf("db error getting lists for account %s: %w", accountID, err)
}
- // Start new log entry with
- // the above calling func's name.
- l := log.
- WithContext(ctx).
- WithField("caller", log.Caller(3)).
- WithField("accountID", accountID).
- WithField("statusID", statusID)
-
- // Unprepare item from home + list timelines, just log
- // if something goes wrong since this is not a showstopper.
-
- if err := p.state.Timelines.Home.UnprepareItem(ctx, accountID, statusID); err != nil {
- l.Errorf("error unpreparing item from home timeline: %v", err)
- }
-
+ // Unprepare item from home + list timelines.
+ p.state.Caches.Timelines.Home.MustGet(accountID).UnprepareByStatusIDs(statusID)
for _, list := range lists {
- if err := p.state.Timelines.List.UnprepareItem(ctx, list.ID, statusID); err != nil {
- l.Errorf("error unpreparing item from list timeline %s: %v", list.ID, err)
- }
+ p.state.Caches.Timelines.List.MustGet(list.ID).UnprepareByStatusIDs(statusID)
}
return nil
diff --git a/internal/processing/conversations/conversations_test.go b/internal/processing/conversations/conversations_test.go
index fecaf5666..06eef0e97 100644
--- a/internal/processing/conversations/conversations_test.go
+++ b/internal/processing/conversations/conversations_test.go
@@ -106,12 +106,6 @@ func (suite *ConversationsTestSuite) SetupTest() {
suite.tc = typeutils.NewConverter(&suite.state)
suite.filter = visibility.NewFilter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- suite.filter,
- suite.tc,
- )
-
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
diff --git a/internal/processing/processor_test.go b/internal/processing/processor_test.go
index 4b6406b03..a743f75ee 100644
--- a/internal/processing/processor_test.go
+++ b/internal/processing/processor_test.go
@@ -109,12 +109,6 @@ func (suite *ProcessingStandardTestSuite) SetupTest() {
suite.state.Storage = suite.storage
suite.typeconverter = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.typeconverter,
- )
-
suite.httpClient = testrig.NewMockHTTPClient(nil, "../../testrig/media")
suite.httpClient.TestRemotePeople = testrig.NewTestFediPeople()
suite.httpClient.TestRemoteStatuses = testrig.NewTestFediStatuses()
diff --git a/internal/processing/status/status_test.go b/internal/processing/status/status_test.go
index c163f95a7..19f3f5ebc 100644
--- a/internal/processing/status/status_test.go
+++ b/internal/processing/status/status_test.go
@@ -93,11 +93,6 @@ func (suite *StatusStandardTestSuite) SetupTest() {
visFilter := visibility.NewFilter(&suite.state)
intFilter := interaction.NewFilter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visFilter,
- suite.typeConverter,
- )
common := common.New(&suite.state, suite.mediaManager, suite.typeConverter, suite.federator, visFilter)
polls := polls.New(&common, &suite.state, suite.typeConverter)
diff --git a/internal/processing/timeline/common.go b/internal/processing/timeline/common.go
deleted file mode 100644
index 6d29d81d6..000000000
--- a/internal/processing/timeline/common.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-package timeline
-
-import (
- "context"
-
- "github.com/superseriousbusiness/gotosocial/internal/timeline"
-)
-
-// SkipInsert returns a function that satisifes SkipInsertFunction.
-func SkipInsert() timeline.SkipInsertFunction {
- // Gap to allow between a status or boost of status,
- // and reinsertion of a new boost of that status.
- // This is useful to avoid a heavily boosted status
- // showing up way too often in a user's timeline.
- const boostReinsertionDepth = 50
-
- return func(
- ctx context.Context,
- newItemID string,
- newItemAccountID string,
- newItemBoostOfID string,
- newItemBoostOfAccountID string,
- nextItemID string,
- nextItemAccountID string,
- nextItemBoostOfID string,
- nextItemBoostOfAccountID string,
- depth int,
- ) (bool, error) {
- if newItemID == nextItemID {
- // Don't insert duplicates.
- return true, nil
- }
-
- if newItemBoostOfID != "" {
- if newItemBoostOfID == nextItemBoostOfID &&
- depth < boostReinsertionDepth {
- // Don't insert boosts of items
- // we've seen boosted recently.
- return true, nil
- }
-
- if newItemBoostOfID == nextItemID &&
- depth < boostReinsertionDepth {
- // Don't insert boosts of items when
- // we've seen the original recently.
- return true, nil
- }
- }
-
- // Proceed with insertion
- // (that's what she said!).
- return false, nil
- }
-}
diff --git a/internal/processing/timeline/faved.go b/internal/processing/timeline/faved.go
index 6e915f4ef..bdafcac36 100644
--- a/internal/processing/timeline/faved.go
+++ b/internal/processing/timeline/faved.go
@@ -31,6 +31,7 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/util"
)
+// FavedTimelineGet returns a pageable response of statuses that have been favourited by the requesting account.
func (p *Processor) FavedTimelineGet(ctx context.Context, authed *apiutil.Auth, maxID string, minID string, limit int) (*apimodel.PageableResponse, gtserror.WithCode) {
statuses, nextMaxID, prevMinID, err := p.state.DB.GetFavedTimeline(ctx, authed.Account.ID, maxID, minID, limit)
if err != nil && !errors.Is(err, db.ErrNoEntries) {
diff --git a/internal/processing/timeline/home.go b/internal/processing/timeline/home.go
index 38cf38405..61fef005b 100644
--- a/internal/processing/timeline/home.go
+++ b/internal/processing/timeline/home.go
@@ -19,132 +19,85 @@ package timeline
import (
"context"
- "errors"
+ "net/url"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
- apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
- "github.com/superseriousbusiness/gotosocial/internal/db"
statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status"
- "github.com/superseriousbusiness/gotosocial/internal/filter/usermute"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
- "github.com/superseriousbusiness/gotosocial/internal/gtscontext"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
- "github.com/superseriousbusiness/gotosocial/internal/state"
- "github.com/superseriousbusiness/gotosocial/internal/timeline"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
- "github.com/superseriousbusiness/gotosocial/internal/util"
+ "github.com/superseriousbusiness/gotosocial/internal/log"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
)
-// HomeTimelineGrab returns a function that satisfies GrabFunction for home timelines.
-func HomeTimelineGrab(state *state.State) timeline.GrabFunction {
- return func(ctx context.Context, accountID string, maxID string, sinceID string, minID string, limit int) ([]timeline.Timelineable, bool, error) {
- statuses, err := state.DB.GetHomeTimeline(ctx, accountID, maxID, sinceID, minID, limit, false)
- if err != nil && !errors.Is(err, db.ErrNoEntries) {
- err = gtserror.Newf("error getting statuses from db: %w", err)
- return nil, false, err
- }
+// HomeTimelineGet gets a pageable timeline of statuses
+// in the home timeline of the requesting account.
+func (p *Processor) HomeTimelineGet(
+ ctx context.Context,
+ requester *gtsmodel.Account,
+ page *paging.Page,
+ local bool,
+) (
+ *apimodel.PageableResponse,
+ gtserror.WithCode,
+) {
- count := len(statuses)
- if count == 0 {
- // We just don't have enough statuses
- // left in the db so return stop = true.
- return nil, true, nil
+ var pageQuery url.Values
+ var postFilter func(*gtsmodel.Status) bool
+ if local {
+ // Set local = true query.
+ pageQuery = localOnlyTrue
+ postFilter = func(s *gtsmodel.Status) bool {
+ return !*s.Local
}
-
- items := make([]timeline.Timelineable, count)
- for i, s := range statuses {
- items[i] = s
- }
-
- return items, false, nil
+ } else {
+ // Set local = false query.
+ pageQuery = localOnlyFalse
+ postFilter = nil
}
-}
+ return p.getStatusTimeline(ctx,
-// HomeTimelineFilter returns a function that satisfies FilterFunction for home timelines.
-func HomeTimelineFilter(state *state.State, visFilter *visibility.Filter) timeline.FilterFunction {
- return func(ctx context.Context, accountID string, item timeline.Timelineable) (shouldIndex bool, err error) {
- status, ok := item.(*gtsmodel.Status)
- if !ok {
- err = gtserror.New("could not convert item to *gtsmodel.Status")
- return false, err
- }
+ // Auth'd
+ // account.
+ requester,
- requestingAccount, err := state.DB.GetAccountByID(ctx, accountID)
- if err != nil {
- err = gtserror.Newf("error getting account with id %s: %w", accountID, err)
- return false, err
- }
+ // Home timeline cache, keyed by account ID.
+ p.state.Caches.Timelines.Home.MustGet(requester.ID),
- timelineable, err := visFilter.StatusHomeTimelineable(ctx, requestingAccount, status)
- if err != nil {
- err = gtserror.Newf("error checking hometimelineability of status %s for account %s: %w", status.ID, accountID, err)
- return false, err
- }
+ // Current
+ // page.
+ page,
- return timelineable, nil
- }
-}
+ // Home timeline endpoint.
+ "/api/v1/timelines/home",
-// HomeTimelineStatusPrepare returns a function that satisfies PrepareFunction for home timelines.
-func HomeTimelineStatusPrepare(state *state.State, converter *typeutils.Converter) timeline.PrepareFunction {
- return func(ctx context.Context, accountID string, itemID string) (timeline.Preparable, error) {
- status, err := state.DB.GetStatusByID(ctx, itemID)
- if err != nil {
- err = gtserror.Newf("error getting status with id %s: %w", itemID, err)
- return nil, err
- }
+ // Set local-only timeline
+ // page query flag, (this map
+ // later gets copied before
+ // any further usage).
+ pageQuery,
- requestingAccount, err := state.DB.GetAccountByID(ctx, accountID)
- if err != nil {
- err = gtserror.Newf("error getting account with id %s: %w", accountID, err)
- return nil, err
- }
+ // Status filter context.
+ statusfilter.FilterContextHome,
- filters, err := state.DB.GetFiltersForAccountID(ctx, requestingAccount.ID)
- if err != nil {
- err = gtserror.Newf("couldn't retrieve filters for account %s: %w", requestingAccount.ID, err)
- return nil, err
- }
+ // Database load function.
+ func(pg *paging.Page) (statuses []*gtsmodel.Status, err error) {
+ return p.state.DB.GetHomeTimeline(ctx, requester.ID, pg)
+ },
- mutes, err := state.DB.GetAccountMutes(gtscontext.SetBarebones(ctx), requestingAccount.ID, nil)
- if err != nil {
- err = gtserror.Newf("couldn't retrieve mutes for account %s: %w", requestingAccount.ID, err)
- return nil, err
- }
- compiledMutes := usermute.NewCompiledUserMuteList(mutes)
+ // Filtering function,
+ // i.e. filter before caching.
+ func(s *gtsmodel.Status) bool {
- return converter.StatusToAPIStatus(ctx, status, requestingAccount, statusfilter.FilterContextHome, filters, compiledMutes)
- }
-}
+ // Check the visibility of passed status to requesting user.
+ ok, err := p.visFilter.StatusHomeTimelineable(ctx, requester, s)
+ if err != nil {
+ log.Errorf(ctx, "error filtering status %s: %v", s.URI, err)
+ }
+ return !ok
+ },
-func (p *Processor) HomeTimelineGet(ctx context.Context, authed *apiutil.Auth, maxID string, sinceID string, minID string, limit int, local bool) (*apimodel.PageableResponse, gtserror.WithCode) {
- statuses, err := p.state.Timelines.Home.GetTimeline(ctx, authed.Account.ID, maxID, sinceID, minID, limit, local)
- if err != nil && !errors.Is(err, db.ErrNoEntries) {
- err = gtserror.Newf("error getting statuses: %w", err)
- return nil, gtserror.NewErrorInternalError(err)
- }
-
- count := len(statuses)
- if count == 0 {
- return util.EmptyPageableResponse(), nil
- }
-
- var (
- items = make([]interface{}, count)
- nextMaxIDValue = statuses[count-1].GetID()
- prevMinIDValue = statuses[0].GetID()
+ // Post filtering function,
+ // i.e. filter after caching.
+ postFilter,
)
-
- for i := range statuses {
- items[i] = statuses[i]
- }
-
- return util.PackagePageableResponse(util.PageableResponseParams{
- Items: items,
- Path: "/api/v1/timelines/home",
- NextMaxIDValue: nextMaxIDValue,
- PrevMinIDValue: prevMinIDValue,
- Limit: limit,
- })
}
diff --git a/internal/processing/timeline/home_test.go b/internal/processing/timeline/home_test.go
index ea56418f6..50025b9a8 100644
--- a/internal/processing/timeline/home_test.go
+++ b/internal/processing/timeline/home_test.go
@@ -23,13 +23,9 @@ import (
"github.com/stretchr/testify/suite"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
- apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/id"
- tlprocessor "github.com/superseriousbusiness/gotosocial/internal/processing/timeline"
- "github.com/superseriousbusiness/gotosocial/internal/timeline"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
"github.com/superseriousbusiness/gotosocial/internal/util"
)
@@ -37,25 +33,7 @@ type HomeTestSuite struct {
TimelineStandardTestSuite
}
-func (suite *HomeTestSuite) SetupTest() {
- suite.TimelineStandardTestSuite.SetupTest()
-
- suite.state.Timelines.Home = timeline.NewManager(
- tlprocessor.HomeTimelineGrab(&suite.state),
- tlprocessor.HomeTimelineFilter(&suite.state, visibility.NewFilter(&suite.state)),
- tlprocessor.HomeTimelineStatusPrepare(&suite.state, typeutils.NewConverter(&suite.state)),
- tlprocessor.SkipInsert(),
- )
- if err := suite.state.Timelines.Home.Start(); err != nil {
- suite.FailNow(err.Error())
- }
-}
-
func (suite *HomeTestSuite) TearDownTest() {
- if err := suite.state.Timelines.Home.Stop(); err != nil {
- suite.FailNow(err.Error())
- }
-
suite.TimelineStandardTestSuite.TearDownTest()
}
@@ -64,7 +42,6 @@ func (suite *HomeTestSuite) TestHomeTimelineGetHideFiltered() {
var (
ctx = context.Background()
requester = suite.testAccounts["local_account_1"]
- authed = &apiutil.Auth{Account: requester}
maxID = ""
sinceID = ""
minID = "01F8MHAAY43M6RJ473VQFCVH36" // 1 before filteredStatus
@@ -97,11 +74,12 @@ func (suite *HomeTestSuite) TestHomeTimelineGetHideFiltered() {
// Fetch the timeline to make sure the status we're going to filter is in that section of it.
resp, errWithCode := suite.timeline.HomeTimelineGet(
ctx,
- authed,
- maxID,
- sinceID,
- minID,
- limit,
+ requester,
+ &paging.Page{
+ Min: paging.EitherMinID(minID, sinceID),
+ Max: paging.MaxID(maxID),
+ Limit: limit,
+ },
local,
)
suite.NoError(errWithCode)
@@ -114,10 +92,9 @@ func (suite *HomeTestSuite) TestHomeTimelineGetHideFiltered() {
if !filteredStatusFound {
suite.FailNow("precondition failed: status we would filter isn't present in unfiltered timeline")
}
- // Prune the timeline to drop cached prepared statuses, a side effect of this precondition check.
- if _, err := suite.state.Timelines.Home.Prune(ctx, requester.ID, 0, 0); err != nil {
- suite.FailNow(err.Error())
- }
+
+ // Clear the timeline to drop all cached statuses.
+ suite.state.Caches.Timelines.Home.Clear(requester.ID)
// Create a filter to hide one status on the timeline.
if err := suite.db.PutFilter(ctx, filter); err != nil {
@@ -127,11 +104,12 @@ func (suite *HomeTestSuite) TestHomeTimelineGetHideFiltered() {
// Fetch the timeline again with the filter in place.
resp, errWithCode = suite.timeline.HomeTimelineGet(
ctx,
- authed,
- maxID,
- sinceID,
- minID,
- limit,
+ requester,
+ &paging.Page{
+ Min: paging.EitherMinID(minID, sinceID),
+ Max: paging.MaxID(maxID),
+ Limit: limit,
+ },
local,
)
diff --git a/internal/processing/timeline/list.go b/internal/processing/timeline/list.go
index 147f87ab4..10a7bb388 100644
--- a/internal/processing/timeline/list.go
+++ b/internal/processing/timeline/list.go
@@ -22,155 +22,93 @@ import (
"errors"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
- apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
"github.com/superseriousbusiness/gotosocial/internal/db"
statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status"
- "github.com/superseriousbusiness/gotosocial/internal/filter/usermute"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
- "github.com/superseriousbusiness/gotosocial/internal/state"
- "github.com/superseriousbusiness/gotosocial/internal/timeline"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
- "github.com/superseriousbusiness/gotosocial/internal/util"
+ "github.com/superseriousbusiness/gotosocial/internal/log"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
)
-// ListTimelineGrab returns a function that satisfies GrabFunction for list timelines.
-func ListTimelineGrab(state *state.State) timeline.GrabFunction {
- return func(ctx context.Context, listID string, maxID string, sinceID string, minID string, limit int) ([]timeline.Timelineable, bool, error) {
- statuses, err := state.DB.GetListTimeline(ctx, listID, maxID, sinceID, minID, limit)
- if err != nil && !errors.Is(err, db.ErrNoEntries) {
- err = gtserror.Newf("error getting statuses from db: %w", err)
- return nil, false, err
- }
-
- count := len(statuses)
- if count == 0 {
- // We just don't have enough statuses
- // left in the db so return stop = true.
- return nil, true, nil
- }
-
- items := make([]timeline.Timelineable, count)
- for i, s := range statuses {
- items[i] = s
- }
-
- return items, false, nil
- }
-}
-
-// ListTimelineFilter returns a function that satisfies FilterFunction for list timelines.
-func ListTimelineFilter(state *state.State, visFilter *visibility.Filter) timeline.FilterFunction {
- return func(ctx context.Context, listID string, item timeline.Timelineable) (shouldIndex bool, err error) {
- status, ok := item.(*gtsmodel.Status)
- if !ok {
- err = gtserror.New("could not convert item to *gtsmodel.Status")
- return false, err
- }
-
- list, err := state.DB.GetListByID(ctx, listID)
- if err != nil {
- err = gtserror.Newf("error getting list with id %s: %w", listID, err)
- return false, err
- }
-
- requestingAccount, err := state.DB.GetAccountByID(ctx, list.AccountID)
- if err != nil {
- err = gtserror.Newf("error getting account with id %s: %w", list.AccountID, err)
- return false, err
- }
-
- timelineable, err := visFilter.StatusHomeTimelineable(ctx, requestingAccount, status)
- if err != nil {
- err = gtserror.Newf("error checking hometimelineability of status %s for account %s: %w", status.ID, list.AccountID, err)
- return false, err
- }
-
- return timelineable, nil
- }
-}
-
-// ListTimelineStatusPrepare returns a function that satisfies PrepareFunction for list timelines.
-func ListTimelineStatusPrepare(state *state.State, converter *typeutils.Converter) timeline.PrepareFunction {
- return func(ctx context.Context, listID string, itemID string) (timeline.Preparable, error) {
- status, err := state.DB.GetStatusByID(ctx, itemID)
- if err != nil {
- err = gtserror.Newf("error getting status with id %s: %w", itemID, err)
- return nil, err
- }
-
- list, err := state.DB.GetListByID(ctx, listID)
- if err != nil {
- err = gtserror.Newf("error getting list with id %s: %w", listID, err)
- return nil, err
- }
-
- requestingAccount, err := state.DB.GetAccountByID(ctx, list.AccountID)
- if err != nil {
- err = gtserror.Newf("error getting account with id %s: %w", list.AccountID, err)
- return nil, err
- }
-
- filters, err := state.DB.GetFiltersForAccountID(ctx, requestingAccount.ID)
- if err != nil {
- err = gtserror.Newf("couldn't retrieve filters for account %s: %w", requestingAccount.ID, err)
- return nil, err
- }
-
- mutes, err := state.DB.GetAccountMutes(gtscontext.SetBarebones(ctx), requestingAccount.ID, nil)
- if err != nil {
- err = gtserror.Newf("couldn't retrieve mutes for account %s: %w", requestingAccount.ID, err)
- return nil, err
- }
- compiledMutes := usermute.NewCompiledUserMuteList(mutes)
-
- return converter.StatusToAPIStatus(ctx, status, requestingAccount, statusfilter.FilterContextHome, filters, compiledMutes)
- }
-}
-
-func (p *Processor) ListTimelineGet(ctx context.Context, authed *apiutil.Auth, listID string, maxID string, sinceID string, minID string, limit int) (*apimodel.PageableResponse, gtserror.WithCode) {
- // Ensure list exists + is owned by this account.
- list, err := p.state.DB.GetListByID(ctx, listID)
- if err != nil {
- if errors.Is(err, db.ErrNoEntries) {
- return nil, gtserror.NewErrorNotFound(err)
- }
+// ListTimelineGet gets a pageable timeline of statuses
+// in the list timeline with the given ID, as seen by the requesting account.
+func (p *Processor) ListTimelineGet(
+ ctx context.Context,
+ requester *gtsmodel.Account,
+ listID string,
+ page *paging.Page,
+) (
+ *apimodel.PageableResponse,
+ gtserror.WithCode,
+) {
+ // Fetch the requested list with ID.
+ list, err := p.state.DB.GetListByID(
+ gtscontext.SetBarebones(ctx),
+ listID,
+ )
+ if err != nil && !errors.Is(err, db.ErrNoEntries) {
return nil, gtserror.NewErrorInternalError(err)
}
- if list.AccountID != authed.Account.ID {
- err = gtserror.Newf("list with id %s does not belong to account %s", list.ID, authed.Account.ID)
+ // Check exists.
+ if list == nil {
+ const text = "list not found"
+ return nil, gtserror.NewErrorNotFound(
+ errors.New(text),
+ text,
+ )
+ }
+
+ // Check list owned by auth'd account.
+ if list.AccountID != requester.ID {
+ err := gtserror.New("list does not belong to account")
return nil, gtserror.NewErrorNotFound(err)
}
- statuses, err := p.state.Timelines.List.GetTimeline(ctx, listID, maxID, sinceID, minID, limit, false)
- if err != nil && !errors.Is(err, db.ErrNoEntries) {
- err = gtserror.Newf("error getting statuses: %w", err)
- return nil, gtserror.NewErrorInternalError(err)
- }
+ // Fetch status timeline for list.
+ return p.getStatusTimeline(ctx,
- count := len(statuses)
- if count == 0 {
- return util.EmptyPageableResponse(), nil
- }
+ // Auth'd
+ // account.
+ requester,
- var (
- items = make([]interface{}, count)
- nextMaxIDValue = statuses[count-1].GetID()
- prevMinIDValue = statuses[0].GetID()
+ // Keyed-by-list-ID, list timeline cache.
+ p.state.Caches.Timelines.List.MustGet(listID),
+
+ // Current
+ // page.
+ page,
+
+ // List timeline ID's endpoint.
+ "/api/v1/timelines/list/"+listID,
+
+ // No page
+ // query.
+ nil,
+
+ // Status filter context.
+ statusfilter.FilterContextHome,
+
+ // Database load function.
+ func(pg *paging.Page) (statuses []*gtsmodel.Status, err error) {
+ return p.state.DB.GetListTimeline(ctx, listID, pg)
+ },
+
+ // Filtering function,
+ // i.e. filter before caching.
+ func(s *gtsmodel.Status) bool {
+
+ // Check the visibility of passed status to requesting user.
+ ok, err := p.visFilter.StatusHomeTimelineable(ctx, requester, s)
+ if err != nil {
+ log.Errorf(ctx, "error filtering status %s: %v", s.URI, err)
+ }
+ return !ok
+ },
+
+ // Post-filtering function,
+ // i.e. filter after caching.
+ nil,
)
-
- for i := range statuses {
- items[i] = statuses[i]
- }
-
- return util.PackagePageableResponse(util.PageableResponseParams{
- Items: items,
- Path: "/api/v1/timelines/list/" + listID,
- NextMaxIDValue: nextMaxIDValue,
- PrevMinIDValue: prevMinIDValue,
- Limit: limit,
- })
}
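A note on the filter closures passed above: getStatusTimeline (introduced in timeline.go further down in this diff) treats the pre-filter as a "drop" predicate, so returning true excludes the status before it is cached. A minimal sketch of that contract, mirroring the closures in this diff (fragment only, not compilable on its own):

    // Pre-filter: drop anything not visible to the requester. On error the
    // status is conservatively dropped too, since the closure falls through
    // to return !ok, matching the behaviour of the closures above.
    preFilter := func(s *gtsmodel.Status) bool {
        visible, err := p.visFilter.StatusHomeTimelineable(ctx, requester, s)
        if err != nil {
            log.Errorf(ctx, "error filtering status %s: %v", s.URI, err)
        }
        return !visible // true => exclude from timeline
    }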
diff --git a/internal/processing/timeline/notification.go b/internal/processing/timeline/notification.go
index 04a898198..ba1e3dba8 100644
--- a/internal/processing/timeline/notification.go
+++ b/internal/processing/timeline/notification.go
@@ -36,6 +36,7 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/util"
)
+// NotificationsGet ...
func (p *Processor) NotificationsGet(
ctx context.Context,
authed *apiutil.Auth,
diff --git a/internal/processing/timeline/public.go b/internal/processing/timeline/public.go
index dc00688e3..0e675da14 100644
--- a/internal/processing/timeline/public.go
+++ b/internal/processing/timeline/public.go
@@ -19,152 +19,143 @@ package timeline
import (
"context"
- "errors"
- "strconv"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
- "github.com/superseriousbusiness/gotosocial/internal/db"
statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status"
- "github.com/superseriousbusiness/gotosocial/internal/filter/usermute"
- "github.com/superseriousbusiness/gotosocial/internal/gtscontext"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/log"
- "github.com/superseriousbusiness/gotosocial/internal/util"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
)
+// PublicTimelineGet gets a pageable timeline of public statuses
+// for the given requesting account. It ensures that each status
+// in the timeline is visible to the account before returning it.
+//
+// The local argument limits this to local-only statuses.
func (p *Processor) PublicTimelineGet(
ctx context.Context,
requester *gtsmodel.Account,
- maxID string,
- sinceID string,
- minID string,
- limit int,
+ page *paging.Page,
local bool,
-) (*apimodel.PageableResponse, gtserror.WithCode) {
- const maxAttempts = 3
- var (
- nextMaxIDValue string
- prevMinIDValue string
- items = make([]any, 0, limit)
- )
-
- var filters []*gtsmodel.Filter
- var compiledMutes *usermute.CompiledUserMuteList
- if requester != nil {
- var err error
- filters, err = p.state.DB.GetFiltersForAccountID(ctx, requester.ID)
- if err != nil {
- err = gtserror.Newf("couldn't retrieve filters for account %s: %w", requester.ID, err)
- return nil, gtserror.NewErrorInternalError(err)
- }
-
- mutes, err := p.state.DB.GetAccountMutes(gtscontext.SetBarebones(ctx), requester.ID, nil)
- if err != nil {
- err = gtserror.Newf("couldn't retrieve mutes for account %s: %w", requester.ID, err)
- return nil, gtserror.NewErrorInternalError(err)
- }
- compiledMutes = usermute.NewCompiledUserMuteList(mutes)
+) (
+ *apimodel.PageableResponse,
+ gtserror.WithCode,
+) {
+ if local {
+ return p.localTimelineGet(ctx, requester, page)
}
-
- // Try a few times to select appropriate public
- // statuses from the db, paging up or down to
- // reattempt if nothing suitable is found.
-outer:
- for attempts := 1; ; attempts++ {
- // Select slightly more than the limit to try to avoid situations where
- // we filter out all the entries, and have to make another db call.
- // It's cheaper to select more in 1 query than it is to do multiple queries.
- statuses, err := p.state.DB.GetPublicTimeline(ctx, maxID, sinceID, minID, limit+5, local)
- if err != nil && !errors.Is(err, db.ErrNoEntries) {
- err = gtserror.Newf("db error getting statuses: %w", err)
- return nil, gtserror.NewErrorInternalError(err)
- }
-
- count := len(statuses)
- if count == 0 {
- // Nothing relevant (left) in the db.
- return util.EmptyPageableResponse(), nil
- }
-
- // Page up from first status in slice
- // (ie., one with the highest ID).
- prevMinIDValue = statuses[0].ID
-
- inner:
- for _, s := range statuses {
- // Push back the next page down ID to
- // this status, regardless of whether
- // we end up filtering it out or not.
- nextMaxIDValue = s.ID
-
- timelineable, err := p.visFilter.StatusPublicTimelineable(ctx, requester, s)
- if err != nil {
- log.Errorf(ctx, "error checking status visibility: %v", err)
- continue inner
- }
-
- if !timelineable {
- continue inner
- }
-
- apiStatus, err := p.converter.StatusToAPIStatus(ctx, s, requester, statusfilter.FilterContextPublic, filters, compiledMutes)
- if errors.Is(err, statusfilter.ErrHideStatus) {
- continue
- }
- if err != nil {
- log.Errorf(ctx, "error converting to api status: %v", err)
- continue inner
- }
-
- // Looks good, add this.
- items = append(items, apiStatus)
-
- // We called the db with a little
- // more than the desired limit.
- //
- // Ensure we don't return more
- // than the caller asked for.
- if len(items) == limit {
- break outer
- }
- }
-
- if len(items) != 0 {
- // We've got some items left after
- // filtering, happily break + return.
- break
- }
-
- if attempts >= maxAttempts {
- // We reached our attempts limit.
- // Be nice + warn about it.
- log.Warn(ctx, "reached max attempts to find items in public timeline")
- break
- }
-
- // We filtered out all items before we
- // found anything we could return, but
- // we still have attempts left to try
- // fetching again. Set paging params
- // and allow loop to continue.
- if minID != "" {
- // Paging up.
- minID = prevMinIDValue
- } else {
- // Paging down.
- maxID = nextMaxIDValue
- }
- }
-
- return util.PackagePageableResponse(util.PageableResponseParams{
- Items: items,
- Path: "/api/v1/timelines/public",
- NextMaxIDValue: nextMaxIDValue,
- PrevMinIDValue: prevMinIDValue,
- Limit: limit,
- ExtraQueryParams: []string{
- "local=" + strconv.FormatBool(local),
- },
- })
+ return p.publicTimelineGet(ctx, requester, page)
+}
+
+func (p *Processor) publicTimelineGet(
+ ctx context.Context,
+ requester *gtsmodel.Account,
+ page *paging.Page,
+) (
+ *apimodel.PageableResponse,
+ gtserror.WithCode,
+) {
+ return p.getStatusTimeline(ctx,
+
+ // Auth account,
+ // can be nil.
+ requester,
+
+ // No cache.
+ nil,
+
+ // Current
+ // page.
+ page,
+
+ // Public timeline endpoint.
+ "/api/v1/timelines/public",
+
+ // Set local-only timeline
+ // page query flag (this map
+ // later gets copied before
+ // any further usage).
+ localOnlyFalse,
+
+ // Status filter context.
+ statusfilter.FilterContextPublic,
+
+ // Database load function.
+ func(pg *paging.Page) (statuses []*gtsmodel.Status, err error) {
+ return p.state.DB.GetPublicTimeline(ctx, pg)
+ },
+
+ // Pre-filtering function,
+ // i.e. filter before caching.
+ func(s *gtsmodel.Status) bool {
+
+ // Check the visibility of passed status to requesting user.
+ ok, err := p.visFilter.StatusPublicTimelineable(ctx, requester, s)
+ if err != nil {
+ log.Errorf(ctx, "error filtering status %s: %v", s.URI, err)
+ }
+ return !ok
+ },
+
+ // Post-filtering function,
+ // i.e. filter after caching.
+ nil,
+ )
+}
+
+func (p *Processor) localTimelineGet(
+ ctx context.Context,
+ requester *gtsmodel.Account,
+ page *paging.Page,
+) (
+ *apimodel.PageableResponse,
+ gtserror.WithCode,
+) {
+ return p.getStatusTimeline(ctx,
+
+ // Auth account,
+ // can be nil.
+ requester,
+
+ // No cache.
+ nil,
+
+ // Current
+ // page.
+ page,
+
+ // Public timeline endpoint.
+ "/api/v1/timelines/public",
+
+ // Set local-only timeline
+ // page query flag (this map
+ // later gets copied before
+ // any further usage).
+ localOnlyTrue,
+
+ // Status filter context.
+ statusfilter.FilterContextPublic,
+
+ // Database load function.
+ func(pg *paging.Page) (statuses []*gtsmodel.Status, err error) {
+ return p.state.DB.GetLocalTimeline(ctx, pg)
+ },
+
+ // Filtering function,
+ // i.e. filter before caching.
+ func(s *gtsmodel.Status) bool {
+
+ // Check the visibility of passed status to requesting user.
+ ok, err := p.visFilter.StatusPublicTimelineable(ctx, requester, s)
+ if err != nil {
+ log.Errorf(ctx, "error filtering status %s: %v", s.URI, err)
+ }
+ return !ok
+ },
+
+ // Post-filtering function,
+ // i.e. filter after caching.
+ nil,
+ )
}
diff --git a/internal/processing/timeline/public_test.go b/internal/processing/timeline/public_test.go
index ab8e33429..b5017af71 100644
--- a/internal/processing/timeline/public_test.go
+++ b/internal/processing/timeline/public_test.go
@@ -25,6 +25,7 @@ import (
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/id"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
"github.com/superseriousbusiness/gotosocial/internal/util"
)
@@ -46,10 +47,11 @@ func (suite *PublicTestSuite) TestPublicTimelineGet() {
resp, errWithCode := suite.timeline.PublicTimelineGet(
ctx,
requester,
- maxID,
- sinceID,
- minID,
- limit,
+ &paging.Page{
+ Min: paging.EitherMinID(minID, sinceID),
+ Max: paging.MaxID(maxID),
+ Limit: limit,
+ },
local,
)
@@ -79,10 +81,11 @@ func (suite *PublicTestSuite) TestPublicTimelineGetNotEmpty() {
resp, errWithCode := suite.timeline.PublicTimelineGet(
ctx,
requester,
- maxID,
- sinceID,
- minID,
- limit,
+ &paging.Page{
+ Min: paging.EitherMinID(minID, sinceID),
+ Max: paging.MaxID(maxID),
+ Limit: limit,
+ },
local,
)
@@ -90,9 +93,9 @@ func (suite *PublicTestSuite) TestPublicTimelineGetNotEmpty() {
// some other statuses were filtered out.
suite.NoError(errWithCode)
suite.Len(resp.Items, 1)
- suite.Equal(`<http://localhost:8080/api/v1/timelines/public?limit=1&max_id=01F8MHCP5P2NWYQ416SBA0XSEV&local=false>; rel="next", <http://localhost:8080/api/v1/timelines/public?limit=1&min_id=01HE7XJ1CG84TBKH5V9XKBVGF5&local=false>; rel="prev"`, resp.LinkHeader)
- suite.Equal(`http://localhost:8080/api/v1/timelines/public?limit=1&max_id=01F8MHCP5P2NWYQ416SBA0XSEV&local=false`, resp.NextLink)
- suite.Equal(`http://localhost:8080/api/v1/timelines/public?limit=1&min_id=01HE7XJ1CG84TBKH5V9XKBVGF5&local=false`, resp.PrevLink)
+ suite.Equal(`<http://localhost:8080/api/v1/timelines/public?limit=1&local=false&max_id=01F8MHCP5P2NWYQ416SBA0XSEV>; rel="next", <http://localhost:8080/api/v1/timelines/public?limit=1&local=false&min_id=01HE7XJ1CG84TBKH5V9XKBVGF5>; rel="prev"`, resp.LinkHeader)
+ suite.Equal(`http://localhost:8080/api/v1/timelines/public?limit=1&local=false&max_id=01F8MHCP5P2NWYQ416SBA0XSEV`, resp.NextLink)
+ suite.Equal(`http://localhost:8080/api/v1/timelines/public?limit=1&local=false&min_id=01HE7XJ1CG84TBKH5V9XKBVGF5`, resp.PrevLink)
}
// A timeline containing a status hidden due to filtering should return other statuses with no error.
@@ -133,10 +136,11 @@ func (suite *PublicTestSuite) TestPublicTimelineGetHideFiltered() {
resp, errWithCode := suite.timeline.PublicTimelineGet(
ctx,
requester,
- maxID,
- sinceID,
- minID,
- limit,
+ &paging.Page{
+ Min: paging.EitherMinID(minID, sinceID),
+ Max: paging.MaxID(maxID),
+ Limit: limit,
+ },
local,
)
suite.NoError(errWithCode)
@@ -149,8 +153,6 @@ func (suite *PublicTestSuite) TestPublicTimelineGetHideFiltered() {
if !filteredStatusFound {
suite.FailNow("precondition failed: status we would filter isn't present in unfiltered timeline")
}
- // The public timeline has no prepared status cache and doesn't need to be pruned,
- // as in the home timeline version of this test.
// Create a filter to hide one status on the timeline.
if err := suite.db.PutFilter(ctx, filter); err != nil {
@@ -161,10 +163,11 @@ func (suite *PublicTestSuite) TestPublicTimelineGetHideFiltered() {
resp, errWithCode = suite.timeline.PublicTimelineGet(
ctx,
requester,
- maxID,
- sinceID,
- minID,
- limit,
+ &paging.Page{
+ Min: paging.EitherMinID(minID, sinceID),
+ Max: paging.MaxID(maxID),
+ Limit: limit,
+ },
local,
)
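The changed URL expectations above (local=false now sorts before max_id / min_id) are consistent with the new paging package presumably building query strings via url.Values.Encode, which orders keys alphabetically. A small standalone illustration:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        v := url.Values{}
        v.Set("limit", "1")
        v.Set("local", "false")
        v.Set("max_id", "01F8MHCP5P2NWYQ416SBA0XSEV")

        // Encode sorts keys alphabetically, so "local" precedes "max_id".
        fmt.Println(v.Encode())
        // Output: limit=1&local=false&max_id=01F8MHCP5P2NWYQ416SBA0XSEV
    }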
diff --git a/internal/processing/timeline/tag.go b/internal/processing/timeline/tag.go
index 811d0bb33..685bac376 100644
--- a/internal/processing/timeline/tag.go
+++ b/internal/processing/timeline/tag.go
@@ -20,18 +20,16 @@ package timeline
import (
"context"
"errors"
- "fmt"
+ "net/http"
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
"github.com/superseriousbusiness/gotosocial/internal/db"
statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status"
- "github.com/superseriousbusiness/gotosocial/internal/filter/usermute"
- "github.com/superseriousbusiness/gotosocial/internal/gtscontext"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/log"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
"github.com/superseriousbusiness/gotosocial/internal/text"
- "github.com/superseriousbusiness/gotosocial/internal/util"
)
// TagTimelineGet gets a pageable timeline for the given
@@ -40,37 +38,77 @@ import (
// to requestingAcct before returning it.
func (p *Processor) TagTimelineGet(
ctx context.Context,
- requestingAcct *gtsmodel.Account,
+ requester *gtsmodel.Account,
tagName string,
maxID string,
sinceID string,
minID string,
limit int,
) (*apimodel.PageableResponse, gtserror.WithCode) {
+
+ // Fetch the requested tag with name.
tag, errWithCode := p.getTag(ctx, tagName)
if errWithCode != nil {
return nil, errWithCode
}
+ // Check the returned tag is useable + listable for this endpoint.
if tag == nil || !*tag.Useable || !*tag.Listable {
+
// Obey mastodon API by returning 404 for this.
- err := fmt.Errorf("tag was not found, or not useable/listable on this instance")
- return nil, gtserror.NewErrorNotFound(err, err.Error())
+ const text = "tag was not found, or not useable/listable on this instance"
+ return nil, gtserror.NewWithCode(http.StatusNotFound, text)
}
- statuses, err := p.state.DB.GetTagTimeline(ctx, tag.ID, maxID, sinceID, minID, limit)
- if err != nil && !errors.Is(err, db.ErrNoEntries) {
- err = gtserror.Newf("db error getting statuses: %w", err)
- return nil, gtserror.NewErrorInternalError(err)
- }
+ // Fetch status timeline for tag.
+ return p.getStatusTimeline(ctx,
- return p.packageTagResponse(
- ctx,
- requestingAcct,
- statuses,
- limit,
- // Use API URL for tag.
+ // Auth'd
+ // account.
+ requester,
+
+ // No
+ // cache.
+ nil,
+
+ // Current
+ // page.
+ &paging.Page{
+ Min: paging.EitherMinID(minID, sinceID),
+ Max: paging.MaxID(maxID),
+ Limit: limit,
+ },
+
+ // Tag timeline name's endpoint.
"/api/v1/timelines/tag/"+tagName,
+
+ // No page
+ // query.
+ nil,
+
+ // Status filter context.
+ statusfilter.FilterContextPublic,
+
+ // Database load function.
+ func(pg *paging.Page) (statuses []*gtsmodel.Status, err error) {
+ return p.state.DB.GetTagTimeline(ctx, tag.ID, pg)
+ },
+
+ // Filtering function,
+ // i.e. filter before caching.
+ func(s *gtsmodel.Status) bool {
+
+ // Check the visibility of passed status to requesting user.
+ ok, err := p.visFilter.StatusPublicTimelineable(ctx, requester, s)
+ if err != nil {
+ log.Errorf(ctx, "error filtering status %s: %v", s.URI, err)
+ }
+ return !ok
+ },
+
+ // Post-filtering function,
+ // i.e. filter after caching.
+ nil,
)
}
@@ -92,69 +130,3 @@ func (p *Processor) getTag(ctx context.Context, tagName string) (*gtsmodel.Tag,
return tag, nil
}
-
-func (p *Processor) packageTagResponse(
- ctx context.Context,
- requestingAcct *gtsmodel.Account,
- statuses []*gtsmodel.Status,
- limit int,
- requestPath string,
-) (*apimodel.PageableResponse, gtserror.WithCode) {
- count := len(statuses)
- if count == 0 {
- return util.EmptyPageableResponse(), nil
- }
-
- var (
- items = make([]interface{}, 0, count)
-
- // Set next + prev values before filtering and API
- // converting, so caller can still page properly.
- nextMaxIDValue = statuses[count-1].ID
- prevMinIDValue = statuses[0].ID
- )
-
- filters, err := p.state.DB.GetFiltersForAccountID(ctx, requestingAcct.ID)
- if err != nil {
- err = gtserror.Newf("couldn't retrieve filters for account %s: %w", requestingAcct.ID, err)
- return nil, gtserror.NewErrorInternalError(err)
- }
-
- mutes, err := p.state.DB.GetAccountMutes(gtscontext.SetBarebones(ctx), requestingAcct.ID, nil)
- if err != nil {
- err = gtserror.Newf("couldn't retrieve mutes for account %s: %w", requestingAcct.ID, err)
- return nil, gtserror.NewErrorInternalError(err)
- }
- compiledMutes := usermute.NewCompiledUserMuteList(mutes)
-
- for _, s := range statuses {
- timelineable, err := p.visFilter.StatusTagTimelineable(ctx, requestingAcct, s)
- if err != nil {
- log.Errorf(ctx, "error checking status visibility: %v", err)
- continue
- }
-
- if !timelineable {
- continue
- }
-
- apiStatus, err := p.converter.StatusToAPIStatus(ctx, s, requestingAcct, statusfilter.FilterContextPublic, filters, compiledMutes)
- if errors.Is(err, statusfilter.ErrHideStatus) {
- continue
- }
- if err != nil {
- log.Errorf(ctx, "error converting to api status: %v", err)
- continue
- }
-
- items = append(items, apiStatus)
- }
-
- return util.PackagePageableResponse(util.PageableResponseParams{
- Items: items,
- Path: requestPath,
- NextMaxIDValue: nextMaxIDValue,
- PrevMinIDValue: prevMinIDValue,
- Limit: limit,
- })
-}
diff --git a/internal/processing/timeline/timeline.go b/internal/processing/timeline/timeline.go
index 5966fe864..54ea2cccd 100644
--- a/internal/processing/timeline/timeline.go
+++ b/internal/processing/timeline/timeline.go
@@ -18,9 +18,33 @@
package timeline
import (
+ "context"
+ "errors"
+ "net/http"
+ "net/url"
+
+ apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
+ timelinepkg "github.com/superseriousbusiness/gotosocial/internal/cache/timeline"
+ "github.com/superseriousbusiness/gotosocial/internal/db"
+ statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status"
+ "github.com/superseriousbusiness/gotosocial/internal/filter/usermute"
"github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
+ "github.com/superseriousbusiness/gotosocial/internal/gtserror"
+ "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+ "github.com/superseriousbusiness/gotosocial/internal/id"
+ "github.com/superseriousbusiness/gotosocial/internal/paging"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
+ "github.com/superseriousbusiness/gotosocial/internal/util/xslices"
+)
+
+var (
+ // pre-prepared URL values to be passed in to
+ // paging response forms. The paging package always
+ // copies values before any modifications so it's
+ // safe to only use a single map variable for these.
+ localOnlyTrue = url.Values{"local": {"true"}}
+ localOnlyFalse = url.Values{"local": {"false"}}
)
type Processor struct {
@@ -36,3 +60,114 @@ func New(state *state.State, converter *typeutils.Converter, visFilter *visibili
visFilter: visFilter,
}
}
+
+func (p *Processor) getStatusTimeline(
+ ctx context.Context,
+ requester *gtsmodel.Account,
+ timeline *timelinepkg.StatusTimeline,
+ page *paging.Page,
+ pagePath string,
+ pageQuery url.Values,
+ filterCtx statusfilter.FilterContext,
+ loadPage func(*paging.Page) (statuses []*gtsmodel.Status, err error),
+ filter func(*gtsmodel.Status) (delete bool),
+ postFilter func(*gtsmodel.Status) (remove bool),
+) (
+ *apimodel.PageableResponse,
+ gtserror.WithCode,
+) {
+ var err error
+ var filters []*gtsmodel.Filter
+ var mutes *usermute.CompiledUserMuteList
+
+ if requester != nil {
+ // Fetch all filters relevant for requesting account.
+ filters, err = p.state.DB.GetFiltersForAccountID(ctx,
+ requester.ID,
+ )
+ if err != nil && !errors.Is(err, db.ErrNoEntries) {
+ err := gtserror.Newf("error getting account filters: %w", err)
+ return nil, gtserror.NewErrorInternalError(err)
+ }
+
+ // Get a list of all account mutes for requester.
+ allMutes, err := p.state.DB.GetAccountMutes(ctx,
+ requester.ID,
+ nil, // i.e. all
+ )
+ if err != nil && !errors.Is(err, db.ErrNoEntries) {
+ err := gtserror.Newf("error getting account mutes: %w", err)
+ return nil, gtserror.NewErrorInternalError(err)
+ }
+
+ // Compile all account mutes to useable form.
+ mutes = usermute.NewCompiledUserMuteList(allMutes)
+ }
+
+ // Ensure we have valid
+ // input paging cursor.
+ id.ValidatePage(page)
+
+ // Load status page via timeline cache, also
+ // getting lo, hi values for next, prev pages.
+ //
+ // NOTE: this safely handles the case of a nil
+ // input timeline, i.e. uncached timeline type.
+ apiStatuses, lo, hi, err := timeline.Load(ctx,
+
+ // Status page
+ // to load.
+ page,
+
+ // Caller provided database
+ // status page loading function.
+ loadPage,
+
+ // Status load function for cached timeline entries.
+ func(ids []string) ([]*gtsmodel.Status, error) {
+ return p.state.DB.GetStatusesByIDs(ctx, ids)
+ },
+
+ // Call provided status
+ // filtering function.
+ filter,
+
+ // Frontend API model preparation function.
+ func(status *gtsmodel.Status) (*apimodel.Status, error) {
+
+ // Check if status needs filtering OUTSIDE of caching stage.
+ // TODO: this will be moved to separate postFilter hook when
+ // all filtering has been removed from the type converter.
+ if postFilter != nil && postFilter(status) {
+ return nil, nil
+ }
+
+ // Finally, pass status to get converted to API model.
+ apiStatus, err := p.converter.StatusToAPIStatus(ctx,
+ status,
+ requester,
+ filterCtx,
+ filters,
+ mutes,
+ )
+ if err != nil && !errors.Is(err, statusfilter.ErrHideStatus) {
+ return nil, err
+ }
+ return apiStatus, nil
+ },
+ )
+
+ if err != nil {
+ err := gtserror.Newf("error loading timeline: %w", err)
+ return nil, gtserror.WrapWithCode(http.StatusInternalServerError, err)
+ }
+
+ // Package returned API statuses as pageable response.
+ return paging.PackageResponse(paging.ResponseParams{
+ Items: xslices.ToAny(apiStatuses),
+ Path: pagePath,
+ Next: page.Next(lo, hi),
+ Prev: page.Prev(lo, hi),
+ Query: pageQuery,
+ }), nil
+}
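For orientation, a sketch of the call pattern that home.go, list.go, public.go, and tag.go above all follow when invoking getStatusTimeline. The endpoint path and the load function here are placeholders for illustration only (fragment, not compilable on its own):

    return p.getStatusTimeline(ctx,
        requester, // may be nil for anonymous requests (public / tag timelines)
        nil,       // timeline cache; nil is allowed for uncached timeline types
        page,      // validated / defaulted via id.ValidatePage
        "/api/v1/timelines/example", // placeholder endpoint path
        nil,                         // extra page query values (e.g. localOnlyTrue), or nil
        statusfilter.FilterContextPublic,

        // Database load function for a page of statuses (placeholder).
        func(pg *paging.Page) ([]*gtsmodel.Status, error) {
            return exampleLoadPage(ctx, pg)
        },

        // Pre-filter: return true to drop a status before caching.
        func(s *gtsmodel.Status) bool { return false },

        // Post-filter: return true to drop after caching; nil to skip.
        nil,
    )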
diff --git a/internal/processing/workers/fromclientapi.go b/internal/processing/workers/fromclientapi.go
index 28a2b37b9..661fea866 100644
--- a/internal/processing/workers/fromclientapi.go
+++ b/internal/processing/workers/fromclientapi.go
@@ -371,7 +371,7 @@ func (p *clientAPI) CreateStatus(ctx context.Context, cMsg *messages.FromClientA
if status.InReplyToID != "" {
// Interaction counts changed on the replied status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, status.InReplyToID)
+ p.surface.invalidateStatusFromTimelines(status.InReplyToID)
}
return nil
@@ -413,7 +413,7 @@ func (p *clientAPI) CreatePollVote(ctx context.Context, cMsg *messages.FromClien
}
// Interaction counts changed on the source status, uncache from timelines.
- p.surface.invalidateStatusFromTimelines(ctx, vote.Poll.StatusID)
+ p.surface.invalidateStatusFromTimelines(vote.Poll.StatusID)
return nil
}
@@ -565,7 +565,7 @@ func (p *clientAPI) CreateLike(ctx context.Context, cMsg *messages.FromClientAPI
// Interaction counts changed on the faved status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, fave.StatusID)
+ p.surface.invalidateStatusFromTimelines(fave.StatusID)
return nil
}
@@ -671,7 +671,7 @@ func (p *clientAPI) CreateAnnounce(ctx context.Context, cMsg *messages.FromClien
// Interaction counts changed on the boosted status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, boost.BoostOfID)
+ p.surface.invalidateStatusFromTimelines(boost.BoostOfID)
return nil
}
@@ -682,22 +682,20 @@ func (p *clientAPI) CreateBlock(ctx context.Context, cMsg *messages.FromClientAP
return gtserror.Newf("%T not parseable as *gtsmodel.Block", cMsg.GTSModel)
}
- // Remove blockee's statuses from blocker's timeline.
- if err := p.state.Timelines.Home.WipeItemsFromAccountID(
- ctx,
- block.AccountID,
- block.TargetAccountID,
- ); err != nil {
- return gtserror.Newf("error wiping timeline items for block: %w", err)
+ if block.Account.IsLocal() {
+ // Remove posts by target from origin's timelines.
+ p.surface.removeRelationshipFromTimelines(ctx,
+ block.AccountID,
+ block.TargetAccountID,
+ )
}
- // Remove blocker's statuses from blockee's timeline.
- if err := p.state.Timelines.Home.WipeItemsFromAccountID(
- ctx,
- block.TargetAccountID,
- block.AccountID,
- ); err != nil {
- return gtserror.Newf("error wiping timeline items for block: %w", err)
+ if block.TargetAccount.IsLocal() {
+ // Remove posts by origin from target's timelines.
+ p.surface.removeRelationshipFromTimelines(ctx,
+ block.TargetAccountID,
+ block.AccountID,
+ )
}
// TODO: same with notifications?
@@ -737,7 +735,7 @@ func (p *clientAPI) UpdateStatus(ctx context.Context, cMsg *messages.FromClientA
}
// Status representation has changed, invalidate from timelines.
- p.surface.invalidateStatusFromTimelines(ctx, status.ID)
+ p.surface.invalidateStatusFromTimelines(status.ID)
return nil
}
@@ -858,6 +856,22 @@ func (p *clientAPI) UndoFollow(ctx context.Context, cMsg *messages.FromClientAPI
log.Errorf(ctx, "error updating account stats: %v", err)
}
+ if follow.Account.IsLocal() {
+ // Remove posts by target from origin's timelines.
+ p.surface.removeRelationshipFromTimelines(ctx,
+ follow.AccountID,
+ follow.TargetAccountID,
+ )
+ }
+
+ if follow.TargetAccount.IsLocal() {
+ // Remove posts by origin from target's timelines.
+ p.surface.removeRelationshipFromTimelines(ctx,
+ follow.TargetAccountID,
+ follow.AccountID,
+ )
+ }
+
if err := p.federate.UndoFollow(ctx, follow); err != nil {
log.Errorf(ctx, "error federating follow undo: %v", err)
}
@@ -890,7 +904,7 @@ func (p *clientAPI) UndoFave(ctx context.Context, cMsg *messages.FromClientAPI)
// Interaction counts changed on the faved status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, statusFave.StatusID)
+ p.surface.invalidateStatusFromTimelines(statusFave.StatusID)
return nil
}
@@ -910,9 +924,8 @@ func (p *clientAPI) UndoAnnounce(ctx context.Context, cMsg *messages.FromClientA
log.Errorf(ctx, "error updating account stats: %v", err)
}
- if err := p.surface.deleteStatusFromTimelines(ctx, status.ID); err != nil {
- log.Errorf(ctx, "error removing timelined status: %v", err)
- }
+ // Delete the boost wrapper status from timelines.
+ p.surface.deleteStatusFromTimelines(ctx, status.ID)
if err := p.federate.UndoAnnounce(ctx, status); err != nil {
log.Errorf(ctx, "error federating announce undo: %v", err)
@@ -920,7 +933,7 @@ func (p *clientAPI) UndoAnnounce(ctx context.Context, cMsg *messages.FromClientA
// Interaction counts changed on the boosted status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, status.BoostOfID)
+ p.surface.invalidateStatusFromTimelines(status.BoostOfID)
return nil
}
@@ -983,7 +996,7 @@ func (p *clientAPI) DeleteStatus(ctx context.Context, cMsg *messages.FromClientA
if status.InReplyToID != "" {
// Interaction counts changed on the replied status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, status.InReplyToID)
+ p.surface.invalidateStatusFromTimelines(status.InReplyToID)
}
return nil
@@ -1026,6 +1039,23 @@ func (p *clientAPI) DeleteAccountOrUser(ctx context.Context, cMsg *messages.From
p.state.Workers.Federator.Queue.Delete("Receiving.ID", account.ID)
p.state.Workers.Federator.Queue.Delete("TargetURI", account.URI)
+ // Remove any entries authored by account from timelines.
+ p.surface.removeTimelineEntriesByAccount(account.ID)
+
+ // Remove any of their cached timelines.
+ p.state.Caches.Timelines.Home.Delete(account.ID)
+
+ // Get the IDs of all the lists owned by the given account ID.
+ listIDs, err := p.state.DB.GetListIDsByAccountID(ctx, account.ID)
+ if err != nil {
+ log.Errorf(ctx, "error getting lists for account %s: %v", account.ID, err)
+ }
+
+ // Remove list timelines of account.
+ for _, listID := range listIDs {
+ p.state.Caches.Timelines.List.Delete(listID)
+ }
+
if err := p.federate.DeleteAccount(ctx, cMsg.Target); err != nil {
log.Errorf(ctx, "error federating account delete: %v", err)
}
@@ -1169,7 +1199,7 @@ func (p *clientAPI) AcceptLike(ctx context.Context, cMsg *messages.FromClientAPI
// Interaction counts changed on the faved status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, req.Like.StatusID)
+ p.surface.invalidateStatusFromTimelines(req.Like.StatusID)
return nil
}
@@ -1202,7 +1232,7 @@ func (p *clientAPI) AcceptReply(ctx context.Context, cMsg *messages.FromClientAP
// Interaction counts changed on the replied status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, reply.InReplyToID)
+ p.surface.invalidateStatusFromTimelines(reply.InReplyToID)
return nil
}
@@ -1240,7 +1270,7 @@ func (p *clientAPI) AcceptAnnounce(ctx context.Context, cMsg *messages.FromClien
// Interaction counts changed on the original status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, boost.BoostOfID)
+ p.surface.invalidateStatusFromTimelines(boost.BoostOfID)
return nil
}
diff --git a/internal/processing/workers/fromfediapi.go b/internal/processing/workers/fromfediapi.go
index 2e513449b..3e0f0ba59 100644
--- a/internal/processing/workers/fromfediapi.go
+++ b/internal/processing/workers/fromfediapi.go
@@ -197,9 +197,22 @@ func (p *Processor) ProcessFromFediAPI(ctx context.Context, fMsg *messages.FromF
// UNDO SOMETHING
case ap.ActivityUndo:
+ switch fMsg.APObjectType {
+ // UNDO FOLLOW
+ case ap.ActivityFollow:
+ return p.fediAPI.UndoFollow(ctx, fMsg)
+
+ // UNDO BLOCK
+ case ap.ActivityBlock:
+ return p.fediAPI.UndoBlock(ctx, fMsg)
+
// UNDO ANNOUNCE
- if fMsg.APObjectType == ap.ActivityAnnounce {
+ case ap.ActivityAnnounce:
return p.fediAPI.UndoAnnounce(ctx, fMsg)
+
+ // UNDO LIKE
+ case ap.ActivityLike:
+ return p.fediAPI.UndoFave(ctx, fMsg)
}
}
@@ -346,7 +359,7 @@ func (p *fediAPI) CreateStatus(ctx context.Context, fMsg *messages.FromFediAPI)
// Interaction counts changed on the replied status; uncache the
// prepared version from all timelines. The status dereferencer
// functions will ensure necessary ancestors exist before this point.
- p.surface.invalidateStatusFromTimelines(ctx, status.InReplyToID)
+ p.surface.invalidateStatusFromTimelines(status.InReplyToID)
}
return nil
@@ -393,7 +406,7 @@ func (p *fediAPI) CreatePollVote(ctx context.Context, fMsg *messages.FromFediAPI
}
// Interaction counts changed, uncache from timelines.
- p.surface.invalidateStatusFromTimelines(ctx, status.ID)
+ p.surface.invalidateStatusFromTimelines(status.ID)
return nil
}
@@ -428,7 +441,7 @@ func (p *fediAPI) UpdatePollVote(ctx context.Context, fMsg *messages.FromFediAPI
}
// Interaction counts changed, uncache from timelines.
- p.surface.invalidateStatusFromTimelines(ctx, status.ID)
+ p.surface.invalidateStatusFromTimelines(status.ID)
return nil
}
@@ -573,7 +586,7 @@ func (p *fediAPI) CreateLike(ctx context.Context, fMsg *messages.FromFediAPI) er
// Interaction counts changed on the faved status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, fave.StatusID)
+ p.surface.invalidateStatusFromTimelines(fave.StatusID)
return nil
}
@@ -690,7 +703,7 @@ func (p *fediAPI) CreateAnnounce(ctx context.Context, fMsg *messages.FromFediAPI
// Interaction counts changed on the original status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, boost.BoostOfID)
+ p.surface.invalidateStatusFromTimelines(boost.BoostOfID)
return nil
}
@@ -701,53 +714,32 @@ func (p *fediAPI) CreateBlock(ctx context.Context, fMsg *messages.FromFediAPI) e
return gtserror.Newf("%T not parseable as *gtsmodel.Block", fMsg.GTSModel)
}
- // Remove each account's posts from the other's timelines.
- //
- // First home timelines.
- if err := p.state.Timelines.Home.WipeItemsFromAccountID(
- ctx,
- block.AccountID,
- block.TargetAccountID,
- ); err != nil {
- log.Errorf(ctx, "error wiping items from block -> target's home timeline: %v", err)
+ if block.Account.IsLocal() {
+ // Remove posts by target from origin's timelines.
+ p.surface.removeRelationshipFromTimelines(ctx,
+ block.AccountID,
+ block.TargetAccountID,
+ )
}
- if err := p.state.Timelines.Home.WipeItemsFromAccountID(
- ctx,
- block.TargetAccountID,
- block.AccountID,
- ); err != nil {
- log.Errorf(ctx, "error wiping items from target -> block's home timeline: %v", err)
- }
-
- // Now list timelines.
- if err := p.state.Timelines.List.WipeItemsFromAccountID(
- ctx,
- block.AccountID,
- block.TargetAccountID,
- ); err != nil {
- log.Errorf(ctx, "error wiping items from block -> target's list timeline(s): %v", err)
- }
-
- if err := p.state.Timelines.List.WipeItemsFromAccountID(
- ctx,
- block.TargetAccountID,
- block.AccountID,
- ); err != nil {
- log.Errorf(ctx, "error wiping items from target -> block's list timeline(s): %v", err)
+ if block.TargetAccount.IsLocal() {
+ // Remove posts by origin from target's timelines.
+ p.surface.removeRelationshipFromTimelines(ctx,
+ block.TargetAccountID,
+ block.AccountID,
+ )
}
// Remove any follows that existed between blocker + blockee.
- if err := p.state.DB.DeleteFollow(
- ctx,
+ // (note this handles removing any necessary list entries).
+ if err := p.state.DB.DeleteFollow(ctx,
block.AccountID,
block.TargetAccountID,
); err != nil {
log.Errorf(ctx, "error deleting follow from block -> target: %v", err)
}
- if err := p.state.DB.DeleteFollow(
- ctx,
+ if err := p.state.DB.DeleteFollow(ctx,
block.TargetAccountID,
block.AccountID,
); err != nil {
@@ -755,16 +747,14 @@ func (p *fediAPI) CreateBlock(ctx context.Context, fMsg *messages.FromFediAPI) e
}
// Remove any follow requests that existed between blocker + blockee.
- if err := p.state.DB.DeleteFollowRequest(
- ctx,
+ if err := p.state.DB.DeleteFollowRequest(ctx,
block.AccountID,
block.TargetAccountID,
); err != nil {
log.Errorf(ctx, "error deleting follow request from block -> target: %v", err)
}
- if err := p.state.DB.DeleteFollowRequest(
- ctx,
+ if err := p.state.DB.DeleteFollowRequest(ctx,
block.TargetAccountID,
block.AccountID,
); err != nil {
@@ -871,7 +861,7 @@ func (p *fediAPI) AcceptReply(ctx context.Context, fMsg *messages.FromFediAPI) e
// Interaction counts changed on the replied-to status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, status.InReplyToID)
+ p.surface.invalidateStatusFromTimelines(status.InReplyToID)
return nil
}
@@ -920,11 +910,11 @@ func (p *fediAPI) AcceptRemoteStatus(ctx context.Context, fMsg *messages.FromFed
// Interaction counts changed on the interacted status;
// uncache the prepared version from all timelines.
if status.InReplyToID != "" {
- p.surface.invalidateStatusFromTimelines(ctx, status.InReplyToID)
+ p.surface.invalidateStatusFromTimelines(status.InReplyToID)
}
if status.BoostOfID != "" {
- p.surface.invalidateStatusFromTimelines(ctx, status.BoostOfID)
+ p.surface.invalidateStatusFromTimelines(status.BoostOfID)
}
return nil
@@ -953,7 +943,7 @@ func (p *fediAPI) AcceptAnnounce(ctx context.Context, fMsg *messages.FromFediAPI
// Interaction counts changed on the boosted status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, boost.BoostOfID)
+ p.surface.invalidateStatusFromTimelines(boost.BoostOfID)
return nil
}
@@ -1004,7 +994,7 @@ func (p *fediAPI) UpdateStatus(ctx context.Context, fMsg *messages.FromFediAPI)
}
// Status representation was refetched, uncache from timelines.
- p.surface.invalidateStatusFromTimelines(ctx, status.ID)
+ p.surface.invalidateStatusFromTimelines(status.ID)
return nil
}
@@ -1063,7 +1053,7 @@ func (p *fediAPI) DeleteStatus(ctx context.Context, fMsg *messages.FromFediAPI)
if status.InReplyToID != "" {
// Interaction counts changed on the replied status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, status.InReplyToID)
+ p.surface.invalidateStatusFromTimelines(status.InReplyToID)
}
return nil
@@ -1090,6 +1080,9 @@ func (p *fediAPI) DeleteAccount(ctx context.Context, fMsg *messages.FromFediAPI)
p.state.Workers.Federator.Queue.Delete("Requesting.ID", account.ID)
p.state.Workers.Federator.Queue.Delete("TargetURI", account.URI)
+ // Remove any entries authored by account from timelines.
+ p.surface.removeTimelineEntriesByAccount(account.ID)
+
// First perform the actual account deletion.
if err := p.account.Delete(ctx, account, account.ID); err != nil {
log.Errorf(ctx, "error deleting account: %v", err)
@@ -1208,6 +1201,42 @@ func (p *fediAPI) RejectAnnounce(ctx context.Context, fMsg *messages.FromFediAPI
return nil
}
+func (p *fediAPI) UndoFollow(ctx context.Context, fMsg *messages.FromFediAPI) error {
+ follow, ok := fMsg.GTSModel.(*gtsmodel.Follow)
+ if !ok {
+ return gtserror.Newf("%T not parseable as *gtsmodel.Follow", fMsg.GTSModel)
+ }
+
+ if follow.Account.IsLocal() {
+ // Remove posts by target from origin's timelines.
+ p.surface.removeRelationshipFromTimelines(ctx,
+ follow.AccountID,
+ follow.TargetAccountID,
+ )
+ }
+
+ if follow.TargetAccount.IsLocal() {
+ // Remove posts by origin from target's timelines.
+ p.surface.removeRelationshipFromTimelines(ctx,
+ follow.TargetAccountID,
+ follow.AccountID,
+ )
+ }
+
+ return nil
+}
+
+func (p *fediAPI) UndoBlock(ctx context.Context, fMsg *messages.FromFediAPI) error {
+ _, ok := fMsg.GTSModel.(*gtsmodel.Block)
+ if !ok {
+ return gtserror.Newf("%T not parseable as *gtsmodel.Block", fMsg.GTSModel)
+ }
+
+ // TODO: any required changes
+
+ return nil
+}
+
func (p *fediAPI) UndoAnnounce(
ctx context.Context,
fMsg *messages.FromFediAPI,
@@ -1228,13 +1257,24 @@ func (p *fediAPI) UndoAnnounce(
}
// Remove the boost wrapper from all timelines.
- if err := p.surface.deleteStatusFromTimelines(ctx, boost.ID); err != nil {
- log.Errorf(ctx, "error removing timelined boost: %v", err)
- }
+ p.surface.deleteStatusFromTimelines(ctx, boost.ID)
// Interaction counts changed on the boosted status;
// uncache the prepared version from all timelines.
- p.surface.invalidateStatusFromTimelines(ctx, boost.BoostOfID)
+ p.surface.invalidateStatusFromTimelines(boost.BoostOfID)
+
+ return nil
+}
+
+func (p *fediAPI) UndoFave(ctx context.Context, fMsg *messages.FromFediAPI) error {
+ statusFave, ok := fMsg.GTSModel.(*gtsmodel.StatusFave)
+ if !ok {
+ return gtserror.Newf("%T not parseable as *gtsmodel.StatusFave", fMsg.GTSModel)
+ }
+
+ // Interaction counts changed on the faved status;
+ // uncache the prepared version from all timelines.
+ p.surface.invalidateStatusFromTimelines(statusFave.StatusID)
return nil
}
diff --git a/internal/processing/workers/surfacetimeline.go b/internal/processing/workers/surfacetimeline.go
index b071bd72e..0f2e80d0f 100644
--- a/internal/processing/workers/surfacetimeline.go
+++ b/internal/processing/workers/surfacetimeline.go
@@ -21,6 +21,7 @@ import (
"context"
"errors"
+ "github.com/superseriousbusiness/gotosocial/internal/cache/timeline"
statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status"
"github.com/superseriousbusiness/gotosocial/internal/filter/usermute"
"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
@@ -28,7 +29,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/log"
"github.com/superseriousbusiness/gotosocial/internal/stream"
- "github.com/superseriousbusiness/gotosocial/internal/timeline"
"github.com/superseriousbusiness/gotosocial/internal/util"
)
@@ -161,21 +161,16 @@ func (s *Surface) timelineAndNotifyStatusForFollowers(
// Add status to home timeline for owner of
// this follow (origin account), if applicable.
- homeTimelined, err = s.timelineStatus(ctx,
- s.State.Timelines.Home.IngestOne,
- follow.AccountID, // home timelines are keyed by account ID
+ if homeTimelined := s.timelineStatus(ctx,
+ s.State.Caches.Timelines.Home.MustGet(follow.AccountID),
follow.Account,
status,
stream.TimelineHome,
+ statusfilter.FilterContextHome,
filters,
mutes,
- )
- if err != nil {
- log.Errorf(ctx, "error home timelining status: %v", err)
- continue
- }
+ ); homeTimelined {
- if homeTimelined {
// If hometimelined, add to list of returned account IDs.
homeTimelinedAccountIDs = append(homeTimelinedAccountIDs, follow.AccountID)
}
@@ -261,22 +256,16 @@ func (s *Surface) listTimelineStatusForFollow(
exclusive = exclusive || *list.Exclusive
// At this point we are certain this status
- // should be included in the timeline of the
- // list that this list entry belongs to.
- listTimelined, err := s.timelineStatus(
- ctx,
- s.State.Timelines.List.IngestOne,
- list.ID, // list timelines are keyed by list ID
+ // should be included in timeline of this list.
+ listTimelined := s.timelineStatus(ctx,
+ s.State.Caches.Timelines.List.MustGet(list.ID),
follow.Account,
status,
stream.TimelineList+":"+list.ID, // key streamType to this specific list
+ statusfilter.FilterContextHome,
filters,
mutes,
)
- if err != nil {
- log.Errorf(ctx, "error adding status to list timeline: %v", err)
- continue
- }
// Update flag based on if timelined.
timelined = timelined || listTimelined
@@ -367,53 +356,48 @@ func (s *Surface) listEligible(
}
}
-// timelineStatus uses the provided ingest function to put the given
-// status in a timeline with the given ID, if it's timelineable.
-//
-// If the status was inserted into the timeline, true will be returned
-// + it will also be streamed to the user using the given streamType.
+// timelineStatus will insert the given status into the given timeline, if it's
+// timelineable. if the status was inserted into the timeline, true will be returned.
func (s *Surface) timelineStatus(
ctx context.Context,
- ingest func(context.Context, string, timeline.Timelineable) (bool, error),
- timelineID string,
+ timeline *timeline.StatusTimeline,
account *gtsmodel.Account,
status *gtsmodel.Status,
streamType string,
+ filterCtx statusfilter.FilterContext,
filters []*gtsmodel.Filter,
mutes *usermute.CompiledUserMuteList,
-) (bool, error) {
+) bool {
- // Ingest status into given timeline using provided function.
- if inserted, err := ingest(ctx, timelineID, status); err != nil &&
- !errors.Is(err, statusfilter.ErrHideStatus) {
- err := gtserror.Newf("error ingesting status %s: %w", status.ID, err)
- return false, err
- } else if !inserted {
- // Nothing more to do.
- return false, nil
- }
-
- // Convert updated database model to frontend model.
- apiStatus, err := s.Converter.StatusToAPIStatus(ctx,
+ // Attempt to convert status to frontend API representation,
+ // this will check whether status is filtered / muted.
+ apiModel, err := s.Converter.StatusToAPIStatus(ctx,
status,
account,
- statusfilter.FilterContextHome,
+ filterCtx,
filters,
mutes,
)
if err != nil && !errors.Is(err, statusfilter.ErrHideStatus) {
- err := gtserror.Newf("error converting status %s to frontend representation: %w", status.ID, err)
- return true, err
+ log.Errorf(ctx, "error converting status %s to frontend: %v", status.URI, err)
}
- if apiStatus != nil {
- // The status was inserted so stream it to the user.
- s.Stream.Update(ctx, account, apiStatus, streamType)
- return true, nil
+ // Insert status to timeline cache regardless of
+ // whether the API model was successfully prepared.
+ repeatBoost := timeline.InsertOne(status, apiModel)
+
+ if apiModel == nil {
+ // Status was
+ // filtered / muted.
+ return false
}
- // Status was hidden.
- return false, nil
+ if !repeatBoost {
+ // Only stream if not repeated boost of recent status.
+ s.Stream.Update(ctx, account, apiModel, streamType)
+ }
+
+ return true
}
// timelineAndNotifyStatusForTagFollowers inserts the status into the
@@ -444,23 +428,15 @@ func (s *Surface) timelineAndNotifyStatusForTagFollowers(
continue
}
- if _, err := s.timelineStatus(
- ctx,
- s.State.Timelines.Home.IngestOne,
- tagFollowerAccount.ID, // home timelines are keyed by account ID
+ _ = s.timelineStatus(ctx,
+ s.State.Caches.Timelines.Home.MustGet(tagFollowerAccount.ID),
tagFollowerAccount,
status,
stream.TimelineHome,
+ statusfilter.FilterContextHome,
filters,
mutes,
- ); err != nil {
- errs.Appendf(
- "error inserting status %s into home timeline for account %s: %w",
- status.ID,
- tagFollowerAccount.ID,
- err,
- )
- }
+ )
}
return errs.Combine()
@@ -550,39 +526,6 @@ func (s *Surface) tagFollowersForStatus(
return visibleTagFollowerAccounts, errs.Combine()
}
-// deleteStatusFromTimelines completely removes the given status from all timelines.
-// It will also stream deletion of the status to all open streams.
-func (s *Surface) deleteStatusFromTimelines(ctx context.Context, statusID string) error {
- if err := s.State.Timelines.Home.WipeItemFromAllTimelines(ctx, statusID); err != nil {
- return err
- }
- if err := s.State.Timelines.List.WipeItemFromAllTimelines(ctx, statusID); err != nil {
- return err
- }
- s.Stream.Delete(ctx, statusID)
- return nil
-}
-
-// invalidateStatusFromTimelines does cache invalidation on the given status by
-// unpreparing it from all timelines, forcing it to be prepared again (with updated
-// stats, boost counts, etc) next time it's fetched by the timeline owner. This goes
-// both for the status itself, and for any boosts of the status.
-func (s *Surface) invalidateStatusFromTimelines(ctx context.Context, statusID string) {
- if err := s.State.Timelines.Home.UnprepareItemFromAllTimelines(ctx, statusID); err != nil {
- log.
- WithContext(ctx).
- WithField("statusID", statusID).
- Errorf("error unpreparing status from home timelines: %v", err)
- }
-
- if err := s.State.Timelines.List.UnprepareItemFromAllTimelines(ctx, statusID); err != nil {
- log.
- WithContext(ctx).
- WithField("statusID", statusID).
- Errorf("error unpreparing status from list timelines: %v", err)
- }
-}
-
// timelineStatusUpdate looks up HOME and LIST timelines of accounts
// that follow the status author or tags and pushes edit messages into any
// active streams.
@@ -859,3 +802,47 @@ func (s *Surface) timelineStatusUpdateForTagFollowers(
}
return errs.Combine()
}
+
+// deleteStatusFromTimelines completely removes the given status from all timelines.
+// It will also stream deletion of the status to all open streams.
+func (s *Surface) deleteStatusFromTimelines(ctx context.Context, statusID string) {
+ s.State.Caches.Timelines.Home.RemoveByStatusIDs(statusID)
+ s.State.Caches.Timelines.List.RemoveByStatusIDs(statusID)
+ s.Stream.Delete(ctx, statusID)
+}
+
+// invalidateStatusFromTimelines does cache invalidation on the given status by
+// unpreparing it from all timelines, forcing it to be prepared again (with updated
+// stats, boost counts, etc) next time it's fetched by the timeline owner. This goes
+// both for the status itself, and for any boosts of the status.
+func (s *Surface) invalidateStatusFromTimelines(statusID string) {
+ s.State.Caches.Timelines.Home.UnprepareByStatusIDs(statusID)
+ s.State.Caches.Timelines.List.UnprepareByStatusIDs(statusID)
+}
+
+// removeTimelineEntriesByAccount removes all cached timeline entries authored by account ID.
+func (s *Surface) removeTimelineEntriesByAccount(accountID string) {
+ s.State.Caches.Timelines.Home.RemoveByAccountIDs(accountID)
+ s.State.Caches.Timelines.List.RemoveByAccountIDs(accountID)
+}
+
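+// removeRelationshipFromTimelines removes all cached timeline entries authored
+// by targetAccountID from the home timeline and list timelines owned by
+// timelineAccountID.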
+func (s *Surface) removeRelationshipFromTimelines(ctx context.Context, timelineAccountID string, targetAccountID string) {
+ // Remove all statuses by target account
+ // from given account's home timeline.
+ s.State.Caches.Timelines.Home.
+ MustGet(timelineAccountID).
+ RemoveByAccountIDs(targetAccountID)
+
+ // Get the IDs of all the lists owned by the given account ID.
+ listIDs, err := s.State.DB.GetListIDsByAccountID(ctx, timelineAccountID)
+ if err != nil {
+ log.Errorf(ctx, "error getting lists for account %s: %v", timelineAccountID, err)
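+ // Continue with whatever list IDs we did get (possibly none).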
+ }
+
+ for _, listID := range listIDs {
+ // Remove all statuses by target account
+ // from given account's list timelines.
+ s.State.Caches.Timelines.List.MustGet(listID).
+ RemoveByAccountIDs(targetAccountID)
+ }
+}
diff --git a/internal/processing/workers/util.go b/internal/processing/workers/util.go
index b358dc951..d844ab762 100644
--- a/internal/processing/workers/util.go
+++ b/internal/processing/workers/util.go
@@ -172,15 +172,11 @@ func (u *utils) wipeStatus(
}
// Remove the boost from any and all timelines.
- if err := u.surface.deleteStatusFromTimelines(ctx, boost.ID); err != nil {
- errs.Appendf("error deleting boost from timelines: %w", err)
- }
+ u.surface.deleteStatusFromTimelines(ctx, boost.ID)
}
// Delete the status itself from any and all timelines.
- if err := u.surface.deleteStatusFromTimelines(ctx, status.ID); err != nil {
- errs.Appendf("error deleting status from timelines: %w", err)
- }
+ u.surface.deleteStatusFromTimelines(ctx, status.ID)
// Delete this status from any conversations it's part of.
if err := u.state.DB.DeleteStatusFromConversations(ctx, status.ID); err != nil {
diff --git a/internal/state/state.go b/internal/state/state.go
index 8aefa658a..d6f58e714 100644
--- a/internal/state/state.go
+++ b/internal/state/state.go
@@ -23,7 +23,6 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/cache"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/storage"
- "github.com/superseriousbusiness/gotosocial/internal/timeline"
"github.com/superseriousbusiness/gotosocial/internal/workers"
)
@@ -34,11 +33,10 @@ import (
// subpackage initialization, while the returned subpackage type will later
// then be set and stored within the State{} itself.
type State struct {
- // Caches provides access to this state's collection of caches.
- Caches cache.Caches
- // Timelines provides access to this state's collection of timelines.
- Timelines timeline.Timelines
+ // Caches provides access to this
+ // state's collection of caches.
+ Caches cache.Caches
// DB provides access to the database.
DB db.DB
@@ -59,7 +57,8 @@ type State struct {
// pinned statuses, creating notifs, etc.
ProcessingLocks mutexes.MutexMap
- // Storage provides access to the storage driver.
+ // Storage provides access
+ // to the storage driver.
Storage *storage.Driver
// Workers provides access to this
diff --git a/internal/timeline/get.go b/internal/timeline/get.go
deleted file mode 100644
index 06ee8c174..000000000
--- a/internal/timeline/get.go
+++ /dev/null
@@ -1,428 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package timeline
-
-import (
- "container/list"
- "context"
- "errors"
- "time"
-
- "codeberg.org/gruf/go-kv"
- "github.com/superseriousbusiness/gotosocial/internal/db"
- statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status"
- "github.com/superseriousbusiness/gotosocial/internal/gtserror"
- "github.com/superseriousbusiness/gotosocial/internal/id"
- "github.com/superseriousbusiness/gotosocial/internal/log"
-)
-
-func (t *timeline) LastGot() time.Time {
- t.Lock()
- defer t.Unlock()
- return t.lastGot
-}
-
-func (t *timeline) Get(ctx context.Context, amount int, maxID string, sinceID string, minID string, prepareNext bool) ([]Preparable, error) {
- l := log.WithContext(ctx).
- WithFields(kv.Fields{
- {"accountID", t.timelineID},
- {"amount", amount},
- {"maxID", maxID},
- {"sinceID", sinceID},
- {"minID", minID},
- }...)
- l.Trace("entering get and updating t.lastGot")
-
- // Regardless of what happens below, update the
- // last time Get was called for this timeline.
- t.Lock()
- t.lastGot = time.Now()
- t.Unlock()
-
- var (
- items []Preparable
- err error
- )
-
- switch {
- case maxID == "" && sinceID == "" && minID == "":
- // No params are defined so just fetch from the top.
- // This is equivalent to a user starting to view
- // their timeline from newest -> older posts.
- items, err = t.getXBetweenIDs(ctx, amount, id.Highest, id.Lowest, true)
-
- // Cache expected next query to speed up scrolling.
- // Assume the user will be scrolling downwards from
- // the final ID in items.
- if prepareNext && err == nil && len(items) != 0 {
- nextMaxID := items[len(items)-1].GetID()
- t.prepareNextQuery(amount, nextMaxID, "", "")
- }
-
- case maxID != "" && sinceID == "" && minID == "":
- // Only maxID is defined, so fetch from maxID onwards.
- // This is equivalent to a user paging further down
- // their timeline from newer -> older posts.
- items, err = t.getXBetweenIDs(ctx, amount, maxID, id.Lowest, true)
-
- // Cache expected next query to speed up scrolling.
- // Assume the user will be scrolling downwards from
- // the final ID in items.
- if prepareNext && err == nil && len(items) != 0 {
- nextMaxID := items[len(items)-1].GetID()
- t.prepareNextQuery(amount, nextMaxID, "", "")
- }
-
- // In the next cases, maxID is defined, and so are
- // either sinceID or minID. This is equivalent to
- // a user opening an in-progress timeline and asking
- // for a slice of posts somewhere in the middle, or
- // trying to "fill in the blanks" between two points,
- // paging either up or down.
- case maxID != "" && sinceID != "":
- items, err = t.getXBetweenIDs(ctx, amount, maxID, sinceID, true)
-
- // Cache expected next query to speed up scrolling.
- // We can assume the caller is scrolling downwards.
- // Guess id.Lowest as sinceID, since we don't actually
- // know what the next sinceID would be.
- if prepareNext && err == nil && len(items) != 0 {
- nextMaxID := items[len(items)-1].GetID()
- t.prepareNextQuery(amount, nextMaxID, id.Lowest, "")
- }
-
- case maxID != "" && minID != "":
- items, err = t.getXBetweenIDs(ctx, amount, maxID, minID, false)
-
- // Cache expected next query to speed up scrolling.
- // We can assume the caller is scrolling upwards.
- // Guess id.Highest as maxID, since we don't actually
- // know what the next maxID would be.
- if prepareNext && err == nil && len(items) != 0 {
- prevMinID := items[0].GetID()
- t.prepareNextQuery(amount, id.Highest, "", prevMinID)
- }
-
- // In the final cases, maxID is not defined, but
- // either sinceID or minID are. This is equivalent to
- // a user either "pulling up" at the top of their timeline
- // to refresh it and check if newer posts have come in, or
- // trying to scroll upwards from an old post to see what
- // they missed since then.
- //
- // In these calls, we use the highest possible ulid as
- // behindID because we don't have a cap for newest that
- // we're interested in.
- case maxID == "" && sinceID != "":
- items, err = t.getXBetweenIDs(ctx, amount, id.Highest, sinceID, true)
-
- // We can't cache an expected next query for this one,
- // since presumably the caller is at the top of their
- // timeline already.
-
- case maxID == "" && minID != "":
- items, err = t.getXBetweenIDs(ctx, amount, id.Highest, minID, false)
-
- // Cache expected next query to speed up scrolling.
- // We can assume the caller is scrolling upwards.
- // Guess id.Highest as maxID, since we don't actually
- // know what the next maxID would be.
- if prepareNext && err == nil && len(items) != 0 {
- prevMinID := items[0].GetID()
- t.prepareNextQuery(amount, id.Highest, "", prevMinID)
- }
-
- default:
- err = gtserror.New("switch statement exhausted with no results")
- }
-
- return items, err
-}
-
-// getXBetweenIDs returns x amount of items somewhere between (not including) the given IDs.
-//
-// If frontToBack is true, items will be served paging down from behindID.
-// This corresponds to an api call to /timelines/home?max_id=WHATEVER&since_id=WHATEVER
-//
-// If frontToBack is false, items will be served paging up from beforeID.
-// This corresponds to an api call to /timelines/home?max_id=WHATEVER&min_id=WHATEVER
-func (t *timeline) getXBetweenIDs(ctx context.Context, amount int, behindID string, beforeID string, frontToBack bool) ([]Preparable, error) {
- l := log.
- WithContext(ctx).
- WithFields(kv.Fields{
- {"amount", amount},
- {"behindID", behindID},
- {"beforeID", beforeID},
- {"frontToBack", frontToBack},
- }...)
- l.Trace("entering getXBetweenID")
-
- // Assume length we need to return.
- items := make([]Preparable, 0, amount)
-
- if beforeID >= behindID {
- // This is an impossible situation, we
- // can't serve anything between these.
- return items, nil
- }
-
- // Try to ensure we have enough items prepared.
- if err := t.prepareXBetweenIDs(ctx, amount, behindID, beforeID, frontToBack); err != nil {
- // An error here doesn't necessarily mean we
- // can't serve anything, so log + keep going.
- l.Debugf("error calling prepareXBetweenIDs: %s", err)
- }
-
- var (
- beforeIDMark *list.Element
- served int
- // Our behavior while ranging through the
- // list changes depending on if we're
- // going front-to-back or back-to-front.
- //
- // To avoid checking which one we're doing
- // in each loop iteration, define our range
- // function here outside the loop.
- //
- // The bool indicates to the caller whether
- // iteration should continue (true) or stop
- // (false).
- rangeF func(e *list.Element) (bool, error)
- // If we get certain errors on entries as we're
- // looking through, we might want to cheekily
- // remove their elements from the timeline.
- // Everything added to this slice will be removed.
- removeElements = []*list.Element{}
- )
-
- defer func() {
- for _, e := range removeElements {
- t.items.data.Remove(e)
- }
- }()
-
- if frontToBack {
- // We're going front-to-back, which means we
- // don't need to look for a mark per se, we
- // just keep serving items until we've reached
- // a point where the items are out of the range
- // we're interested in.
- rangeF = func(e *list.Element) (bool, error) {
- entry := e.Value.(*indexedItemsEntry)
-
- if entry.itemID >= behindID {
- // ID of this item is too high,
- // just keep iterating.
- l.Trace("item is too new, continuing")
- return true, nil
- }
-
- if entry.itemID <= beforeID {
- // We've gone as far as we can through
- // the list and reached entries that are
- // now too old for us, stop here.
- l.Trace("reached older items, breaking")
- return false, nil
- }
-
- l.Trace("entry is just right")
-
- if entry.prepared == nil {
- // Whoops, this entry isn't prepared yet; some
- // race condition? That's OK, we can do it now.
- prepared, err := t.prepareFunction(ctx, t.timelineID, entry.itemID)
- if err != nil {
- if errors.Is(err, statusfilter.ErrHideStatus) {
- // This item has been filtered out by the requesting user's filters.
- // Remove it and skip past it.
- removeElements = append(removeElements, e)
- return true, nil
- }
- if errors.Is(err, db.ErrNoEntries) {
- // ErrNoEntries means something has been deleted,
- // so we'll likely not be able to ever prepare this.
- // This means we can remove it and skip past it.
- l.Debugf("db.ErrNoEntries while trying to prepare %s; will remove from timeline", entry.itemID)
- removeElements = append(removeElements, e)
- return true, nil
- }
- // We've got a proper db error.
- err = gtserror.Newf("db error while trying to prepare %s: %w", entry.itemID, err)
- return false, err
- }
- entry.prepared = prepared
- }
-
- items = append(items, entry.prepared)
-
- served++
- return served < amount, nil
- }
- } else {
- // Iterate through the list from the top, until
- // we reach an item with id smaller than beforeID;
- // ie., an item OLDER than beforeID. At that point,
- // we can stop looking because we're not interested
- // in older entries.
- rangeF = func(e *list.Element) (bool, error) {
- // Move the mark back one place each loop.
- beforeIDMark = e
-
- if entry := e.Value.(*indexedItemsEntry); entry.itemID <= beforeID {
- // We've gone as far as we can through
- // the list and reached entries that are
- // now too old for us, stop here.
- l.Trace("reached older items, breaking")
- return false, nil
- }
-
- return true, nil
- }
- }
-
- // Iterate through the list until the function
- // we defined above instructs us to stop.
- for e := t.items.data.Front(); e != nil; e = e.Next() {
- keepGoing, err := rangeF(e)
- if err != nil {
- return nil, err
- }
-
- if !keepGoing {
- break
- }
- }
-
- if frontToBack || beforeIDMark == nil {
- // If we're serving front to back, then
- // items should be populated by now. If
- // we're serving back to front but didn't
- // find any items newer than beforeID,
- // we can just return empty items.
- return items, nil
- }
-
- // We're serving back to front, so iterate upwards
- // towards the front of the list from the mark we found,
- // until we either get to the front, serve enough
- // items, or reach behindID.
- //
- // To preserve ordering, we need to reverse the slice
- // when we're finished.
- for e := beforeIDMark; e != nil; e = e.Prev() {
- entry := e.Value.(*indexedItemsEntry)
-
- if entry.itemID == beforeID {
- // Don't include the beforeID
- // entry itself, just continue.
- l.Trace("entry item ID is equal to beforeID, skipping")
- continue
- }
-
- if entry.itemID >= behindID {
- // We've reached items that are
- // newer than what we're looking
- // for, just stop here.
- l.Trace("reached newer items, breaking")
- break
- }
-
- if entry.prepared == nil {
- // Whoops, this entry isn't prepared yet; some
- // race condition? That's OK, we can do it now.
- prepared, err := t.prepareFunction(ctx, t.timelineID, entry.itemID)
- if err != nil {
- if errors.Is(err, statusfilter.ErrHideStatus) {
- // This item has been filtered out by the requesting user's filters.
- // Remove it and skip past it.
- removeElements = append(removeElements, e)
- continue
- }
- if errors.Is(err, db.ErrNoEntries) {
- // ErrNoEntries means something has been deleted,
- // so we'll likely not be able to ever prepare this.
- // This means we can remove it and skip past it.
- l.Debugf("db.ErrNoEntries while trying to prepare %s; will remove from timeline", entry.itemID)
- removeElements = append(removeElements, e)
- continue
- }
- // We've got a proper db error.
- err = gtserror.Newf("db error while trying to prepare %s: %w", entry.itemID, err)
- return nil, err
- }
- entry.prepared = prepared
- }
-
- items = append(items, entry.prepared)
-
- served++
- if served >= amount {
- break
- }
- }
-
- // Reverse order of items.
- // https://zchee.github.io/golang-wiki/SliceTricks/#reversing
- for l, r := 0, len(items)-1; l < r; l, r = l+1, r-1 {
- items[l], items[r] = items[r], items[l]
- }
-
- return items, nil
-}
-
-func (t *timeline) prepareNextQuery(amount int, maxID string, sinceID string, minID string) {
- var (
- // We explicitly use context.Background() rather than
- // accepting a context param because we don't want this
- // to stop/break when the calling context finishes.
- ctx = context.Background()
- err error
- )
-
- // Always perform this async so caller doesn't have to wait.
- go func() {
- switch {
- case maxID == "" && sinceID == "" && minID == "":
- err = t.prepareXBetweenIDs(ctx, amount, id.Highest, id.Lowest, true)
- case maxID != "" && sinceID == "" && minID == "":
- err = t.prepareXBetweenIDs(ctx, amount, maxID, id.Lowest, true)
- case maxID != "" && sinceID != "":
- err = t.prepareXBetweenIDs(ctx, amount, maxID, sinceID, true)
- case maxID != "" && minID != "":
- err = t.prepareXBetweenIDs(ctx, amount, maxID, minID, false)
- case maxID == "" && sinceID != "":
- err = t.prepareXBetweenIDs(ctx, amount, id.Highest, sinceID, true)
- case maxID == "" && minID != "":
- err = t.prepareXBetweenIDs(ctx, amount, id.Highest, minID, false)
- default:
- err = gtserror.New("switch statement exhausted with no results")
- }
-
- if err != nil {
- log.
- WithContext(ctx).
- WithFields(kv.Fields{
- {"amount", amount},
- {"maxID", maxID},
- {"sinceID", sinceID},
- {"minID", minID},
- }...).
- Warnf("error preparing next query: %s", err)
- }
- }()
-}
diff --git a/internal/timeline/get_test.go b/internal/timeline/get_test.go
deleted file mode 100644
index 91a456560..000000000
--- a/internal/timeline/get_test.go
+++ /dev/null
@@ -1,704 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package timeline_test
-
-import (
- "context"
- "sync"
- "testing"
-
- "github.com/stretchr/testify/suite"
- "github.com/superseriousbusiness/gotosocial/internal/gtscontext"
- "github.com/superseriousbusiness/gotosocial/internal/id"
- "github.com/superseriousbusiness/gotosocial/internal/timeline"
-)
-
-type GetTestSuite struct {
- TimelineStandardTestSuite
-}
-
-func (suite *GetTestSuite) checkStatuses(statuses []timeline.Preparable, maxID string, minID string, expectedLength int) {
- if l := len(statuses); l != expectedLength {
- suite.FailNow("", "expected %d statuses in slice, got %d", expectedLength, l)
- } else if l == 0 {
- // Can't test empty slice.
- return
- }
-
- // Check ordering + bounds of statuses.
- highest := statuses[0].GetID()
- for _, status := range statuses {
- id := status.GetID()
-
- if id >= maxID {
- suite.FailNow("", "%s greater than maxID %s", id, maxID)
- }
-
- if id <= minID {
- suite.FailNow("", "%s smaller than minID %s", id, minID)
- }
-
- if id > highest {
- suite.FailNow("", "statuses in slice were not ordered highest -> lowest ID")
- }
-
- highest = id
- }
-}
-
-func (suite *GetTestSuite) emptyAccountFollows(ctx context.Context, accountID string) {
- // Get all of account's follows.
- follows, err := suite.state.DB.GetAccountFollows(
- gtscontext.SetBarebones(ctx),
- accountID,
- nil, // select all
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- // Remove each follow.
- for _, follow := range follows {
- if err := suite.state.DB.DeleteFollowByID(ctx, follow.ID); err != nil {
- suite.FailNow(err.Error())
- }
- }
-
- // Ensure no follows left.
- follows, err = suite.state.DB.GetAccountFollows(
- gtscontext.SetBarebones(ctx),
- accountID,
- nil, // select all
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
- if len(follows) != 0 {
- suite.FailNow("follows should be empty")
- }
-}
-
-func (suite *GetTestSuite) emptyAccountStatuses(ctx context.Context, accountID string) {
- // Get all of account's statuses.
- statuses, err := suite.state.DB.GetAccountStatuses(
- ctx,
- accountID,
- 9999,
- false,
- false,
- id.Highest,
- id.Lowest,
- false,
- false,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- // Remove each status.
- for _, status := range statuses {
- if err := suite.state.DB.DeleteStatusByID(ctx, status.ID); err != nil {
- suite.FailNow(err.Error())
- }
- }
-}
-
-func (suite *GetTestSuite) TestGetNewTimelinePageDown() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = ""
- limit = 5
- local = false
- )
-
- // Get 5 from the top.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
- suite.checkStatuses(statuses, id.Highest, id.Lowest, 5)
-
- // Get 5 from next maxID.
- maxID = statuses[len(statuses)-1].GetID()
- statuses, err = suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
- suite.checkStatuses(statuses, maxID, id.Lowest, 5)
-}
-
-func (suite *GetTestSuite) TestGetNewTimelinePageUp() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = id.Lowest
- limit = 5
- local = false
- )
-
- // Get 5 from the back.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
- suite.checkStatuses(statuses, id.Highest, minID, 5)
-
- // Page up from next minID.
- minID = statuses[0].GetID()
- statuses, err = suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
- suite.checkStatuses(statuses, id.Highest, minID, 5)
-}
-
-func (suite *GetTestSuite) TestGetNewTimelineMoreThanPossible() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = ""
- limit = 100
- local = false
- )
-
- // Get 100 from the top.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
- suite.checkStatuses(statuses, id.Highest, id.Lowest, 22)
-}
-
-func (suite *GetTestSuite) TestGetNewTimelineMoreThanPossiblePageUp() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = id.Lowest
- limit = 100
- local = false
- )
-
- // Get 100 from the back.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
- suite.checkStatuses(statuses, id.Highest, id.Lowest, 22)
-}
-
-func (suite *GetTestSuite) TestGetNewTimelineNoFollowing() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = ""
- limit = 10
- local = false
- )
-
- suite.emptyAccountFollows(ctx, testAccount.ID)
-
- // Try to get 10 from the top of the timeline.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
- suite.checkStatuses(statuses, id.Highest, id.Lowest, 9)
-
- for _, s := range statuses {
- if s.GetAccountID() != testAccount.ID {
- suite.FailNow("timeline with no follows should only contain posts by timeline owner account")
- }
- }
-}
-
-func (suite *GetTestSuite) TestGetNewTimelineNoFollowingNoStatuses() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = ""
- limit = 5
- local = false
- )
-
- suite.emptyAccountFollows(ctx, testAccount.ID)
- suite.emptyAccountStatuses(ctx, testAccount.ID)
-
- // Try to get 5 from the top of the timeline.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
- suite.checkStatuses(statuses, id.Highest, id.Lowest, 0)
-}
-
-func (suite *GetTestSuite) TestGetNoParams() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = ""
- limit = 10
- local = false
- )
-
- suite.fillTimeline(testAccount.ID)
-
- // Get 10 statuses from the top (no params).
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- suite.checkStatuses(statuses, id.Highest, id.Lowest, 10)
-
- // First status should have the highest ID in the testrig.
- suite.Equal(suite.highestStatusID, statuses[0].GetID())
-}
-
-func (suite *GetTestSuite) TestGetMaxID() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = "01F8MHBQCBTDKN6X5VHGMMN4MA"
- sinceID = ""
- minID = ""
- limit = 10
- local = false
- )
-
- suite.fillTimeline(testAccount.ID)
-
- // Ask for 10 with a max ID somewhere in the middle of the stack.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- // We'll only get 6 statuses back.
- suite.checkStatuses(statuses, maxID, id.Lowest, 6)
-}
-
-func (suite *GetTestSuite) TestGetSinceID() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = "01F8MHBQCBTDKN6X5VHGMMN4MA"
- minID = ""
- limit = 10
- local = false
- )
-
- suite.fillTimeline(testAccount.ID)
-
- // Ask for 10 with a since ID somewhere in the middle of the stack.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- suite.checkStatuses(statuses, id.Highest, sinceID, 10)
-
- // The first status in the stack should have the highest ID of all
- // in the testrig, because we're paging down.
- suite.Equal(suite.highestStatusID, statuses[0].GetID())
-}
-
-func (suite *GetTestSuite) TestGetSinceIDOneOnly() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = "01F8MHBQCBTDKN6X5VHGMMN4MA"
- minID = ""
- limit = 1
- local = false
- )
-
- suite.fillTimeline(testAccount.ID)
-
- // Ask for 1 with a since ID somewhere in the middle of the stack.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- suite.checkStatuses(statuses, id.Highest, sinceID, 1)
-
- // The one status we got back should have the highest ID of all in
- // the testrig, because using sinceID means we're paging down.
- suite.Equal(suite.highestStatusID, statuses[0].GetID())
-}
-
-func (suite *GetTestSuite) TestGetMinID() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = "01F8MHBQCBTDKN6X5VHGMMN4MA"
- limit = 5
- local = false
- )
-
- suite.fillTimeline(testAccount.ID)
-
- // Ask for 5 with a min ID somewhere in the middle of the stack.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- suite.checkStatuses(statuses, id.Highest, minID, 5)
-
- // We're paging up so even the highest status ID in the pile
- // shouldn't be the highest ID we have.
- suite.NotEqual(suite.highestStatusID, statuses[0])
-}
-
-func (suite *GetTestSuite) TestGetMinIDOneOnly() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = "01F8MHBQCBTDKN6X5VHGMMN4MA"
- limit = 1
- local = false
- )
-
- suite.fillTimeline(testAccount.ID)
-
- // Ask for 1 with a min ID somewhere in the middle of the stack.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- suite.checkStatuses(statuses, id.Highest, minID, 1)
-
- // The one status we got back should have an ID equal to the
- // one ID immediately newer than it.
- suite.Equal("01F8MHC0H0A7XHTVH5F596ZKBM", statuses[0].GetID())
-}
-
-func (suite *GetTestSuite) TestGetMinIDFromLowestInTestrig() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = suite.lowestStatusID
- limit = 1
- local = false
- )
-
- suite.fillTimeline(testAccount.ID)
-
- // Ask for 1 with minID equal to the lowest status in the testrig.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- suite.checkStatuses(statuses, id.Highest, minID, 1)
-
- // The one status we got back should have an id higher than
- // the lowest status in the testrig, since minID is not inclusive.
- suite.Greater(statuses[0].GetID(), suite.lowestStatusID)
-}
-
-func (suite *GetTestSuite) TestGetMinIDFromLowestPossible() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = id.Lowest
- limit = 1
- local = false
- )
-
- suite.fillTimeline(testAccount.ID)
-
- // Ask for 1 with the lowest possible min ID.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- suite.checkStatuses(statuses, id.Highest, minID, 1)
-
- // The one status we got back should have an ID equal to the
- // lowest ID status in the test rig.
- suite.Equal(suite.lowestStatusID, statuses[0].GetID())
-}
-
-func (suite *GetTestSuite) TestGetBetweenID() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = "01F8MHCP5P2NWYQ416SBA0XSEV"
- sinceID = ""
- minID = "01F8MHBQCBTDKN6X5VHGMMN4MA"
- limit = 10
- local = false
- )
-
- suite.fillTimeline(testAccount.ID)
-
- // Ask for 10 between these two IDs
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- // There's only two statuses between these two IDs.
- suite.checkStatuses(statuses, maxID, minID, 2)
-}
-
-func (suite *GetTestSuite) TestGetBetweenIDImpossible() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = id.Lowest
- sinceID = ""
- minID = id.Highest
- limit = 10
- local = false
- )
-
- suite.fillTimeline(testAccount.ID)
-
- // Ask for 10 between these two IDs which present
- // an impossible query.
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- // We should have nothing back.
- suite.checkStatuses(statuses, maxID, minID, 0)
-}
-
-func (suite *GetTestSuite) TestGetTimelinesAsync() {
- var (
- ctx = context.Background()
- accountToNuke = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = ""
- limit = 5
- local = false
- multiplier = 5
- )
-
- // Nuke one account's statuses and follows,
- // as though the account had just been created.
- suite.emptyAccountFollows(ctx, accountToNuke.ID)
- suite.emptyAccountStatuses(ctx, accountToNuke.ID)
-
- // Get 5 statuses from each timeline in
- // our testrig at the same time, five times.
- wg := new(sync.WaitGroup)
- wg.Add(len(suite.testAccounts) * multiplier)
-
- for i := 0; i < multiplier; i++ {
- go func() {
- for _, testAccount := range suite.testAccounts {
- if _, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- ); err != nil {
- suite.Fail(err.Error())
- }
-
- wg.Done()
- }
- }()
- }
-
- wg.Wait() // Wait until all get calls have returned.
-}
-
-func TestGetTestSuite(t *testing.T) {
- suite.Run(t, new(GetTestSuite))
-}
diff --git a/internal/timeline/index.go b/internal/timeline/index.go
deleted file mode 100644
index 6abb6d28d..000000000
--- a/internal/timeline/index.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package timeline
-
-import (
- "container/list"
- "context"
- "errors"
-
- "codeberg.org/gruf/go-kv"
- "github.com/superseriousbusiness/gotosocial/internal/db"
- "github.com/superseriousbusiness/gotosocial/internal/gtserror"
- "github.com/superseriousbusiness/gotosocial/internal/log"
-)
-
-func (t *timeline) indexXBetweenIDs(ctx context.Context, amount int, behindID string, beforeID string, frontToBack bool) error {
- l := log.
- WithContext(ctx).
- WithFields(kv.Fields{
- {"amount", amount},
- {"behindID", behindID},
- {"beforeID", beforeID},
- {"frontToBack", frontToBack},
- }...)
- l.Trace("entering indexXBetweenIDs")
-
- if beforeID >= behindID {
- // This is an impossible situation, we
- // can't index anything between these.
- return nil
- }
-
- t.Lock()
- defer t.Unlock()
-
- // Lazily init indexed items.
- if t.items.data == nil {
- t.items.data = &list.List{}
- t.items.data.Init()
- }
-
- // Start by mapping out the list so we know what
- // we have to do. Depending on the current state
- // of the list we might not have to do *anything*.
- var (
- position int
- listLen = t.items.data.Len()
- behindIDPosition int
- beforeIDPosition int
- )
-
- for e := t.items.data.Front(); e != nil; e = e.Next() {
- entry := e.Value.(*indexedItemsEntry)
-
- position++
-
- if entry.itemID > behindID {
- l.Trace("item is too new, continuing")
- continue
- }
-
- if behindIDPosition == 0 {
- // Gone far enough through the list
- // and found our behindID mark.
- // We only need to set this once.
- l.Tracef("found behindID mark %s at position %d", entry.itemID, position)
- behindIDPosition = position
- }
-
- if entry.itemID >= beforeID {
- // Push the beforeID mark back
- // one place every iteration.
- l.Tracef("setting beforeID mark %s at position %d", entry.itemID, position)
- beforeIDPosition = position
- }
-
- if entry.itemID <= beforeID {
- // We've gone beyond the bounds of
- // items we're interested in; stop.
- l.Trace("reached older items, breaking")
- break
- }
- }
-
- // We can now figure out if we need to make db calls.
- var grabMore bool
- switch {
- case listLen < amount:
- // The whole list is shorter than the
- // amount we're being asked to return,
- // make up the difference.
- grabMore = true
- amount -= listLen
- case beforeIDPosition-behindIDPosition < amount:
- // Not enough items between behindID and
- // beforeID to return amount required,
- // try to get more.
- grabMore = true
- }
-
- if !grabMore {
- // We're good!
- return nil
- }
-
- // Fetch additional items.
- items, err := t.grab(ctx, amount, behindID, beforeID, frontToBack)
- if err != nil {
- return err
- }
-
- // Index all the items we got. We already have
- // a lock on the timeline, so don't call IndexOne
- // here, since that will also try to get a lock!
- for _, item := range items {
- entry := &indexedItemsEntry{
- itemID: item.GetID(),
- boostOfID: item.GetBoostOfID(),
- accountID: item.GetAccountID(),
- boostOfAccountID: item.GetBoostOfAccountID(),
- }
-
- if _, err := t.items.insertIndexed(ctx, entry); err != nil {
- return gtserror.Newf("error inserting entry with itemID %s into index: %w", entry.itemID, err)
- }
- }
-
- return nil
-}
-
-// grab wraps the timeline's grabFunction in paging + filtering logic.
-func (t *timeline) grab(ctx context.Context, amount int, behindID string, beforeID string, frontToBack bool) ([]Timelineable, error) {
- var (
- sinceID string
- minID string
- grabbed int
- maxID = behindID
- filtered = make([]Timelineable, 0, amount)
- )
-
- if frontToBack {
- sinceID = beforeID
- } else {
- minID = beforeID
- }
-
- for attempts := 0; attempts < 5; attempts++ {
- if grabbed >= amount {
- // We got everything we needed.
- break
- }
-
- items, stop, err := t.grabFunction(
- ctx,
- t.timelineID,
- maxID,
- sinceID,
- minID,
- // Don't grab more than we need to.
- amount-grabbed,
- )
- if err != nil {
- // Grab function already checks for
- // db.ErrNoEntries, so if an error
- // is returned then it's a real one.
- return nil, err
- }
-
- if stop || len(items) == 0 {
- // No items left.
- break
- }
-
- // Set next query parameters.
- if frontToBack {
- // Page down.
- maxID = items[len(items)-1].GetID()
- if maxID <= beforeID {
- // Can't go any further.
- break
- }
- } else {
- // Page up.
- minID = items[0].GetID()
- if minID >= behindID {
- // Can't go any further.
- break
- }
- }
-
- for _, item := range items {
- ok, err := t.filterFunction(ctx, t.timelineID, item)
- if err != nil {
- if !errors.Is(err, db.ErrNoEntries) {
- // Real error here.
- return nil, err
- }
- log.Warnf(ctx, "errNoEntries while filtering item %s: %s", item.GetID(), err)
- continue
- }
-
- if ok {
- filtered = append(filtered, item)
- grabbed++ // count this as grabbed
- }
- }
- }
-
- return filtered, nil
-}
-
-func (t *timeline) IndexAndPrepareOne(ctx context.Context, statusID string, boostOfID string, accountID string, boostOfAccountID string) (bool, error) {
- t.Lock()
- defer t.Unlock()
-
- postIndexEntry := &indexedItemsEntry{
- itemID: statusID,
- boostOfID: boostOfID,
- accountID: accountID,
- boostOfAccountID: boostOfAccountID,
- }
-
- if inserted, err := t.items.insertIndexed(ctx, postIndexEntry); err != nil {
- return false, gtserror.Newf("error inserting indexed: %w", err)
- } else if !inserted {
- // Entry wasn't inserted, so
- // don't bother preparing it.
- return false, nil
- }
-
- preparable, err := t.prepareFunction(ctx, t.timelineID, statusID)
- if err != nil {
- return true, gtserror.Newf("error preparing: %w", err)
- }
- postIndexEntry.prepared = preparable
-
- return true, nil
-}
-
-func (t *timeline) Len() int {
- t.Lock()
- defer t.Unlock()
-
- if t.items == nil || t.items.data == nil {
- // indexedItems hasn't been initialized yet.
- return 0
- }
-
- return t.items.data.Len()
-}
-
-func (t *timeline) OldestIndexedItemID() string {
- t.Lock()
- defer t.Unlock()
-
- if t.items == nil || t.items.data == nil {
- // indexedItems hasn't been initialized yet.
- return ""
- }
-
- e := t.items.data.Back()
- if e == nil {
- // List was empty.
- return ""
- }
-
- return e.Value.(*indexedItemsEntry).itemID
-}
diff --git a/internal/timeline/index_test.go b/internal/timeline/index_test.go
deleted file mode 100644
index a7eeebb6e..000000000
--- a/internal/timeline/index_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package timeline_test
-
-import (
- "context"
- "testing"
- "time"
-
- "github.com/stretchr/testify/suite"
- "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
-)
-
-type IndexTestSuite struct {
- TimelineStandardTestSuite
-}
-
-func (suite *IndexTestSuite) TestOldestIndexedItemIDEmpty() {
- var (
- ctx = context.Background()
- testAccountID = suite.testAccounts["local_account_1"].ID
- )
-
- // the oldest indexed post should be an empty string since there's nothing indexed yet
- postID := suite.state.Timelines.Home.GetOldestIndexedID(ctx, testAccountID)
- suite.Empty(postID)
-
- // indexLength should be 0
- suite.Zero(0, suite.state.Timelines.Home.GetIndexedLength(ctx, testAccountID))
-}
-
-func (suite *IndexTestSuite) TestIndexAlreadyIndexed() {
- var (
- ctx = context.Background()
- testAccountID = suite.testAccounts["local_account_1"].ID
- testStatus = suite.testStatuses["local_account_1_status_1"]
- )
-
- // index one post -- it should be indexed
- indexed, err := suite.state.Timelines.Home.IngestOne(ctx, testAccountID, testStatus)
- suite.NoError(err)
- suite.True(indexed)
-
- // try to index the same post again -- it should not be indexed
- indexed, err = suite.state.Timelines.Home.IngestOne(ctx, testAccountID, testStatus)
- suite.NoError(err)
- suite.False(indexed)
-}
-
-func (suite *IndexTestSuite) TestIndexBoostOfAlreadyIndexed() {
- var (
- ctx = context.Background()
- testAccountID = suite.testAccounts["local_account_1"].ID
- testStatus = suite.testStatuses["local_account_1_status_1"]
- boostOfTestStatus = &gtsmodel.Status{
- CreatedAt: time.Now(),
- ID: "01FD4TA6G2Z6M7W8NJQ3K5WXYD",
- BoostOfID: testStatus.ID,
- AccountID: "01FD4TAY1C0NGEJVE9CCCX7QKS",
- BoostOfAccountID: testStatus.AccountID,
- }
- )
-
- // index one post -- it should be indexed
- indexed, err := suite.state.Timelines.Home.IngestOne(ctx, testAccountID, testStatus)
- suite.NoError(err)
- suite.True(indexed)
-
- // try to index a boost of that post -- it should not be indexed
- indexed, err = suite.state.Timelines.Home.IngestOne(ctx, testAccountID, boostOfTestStatus)
- suite.NoError(err)
- suite.False(indexed)
-}
-
-func TestIndexTestSuite(t *testing.T) {
- suite.Run(t, new(IndexTestSuite))
-}
diff --git a/internal/timeline/indexeditems.go b/internal/timeline/indexeditems.go
deleted file mode 100644
index 9b75e7256..000000000
--- a/internal/timeline/indexeditems.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package timeline
-
-import (
- "container/list"
- "context"
-
- "github.com/superseriousbusiness/gotosocial/internal/gtserror"
-)
-
-type indexedItems struct {
- data *list.List
- skipInsert SkipInsertFunction
-}
-
-type indexedItemsEntry struct {
- itemID string
- boostOfID string
- accountID string
- boostOfAccountID string
- prepared Preparable
-}
-
-// WARNING: ONLY CALL THIS FUNCTION IF YOU ALREADY HAVE
-// A LOCK ON THE TIMELINE CONTAINING THIS INDEXEDITEMS!
-func (i *indexedItems) insertIndexed(ctx context.Context, newEntry *indexedItemsEntry) (bool, error) {
- // Lazily init indexed items.
- if i.data == nil {
- i.data = &list.List{}
- i.data.Init()
- }
-
- if i.data.Len() == 0 {
- // We have no entries yet, meaning this is both the
- // newest + oldest entry, so just put it in the front.
- i.data.PushFront(newEntry)
- return true, nil
- }
-
- var (
- insertMark *list.Element
- currentPosition int
- )
-
- // We need to iterate through the index to make sure we put
- // this item in the appropriate place according to its id.
- // We also need to make sure we're not inserting a duplicate
- // item -- this can happen sometimes and it's sucky UX.
- for e := i.data.Front(); e != nil; e = e.Next() {
- currentPosition++
-
- currentEntry := e.Value.(*indexedItemsEntry)
-
- // Check if we need to skip inserting this item based on
- // the current item.
- //
- // For example, if the new item is a boost, and the current
- // item is the original, we may not want to insert the boost
- // if it would appear very shortly after the original.
- if skip, err := i.skipInsert(
- ctx,
- newEntry.itemID,
- newEntry.accountID,
- newEntry.boostOfID,
- newEntry.boostOfAccountID,
- currentEntry.itemID,
- currentEntry.accountID,
- currentEntry.boostOfID,
- currentEntry.boostOfAccountID,
- currentPosition,
- ); err != nil {
- return false, gtserror.Newf("error calling skipInsert: %w", err)
- } else if skip {
- // We don't need to insert this at all,
- // so we can safely bail.
- return false, nil
- }
-
- if insertMark != nil {
- // We already found our mark.
- continue
- }
-
- if currentEntry.itemID > newEntry.itemID {
- // We're still in items newer than
- // the one we're trying to insert.
- continue
- }
-
- // We found our spot!
- insertMark = e
- }
-
- if insertMark == nil {
- // We looked through the whole timeline and didn't find
- // a mark, so the new item is the oldest item we've seen;
- // insert it at the back.
- i.data.PushBack(newEntry)
- return true, nil
- }
-
- i.data.InsertBefore(newEntry, insertMark)
- return true, nil
-}
diff --git a/internal/timeline/manager.go b/internal/timeline/manager.go
deleted file mode 100644
index b4f075138..000000000
--- a/internal/timeline/manager.go
+++ /dev/null
@@ -1,259 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package timeline
-
-import (
- "context"
- "sync"
- "time"
-
- "github.com/superseriousbusiness/gotosocial/internal/gtserror"
- "github.com/superseriousbusiness/gotosocial/internal/log"
-)
-
-const (
- pruneLengthIndexed = 400
- pruneLengthPrepared = 50
-)
-
-// Manager abstracts functions for creating multiple timelines, and adding, removing, and fetching entries from those timelines.
-//
-// By the time a timelineable hits the manager interface, it should already have been filtered and it should be established that the item indeed
-// belongs in the given timeline.
-//
-// The manager makes a distinction between *indexed* items and *prepared* items.
-//
-// Indexed items consist of just that item's ID (in the database) and the time it was created. An indexed item takes up very little memory, so
-// it's not a huge priority to keep trimming the indexed items list.
-//
-// Prepared items consist of the item's database ID, the time it was created, AND the apimodel representation of that item, for quick serialization.
-// Prepared items of course take up more memory than indexed items, so they should be regularly pruned if they're not being actively served.
-type Manager interface {
- // IngestOne takes one timelineable and indexes it into the given timeline, and then immediately prepares it for serving.
- // This is useful in cases where we know the item will need to be shown at the top of a user's timeline immediately (eg., a new status is created).
- //
- // It should already be established before calling this function that the item actually belongs in the timeline!
- //
- // The returned bool indicates whether the item was actually put in the timeline. This could be false in cases where
- // a status is a boost, but a boost of the original status or the status itself already exists recently in the timeline.
- IngestOne(ctx context.Context, timelineID string, item Timelineable) (bool, error)
-
- // GetTimeline returns limit n amount of prepared entries from the given timeline, in descending chronological order.
- GetTimeline(ctx context.Context, timelineID string, maxID string, sinceID string, minID string, limit int, local bool) ([]Preparable, error)
-
- // GetIndexedLength returns the amount of items that have been indexed for the given account ID.
- GetIndexedLength(ctx context.Context, timelineID string) int
-
- // GetOldestIndexedID returns the ID of the oldest item that we have indexed for the given timeline.
- // Will be an empty string if nothing is (yet) indexed.
- GetOldestIndexedID(ctx context.Context, timelineID string) string
-
- // Remove removes one item from the given timeline.
- Remove(ctx context.Context, timelineID string, itemID string) (int, error)
-
- // RemoveTimeline completely removes one timeline.
- RemoveTimeline(ctx context.Context, timelineID string) error
-
- // WipeItemFromAllTimelines removes one item from the index and prepared items of all timelines
- WipeItemFromAllTimelines(ctx context.Context, itemID string) error
-
- // WipeItemsFromAccountID removes all items by the given accountID from the given timeline.
- WipeItemsFromAccountID(ctx context.Context, timelineID string, accountID string) error
-
- // UnprepareItem unprepares/uncaches the prepared version of the given itemID from the given timelineID.
- // Use this for cache invalidation when the prepared representation of an item has changed.
- UnprepareItem(ctx context.Context, timelineID string, itemID string) error
-
- // UnprepareItemFromAllTimelines unprepares/uncaches the prepared version of the given itemID from all timelines.
- // Use this for cache invalidation when the prepared representation of an item has changed.
- UnprepareItemFromAllTimelines(ctx context.Context, itemID string) error
-
- // Prune manually triggers a prune operation for the given timelineID.
- Prune(ctx context.Context, timelineID string, desiredPreparedItemsLength int, desiredIndexedItemsLength int) (int, error)
-
- // Start starts hourly cleanup jobs for this timeline manager.
- Start() error
-
- // Stop stops the timeline manager (currently a stub, doesn't do anything).
- Stop() error
-}
-
-// NewManager returns a new timeline manager.
-func NewManager(grabFunction GrabFunction, filterFunction FilterFunction, prepareFunction PrepareFunction, skipInsertFunction SkipInsertFunction) Manager {
- return &manager{
- timelines: sync.Map{},
- grabFunction: grabFunction,
- filterFunction: filterFunction,
- prepareFunction: prepareFunction,
- skipInsertFunction: skipInsertFunction,
- }
-}
-
-type manager struct {
- timelines sync.Map
- grabFunction GrabFunction
- filterFunction FilterFunction
- prepareFunction PrepareFunction
- skipInsertFunction SkipInsertFunction
-}
-
-func (m *manager) Start() error {
- // Start a background goroutine which iterates
- // through all stored timelines once per hour,
- // and cleans up old entries if that timeline
- // hasn't been accessed in the last hour.
- go func() {
- for now := range time.NewTicker(1 * time.Hour).C {
- now := now // rescope
- // Define the range function inside here,
- // so that we can use the 'now' returned
- // by the ticker, instead of having to call
- // time.Now() multiple times.
- //
- // Unless it panics, this function always
- // returns 'true', to continue the Range
- // call through the sync.Map.
- f := func(_ any, v any) bool {
- timeline, ok := v.(Timeline)
- if !ok {
- log.Panic(nil, "couldn't parse timeline manager sync map value as Timeline, this should never happen so panic")
- }
-
- if now.Sub(timeline.LastGot()) < 1*time.Hour {
- // Timeline has been fetched in the
- // last hour, move on to the next one.
- return true
- }
-
- if amountPruned := timeline.Prune(pruneLengthPrepared, pruneLengthIndexed); amountPruned > 0 {
- log.WithField("accountID", timeline.TimelineID()).Infof("pruned %d indexed and prepared items from timeline", amountPruned)
- }
-
- return true
- }
-
- // Execute the function for each timeline.
- m.timelines.Range(f)
- }
- }()
-
- return nil
-}
-
-func (m *manager) Stop() error {
- return nil
-}
-
-func (m *manager) IngestOne(ctx context.Context, timelineID string, item Timelineable) (bool, error) {
- return m.getOrCreateTimeline(ctx, timelineID).IndexAndPrepareOne(
- ctx,
- item.GetID(),
- item.GetBoostOfID(),
- item.GetAccountID(),
- item.GetBoostOfAccountID(),
- )
-}
-
-func (m *manager) Remove(ctx context.Context, timelineID string, itemID string) (int, error) {
- return m.getOrCreateTimeline(ctx, timelineID).Remove(ctx, itemID)
-}
-
-func (m *manager) RemoveTimeline(ctx context.Context, timelineID string) error {
- m.timelines.Delete(timelineID)
- return nil
-}
-
-func (m *manager) GetTimeline(ctx context.Context, timelineID string, maxID string, sinceID string, minID string, limit int, local bool) ([]Preparable, error) {
- return m.getOrCreateTimeline(ctx, timelineID).Get(ctx, limit, maxID, sinceID, minID, true)
-}
-
-func (m *manager) GetIndexedLength(ctx context.Context, timelineID string) int {
- return m.getOrCreateTimeline(ctx, timelineID).Len()
-}
-
-func (m *manager) GetOldestIndexedID(ctx context.Context, timelineID string) string {
- return m.getOrCreateTimeline(ctx, timelineID).OldestIndexedItemID()
-}
-
-func (m *manager) WipeItemFromAllTimelines(ctx context.Context, itemID string) error {
- errs := new(gtserror.MultiError)
-
- m.timelines.Range(func(_ any, v any) bool {
- if _, err := v.(Timeline).Remove(ctx, itemID); err != nil {
- errs.Append(err)
- }
-
- return true // always continue range
- })
-
- if err := errs.Combine(); err != nil {
- return gtserror.Newf("error(s) wiping status %s: %w", itemID, errs.Combine())
- }
-
- return nil
-}
-
-func (m *manager) WipeItemsFromAccountID(ctx context.Context, timelineID string, accountID string) error {
- _, err := m.getOrCreateTimeline(ctx, timelineID).RemoveAllByOrBoosting(ctx, accountID)
- return err
-}
-
-func (m *manager) UnprepareItemFromAllTimelines(ctx context.Context, itemID string) error {
- errs := new(gtserror.MultiError)
-
- // Work through all timelines held by this
- // manager, and call Unprepare for each.
- m.timelines.Range(func(_ any, v any) bool {
- if err := v.(Timeline).Unprepare(ctx, itemID); err != nil {
- errs.Append(err)
- }
-
- return true // always continue range
- })
-
- if err := errs.Combine(); err != nil {
- return gtserror.Newf("error(s) unpreparing status %s: %w", itemID, errs.Combine())
- }
-
- return nil
-}
-
-func (m *manager) UnprepareItem(ctx context.Context, timelineID string, itemID string) error {
- return m.getOrCreateTimeline(ctx, timelineID).Unprepare(ctx, itemID)
-}
-
-func (m *manager) Prune(ctx context.Context, timelineID string, desiredPreparedItemsLength int, desiredIndexedItemsLength int) (int, error) {
- return m.getOrCreateTimeline(ctx, timelineID).Prune(desiredPreparedItemsLength, desiredIndexedItemsLength), nil
-}
-
-// getOrCreateTimeline returns a timeline with the given id,
-// creating a new timeline with that id if necessary.
-func (m *manager) getOrCreateTimeline(ctx context.Context, timelineID string) Timeline {
- i, ok := m.timelines.Load(timelineID)
- if ok {
- // Timeline already existed in sync.Map.
- return i.(Timeline)
- }
-
- // Timeline did not yet exist in sync.Map.
- // Create + store it.
- timeline := NewTimeline(ctx, timelineID, m.grabFunction, m.filterFunction, m.prepareFunction, m.skipInsertFunction)
- m.timelines.Store(timelineID, timeline)
-
- return timeline
-}
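
For reference, the manager removed above resolved per-account timelines lazily from a sync.Map, creating one on first use. Below is a minimal standalone sketch of that get-or-create pattern; all names are illustrative, and it uses LoadOrStore rather than the removed code's Load/Store pair so that concurrent first calls keep a single instance.

package main

import (
	"fmt"
	"sync"
)

// tl stands in for the per-account timeline type.
type tl struct{ id string }

// manager lazily creates one tl per timeline ID.
type manager struct{ timelines sync.Map }

// getOrCreate returns the timeline for id, creating it on first use.
func (m *manager) getOrCreate(id string) *tl {
	if v, ok := m.timelines.Load(id); ok {
		// Fast path: timeline already exists.
		return v.(*tl)
	}

	// Slow path: construct and store. LoadOrStore keeps exactly
	// one instance even if two goroutines race to create it.
	v, _ := m.timelines.LoadOrStore(id, &tl{id: id})
	return v.(*tl)
}

func main() {
	m := &manager{}
	fmt.Println(m.getOrCreate("account-1").id) // account-1
}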
diff --git a/internal/timeline/prepare.go b/internal/timeline/prepare.go
deleted file mode 100644
index ec595ce42..000000000
--- a/internal/timeline/prepare.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-package timeline
-
-import (
- "container/list"
- "context"
- "errors"
-
- "codeberg.org/gruf/go-kv"
- "github.com/superseriousbusiness/gotosocial/internal/db"
- statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status"
- "github.com/superseriousbusiness/gotosocial/internal/gtserror"
- "github.com/superseriousbusiness/gotosocial/internal/log"
-)
-
-func (t *timeline) prepareXBetweenIDs(ctx context.Context, amount int, behindID string, beforeID string, frontToBack bool) error {
- l := log.
- WithContext(ctx).
- WithFields(kv.Fields{
- {"amount", amount},
- {"behindID", behindID},
- {"beforeID", beforeID},
- {"frontToBack", frontToBack},
- }...)
- l.Trace("entering prepareXBetweenIDs")
-
- if beforeID >= behindID {
- // This is an impossible situation, we
- // can't prepare anything between these.
- return nil
- }
-
- if err := t.indexXBetweenIDs(ctx, amount, behindID, beforeID, frontToBack); err != nil {
- // An error here doesn't necessarily mean we
- // can't prepare anything, so log + keep going.
- l.Debugf("error calling prepareXBetweenIDs: %s", err)
- }
-
- t.Lock()
- defer t.Unlock()
-
- // Try to prepare everything between (and including) the two points.
- var (
- toPrepare = make(map[*list.Element]*indexedItemsEntry)
- foundToPrepare int
- )
-
- if frontToBack {
- // Paging forwards / down.
- for e := t.items.data.Front(); e != nil; e = e.Next() {
- entry := e.Value.(*indexedItemsEntry)
-
- if entry.itemID > behindID {
- l.Trace("item is too new, continuing")
- continue
- }
-
- if entry.itemID < beforeID {
- // We've gone beyond the bounds of
- // items we're interested in; stop.
- l.Trace("reached older items, breaking")
- break
- }
-
- // Only prepare entry if it's not
- // already prepared, save db calls.
- if entry.prepared == nil {
- toPrepare[e] = entry
- }
-
- foundToPrepare++
- if foundToPrepare >= amount {
- break
- }
- }
- } else {
- // Paging backwards / up.
- for e := t.items.data.Back(); e != nil; e = e.Prev() {
- entry := e.Value.(*indexedItemsEntry)
-
- if entry.itemID < beforeID {
- l.Trace("item is too old, continuing")
- continue
- }
-
- if entry.itemID > behindID {
- // We've gone beyond the bounds of
- // items we're interested in; stop.
- l.Trace("reached newer items, breaking")
- break
- }
-
- if entry.prepared == nil {
- toPrepare[e] = entry
- }
-
- // Only prepare entry if it's not
- // already prepared, save db calls.
- foundToPrepare++
- if foundToPrepare >= amount {
- break
- }
- }
- }
-
- for e, entry := range toPrepare {
- prepared, err := t.prepareFunction(ctx, t.timelineID, entry.itemID)
- if err != nil {
- if errors.Is(err, statusfilter.ErrHideStatus) {
- // This item has been filtered out by the requesting user's filters.
- // Remove it and skip past it.
- t.items.data.Remove(e)
- continue
- }
- if errors.Is(err, db.ErrNoEntries) {
- // ErrNoEntries means something has been deleted,
- // so we'll likely not be able to ever prepare this.
- // This means we can remove it and skip past it.
- l.Debugf("db.ErrNoEntries while trying to prepare %s; will remove from timeline", entry.itemID)
- t.items.data.Remove(e)
- continue
- }
- // We've got a proper db error.
- return gtserror.Newf("db error while trying to prepare %s: %w", entry.itemID, err)
- }
- entry.prepared = prepared
- }
-
- return nil
-}
diff --git a/internal/timeline/prune.go b/internal/timeline/prune.go
deleted file mode 100644
index 5c7476956..000000000
--- a/internal/timeline/prune.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-package timeline
-
-import (
- "container/list"
-)
-
-func (t *timeline) Prune(desiredPreparedItemsLength int, desiredIndexedItemsLength int) int {
- t.Lock()
- defer t.Unlock()
-
- l := t.items.data
- if l == nil {
- // Nothing to prune.
- return 0
- }
-
- var (
- position int
- totalPruned int
- toRemove *[]*list.Element
- )
-
- // Only initialize toRemove if we know we're
- // going to need it, otherwise skiperino.
- if toRemoveLen := t.items.data.Len() - desiredIndexedItemsLength; toRemoveLen > 0 {
- toRemove = func() *[]*list.Element { tr := make([]*list.Element, 0, toRemoveLen); return &tr }()
- }
-
- // Work from the front of the list until we get
- // to the point where we need to start pruning.
- for e := l.Front(); e != nil; e = e.Next() {
- position++
-
- if position <= desiredPreparedItemsLength {
- // We're still within our allotted
- // prepped length, nothing to do yet.
- continue
- }
-
- // We need to *at least* unprepare this entry.
- // If we're beyond our indexed length already,
- // we can just remove the item completely.
- if position > desiredIndexedItemsLength {
- *toRemove = append(*toRemove, e)
- totalPruned++
- continue
- }
-
- entry := e.Value.(*indexedItemsEntry)
- if entry.prepared == nil {
- // It's already unprepared (mood).
- continue
- }
-
- entry.prepared = nil // <- eat this up please garbage collector nom nom nom
- totalPruned++
- }
-
- if toRemove != nil {
- for _, e := range *toRemove {
- l.Remove(e)
- }
- }
-
- return totalPruned
-}
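
As a quick sanity check of the two thresholds used above, the hypothetical helper below models only the simple case where every indexed entry is currently prepared; its results line up with the expectations in the prune tests that follow. It is not code from the repository.

package main

import "fmt"

// prunedCount models the removed Prune logic for a fully-prepared list:
// positions beyond desiredIndexed are removed outright, positions between
// desiredPrepared and desiredIndexed are merely unprepared; both count
// towards the returned total.
func prunedCount(total, desiredPrepared, desiredIndexed int) int {
	pruned := 0
	for pos := 1; pos <= total; pos++ {
		switch {
		case pos <= desiredPrepared:
			// Still within the allowed prepared length: untouched.
		case pos > desiredIndexed:
			pruned++ // removed from the index entirely
		default:
			pruned++ // kept indexed, but unprepared
		}
	}
	return pruned
}

func main() {
	fmt.Println(prunedCount(30, 5, 5))             // 25, as in TestPrune
	fmt.Println(prunedCount(30, 0, 0))             // 30, as in TestPruneTo0
	fmt.Println(prunedCount(30, 9999999, 9999999)) // 0, as in TestPruneToInfinityAndBeyond
}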
diff --git a/internal/timeline/prune_test.go b/internal/timeline/prune_test.go
deleted file mode 100644
index 6ff67d505..000000000
--- a/internal/timeline/prune_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-package timeline_test
-
-import (
- "context"
- "testing"
-
- "github.com/stretchr/testify/suite"
-)
-
-type PruneTestSuite struct {
- TimelineStandardTestSuite
-}
-
-func (suite *PruneTestSuite) TestPrune() {
- var (
- ctx = context.Background()
- testAccountID = suite.testAccounts["local_account_1"].ID
- desiredPreparedItemsLength = 5
- desiredIndexedItemsLength = 5
- )
-
- suite.fillTimeline(testAccountID)
-
- pruned, err := suite.state.Timelines.Home.Prune(ctx, testAccountID, desiredPreparedItemsLength, desiredIndexedItemsLength)
- suite.NoError(err)
- suite.Equal(25, pruned)
- suite.Equal(5, suite.state.Timelines.Home.GetIndexedLength(ctx, testAccountID))
-}
-
-func (suite *PruneTestSuite) TestPruneTwice() {
- var (
- ctx = context.Background()
- testAccountID = suite.testAccounts["local_account_1"].ID
- desiredPreparedItemsLength = 5
- desiredIndexedItemsLength = 5
- )
-
- suite.fillTimeline(testAccountID)
-
- pruned, err := suite.state.Timelines.Home.Prune(ctx, testAccountID, desiredPreparedItemsLength, desiredIndexedItemsLength)
- suite.NoError(err)
- suite.Equal(25, pruned)
- suite.Equal(5, suite.state.Timelines.Home.GetIndexedLength(ctx, testAccountID))
-
- // Prune same again, nothing should be pruned this time.
- pruned, err = suite.state.Timelines.Home.Prune(ctx, testAccountID, desiredPreparedItemsLength, desiredIndexedItemsLength)
- suite.NoError(err)
- suite.Equal(0, pruned)
- suite.Equal(5, suite.state.Timelines.Home.GetIndexedLength(ctx, testAccountID))
-}
-
-func (suite *PruneTestSuite) TestPruneTo0() {
- var (
- ctx = context.Background()
- testAccountID = suite.testAccounts["local_account_1"].ID
- desiredPreparedItemsLength = 0
- desiredIndexedItemsLength = 0
- )
-
- suite.fillTimeline(testAccountID)
-
- pruned, err := suite.state.Timelines.Home.Prune(ctx, testAccountID, desiredPreparedItemsLength, desiredIndexedItemsLength)
- suite.NoError(err)
- suite.Equal(30, pruned)
- suite.Equal(0, suite.state.Timelines.Home.GetIndexedLength(ctx, testAccountID))
-}
-
-func (suite *PruneTestSuite) TestPruneToInfinityAndBeyond() {
- var (
- ctx = context.Background()
- testAccountID = suite.testAccounts["local_account_1"].ID
- desiredPreparedItemsLength = 9999999
- desiredIndexedItemsLength = 9999999
- )
-
- suite.fillTimeline(testAccountID)
-
- pruned, err := suite.state.Timelines.Home.Prune(ctx, testAccountID, desiredPreparedItemsLength, desiredIndexedItemsLength)
- suite.NoError(err)
- suite.Equal(0, pruned)
- suite.Equal(30, suite.state.Timelines.Home.GetIndexedLength(ctx, testAccountID))
-}
-
-func TestPruneTestSuite(t *testing.T) {
- suite.Run(t, new(PruneTestSuite))
-}
diff --git a/internal/timeline/remove.go b/internal/timeline/remove.go
deleted file mode 100644
index 86352b9fa..000000000
--- a/internal/timeline/remove.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-package timeline
-
-import (
- "container/list"
- "context"
-
- "codeberg.org/gruf/go-kv"
- "github.com/superseriousbusiness/gotosocial/internal/log"
-)
-
-func (t *timeline) Remove(ctx context.Context, statusID string) (int, error) {
- l := log.WithContext(ctx).
- WithFields(kv.Fields{
- {"accountTimeline", t.timelineID},
- {"statusID", statusID},
- }...)
-
- t.Lock()
- defer t.Unlock()
-
- if t.items == nil || t.items.data == nil {
- // Nothing to do.
- return 0, nil
- }
-
- var toRemove []*list.Element
- for e := t.items.data.Front(); e != nil; e = e.Next() {
- entry := e.Value.(*indexedItemsEntry)
-
- if entry.itemID != statusID {
- // Not relevant.
- continue
- }
-
- l.Debug("removing item")
- toRemove = append(toRemove, e)
- }
-
- for _, e := range toRemove {
- t.items.data.Remove(e)
- }
-
- return len(toRemove), nil
-}
-
-func (t *timeline) RemoveAllByOrBoosting(ctx context.Context, accountID string) (int, error) {
- l := log.
- WithContext(ctx).
- WithFields(kv.Fields{
- {"accountTimeline", t.timelineID},
- {"accountID", accountID},
- }...)
-
- t.Lock()
- defer t.Unlock()
-
- if t.items == nil || t.items.data == nil {
- // Nothing to do.
- return 0, nil
- }
-
- var toRemove []*list.Element
- for e := t.items.data.Front(); e != nil; e = e.Next() {
- entry := e.Value.(*indexedItemsEntry)
-
- if entry.accountID != accountID && entry.boostOfAccountID != accountID {
- // Not relevant.
- continue
- }
-
- l.Debug("removing item")
- toRemove = append(toRemove, e)
- }
-
- for _, e := range toRemove {
- t.items.data.Remove(e)
- }
-
- return len(toRemove), nil
-}
diff --git a/internal/timeline/timeline.go b/internal/timeline/timeline.go
deleted file mode 100644
index e7c609638..000000000
--- a/internal/timeline/timeline.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-package timeline
-
-import (
- "context"
- "sync"
- "time"
-)
-
-// GrabFunction is used by a Timeline to grab more items to index.
-//
-// It should be provided to NewTimeline when the caller is creating a timeline
-// (of statuses, notifications, etc).
-//
-// - timelineID: ID of the timeline.
-// - maxID: the maximum item ID desired.
-// - sinceID: the minimum item ID desired.
-// - minID: see sinceID
-// - limit: the maximum amount of items to be returned
-//
-// If an error is returned, the timeline will stop processing whatever request called GrabFunction,
-// and return the error. If no error is returned, but stop = true, this indicates to the caller of GrabFunction
-// that there are no more items to return, and processing should continue with the items already grabbed.
-type GrabFunction func(ctx context.Context, timelineID string, maxID string, sinceID string, minID string, limit int) (items []Timelineable, stop bool, err error)
-
-// FilterFunction is used by a Timeline to filter whether or not a grabbed item should be indexed.
-type FilterFunction func(ctx context.Context, timelineID string, item Timelineable) (shouldIndex bool, err error)
-
-// PrepareFunction converts a Timelineable into a Preparable.
-//
-// For example, this might result in the conversion of a *gtsmodel.Status with the given itemID into a serializable *apimodel.Status.
-type PrepareFunction func(ctx context.Context, timelineID string, itemID string) (Preparable, error)
-
-// SkipInsertFunction indicates whether a new item about to be inserted in the prepared list should be skipped,
-// based on the item itself, the next item in the timeline, and the depth at which nextItem has been found in the list.
-//
-// This will be called for every item found while iterating through a timeline, so callers should be very careful
-// not to do anything expensive here.
-type SkipInsertFunction func(ctx context.Context,
- newItemID string,
- newItemAccountID string,
- newItemBoostOfID string,
- newItemBoostOfAccountID string,
- nextItemID string,
- nextItemAccountID string,
- nextItemBoostOfID string,
- nextItemBoostOfAccountID string,
- depth int) (bool, error)
-
-// Timeline represents a timeline for one account, and contains indexed and prepared items.
-type Timeline interface {
- /*
- RETRIEVAL FUNCTIONS
- */
-
- // Get returns an amount of prepared items with the given parameters.
- // If prepareNext is true, then the next predicted query will be prepared already in a goroutine,
- // to make the next call to Get faster.
- Get(ctx context.Context, amount int, maxID string, sinceID string, minID string, prepareNext bool) ([]Preparable, error)
-
- /*
- INDEXING + PREPARATION FUNCTIONS
- */
-
-// IndexAndPrepareOne puts an item into the timeline at the appropriate place
- // according to its id, and then immediately prepares it.
- //
- // The returned bool indicates whether or not the item was actually inserted
- // into the timeline. This will be false if the item is a boost and the original
- // item, or a boost of it, already exists recently in the timeline.
- IndexAndPrepareOne(ctx context.Context, itemID string, boostOfID string, accountID string, boostOfAccountID string) (bool, error)
-
- // Unprepare clears the prepared version of the given item (and any boosts
- // thereof) from the timeline, but leaves the indexed version in place.
- //
- // This is useful for cache invalidation when the prepared version of the
- // item has changed for some reason (edits, updates, etc), but the item does
- // not need to be removed: it will be prepared again next time Get is called.
- Unprepare(ctx context.Context, itemID string) error
-
- /*
- INFO FUNCTIONS
- */
-
- // TimelineID returns the id of this timeline.
- TimelineID() string
-
- // Len returns the length of the item index at this point in time.
- Len() int
-
- // OldestIndexedItemID returns the id of the rearmost (ie., the oldest) indexed item.
- // If there's no oldest item, an empty string will be returned so make sure to check for this.
- OldestIndexedItemID() string
-
- /*
- UTILITY FUNCTIONS
- */
-
- // LastGot returns the time that Get was last called.
- LastGot() time.Time
-
- // Prune prunes prepared and indexed items in this timeline to the desired lengths.
- // This will be a no-op if the lengths are already < the desired values.
- //
- // The returned int indicates the amount of entries that were removed or unprepared.
- Prune(desiredPreparedItemsLength int, desiredIndexedItemsLength int) int
-
- // Remove removes an item with the given ID.
- //
-// If an item has multiple entries in a timeline, they will all be removed.
- //
- // The returned int indicates the amount of entries that were removed.
- Remove(ctx context.Context, itemID string) (int, error)
-
- // RemoveAllByOrBoosting removes all items created by or boosting the given accountID.
- //
- // The returned int indicates the amount of entries that were removed.
- RemoveAllByOrBoosting(ctx context.Context, accountID string) (int, error)
-}
-
-// timeline fulfils the Timeline interface
-type timeline struct {
- items *indexedItems
- grabFunction GrabFunction
- filterFunction FilterFunction
- prepareFunction PrepareFunction
- timelineID string
- lastGot time.Time
- sync.Mutex
-}
-
-func (t *timeline) TimelineID() string {
- return t.timelineID
-}
-
-// NewTimeline returns a new Timeline with
-// the given ID, using the given functions.
-func NewTimeline(
- ctx context.Context,
- timelineID string,
- grabFunction GrabFunction,
- filterFunction FilterFunction,
- prepareFunction PrepareFunction,
- skipInsertFunction SkipInsertFunction,
-) Timeline {
- return &timeline{
- items: &indexedItems{
- skipInsert: skipInsertFunction,
- },
- grabFunction: grabFunction,
- filterFunction: filterFunction,
- prepareFunction: prepareFunction,
- timelineID: timelineID,
- lastGot: time.Time{},
- }
-}
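
The removed Timeline was parameterized by grab / filter / prepare callbacks as documented above. The toy, self-contained sketch below only illustrates how such callbacks compose; the types are heavily simplified stand-ins and are not part of either the old or the new API.

package main

import (
	"context"
	"fmt"
)

// Simplified stand-ins for the removed callback types.
type grabFn func(ctx context.Context, maxID string, limit int) ([]string, bool, error)
type filterFn func(ctx context.Context, id string) (bool, error)
type prepareFn func(ctx context.Context, id string) (string, error)

// fill grabs a page of item IDs, filters them, and prepares the survivors.
func fill(ctx context.Context, grab grabFn, filter filterFn, prepare prepareFn, limit int) ([]string, error) {
	ids, _, err := grab(ctx, "", limit)
	if err != nil {
		return nil, err
	}

	out := make([]string, 0, len(ids))
	for _, id := range ids {
		ok, err := filter(ctx, id)
		if err != nil || !ok {
			// Filtered out (or unfilterable); skip it.
			continue
		}

		prepared, err := prepare(ctx, id)
		if err != nil {
			continue
		}
		out = append(out, prepared)
	}
	return out, nil
}

func main() {
	ctx := context.Background()
	out, _ := fill(ctx,
		func(ctx context.Context, maxID string, limit int) ([]string, bool, error) {
			return []string{"01C", "01B", "01A"}, true, nil
		},
		func(ctx context.Context, id string) (bool, error) { return id != "01B", nil },
		func(ctx context.Context, id string) (string, error) { return "prepared:" + id, nil },
		3,
	)
	fmt.Println(out) // [prepared:01C prepared:01A]
}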
diff --git a/internal/timeline/timeline_test.go b/internal/timeline/timeline_test.go
deleted file mode 100644
index ffc6d6e53..000000000
--- a/internal/timeline/timeline_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-package timeline_test
-
-import (
- "context"
- "sort"
-
- "github.com/stretchr/testify/suite"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
- "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
- "github.com/superseriousbusiness/gotosocial/internal/state"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
- "github.com/superseriousbusiness/gotosocial/testrig"
-)
-
-type TimelineStandardTestSuite struct {
- suite.Suite
- state *state.State
-
- testAccounts map[string]*gtsmodel.Account
- testStatuses map[string]*gtsmodel.Status
- highestStatusID string
- lowestStatusID string
-}
-
-func (suite *TimelineStandardTestSuite) SetupSuite() {
- suite.testAccounts = testrig.NewTestAccounts()
- suite.testStatuses = testrig.NewTestStatuses()
-}
-
-func (suite *TimelineStandardTestSuite) SetupTest() {
- suite.state = new(state.State)
-
- suite.state.Caches.Init()
- testrig.StartNoopWorkers(suite.state)
-
- testrig.InitTestConfig()
- testrig.InitTestLog()
-
- suite.state.DB = testrig.NewTestDB(suite.state)
-
- testrig.StartTimelines(
- suite.state,
- visibility.NewFilter(suite.state),
- typeutils.NewConverter(suite.state),
- )
-
- testrig.StandardDBSetup(suite.state.DB, nil)
-}
-
-func (suite *TimelineStandardTestSuite) TearDownTest() {
- testrig.StandardDBTeardown(suite.state.DB)
- testrig.StopWorkers(suite.state)
-}
-
-func (suite *TimelineStandardTestSuite) fillTimeline(timelineID string) {
- // Put testrig statuses in a determinate order
- // since we can't trust a map to keep order.
- statuses := []*gtsmodel.Status{}
- for _, s := range suite.testStatuses {
- statuses = append(statuses, s)
- }
-
- sort.Slice(statuses, func(i, j int) bool {
- return statuses[i].ID > statuses[j].ID
- })
-
- // Statuses are now highest -> lowest.
- suite.highestStatusID = statuses[0].ID
- suite.lowestStatusID = statuses[len(statuses)-1].ID
- if suite.highestStatusID < suite.lowestStatusID {
- suite.FailNow("", "statuses weren't ordered properly by sort")
- }
-
- // Put all test statuses into the timeline; we don't
- // need to be fussy about who sees what for these tests.
- for _, status := range statuses {
- if _, err := suite.state.Timelines.Home.IngestOne(context.Background(), timelineID, status); err != nil {
- suite.FailNow(err.Error())
- }
- }
-}
diff --git a/internal/timeline/timelines.go b/internal/timeline/timelines.go
deleted file mode 100644
index 8291fef5e..000000000
--- a/internal/timeline/timelines.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-package timeline
-
-type Timelines struct {
- // Home provides access to account home timelines.
- Home Manager
-
- // List provides access to list timelines.
- List Manager
-
- // prevent pass-by-value.
- _ nocopy
-}
-
-// nocopy when embedded will signal linter to
-// error on pass-by-value of parent struct.
-type nocopy struct{}
-
-func (*nocopy) Lock() {}
-
-func (*nocopy) Unlock() {}
diff --git a/internal/timeline/types.go b/internal/timeline/types.go
deleted file mode 100644
index 6243799f5..000000000
--- a/internal/timeline/types.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-package timeline
-
-// Timelineable represents any item that can be indexed in a timeline.
-type Timelineable interface {
- GetID() string
- GetAccountID() string
- GetBoostOfID() string
- GetBoostOfAccountID() string
-}
-
-// Preparable represents any item that can be prepared in a timeline.
-type Preparable interface {
- GetID() string
- GetAccountID() string
- GetBoostOfID() string
- GetBoostOfAccountID() string
-}
diff --git a/internal/timeline/unprepare_test.go b/internal/timeline/unprepare_test.go
deleted file mode 100644
index 20bef7537..000000000
--- a/internal/timeline/unprepare_test.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// GoToSocial
-// Copyright (C) GoToSocial Authors admin@gotosocial.org
-// SPDX-License-Identifier: AGPL-3.0-or-later
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-package timeline_test
-
-import (
- "context"
- "testing"
-
- "github.com/stretchr/testify/suite"
-
- apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
- "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
- "github.com/superseriousbusiness/gotosocial/internal/id"
-)
-
-type UnprepareTestSuite struct {
- TimelineStandardTestSuite
-}
-
-func (suite *UnprepareTestSuite) TestUnprepareFromFave() {
- var (
- ctx = context.Background()
- testAccount = suite.testAccounts["local_account_1"]
- maxID = ""
- sinceID = ""
- minID = ""
- limit = 1
- local = false
- )
-
- suite.fillTimeline(testAccount.ID)
-
- // Get first status from the top (no params).
- statuses, err := suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- if len(statuses) != 1 {
- suite.FailNow("couldn't get top status")
- }
-
- targetStatus := statuses[0].(*apimodel.Status)
-
- // Check fave stats of the top status.
- suite.Equal(0, targetStatus.FavouritesCount)
- suite.False(targetStatus.Favourited)
-
- // Fave the top status from testAccount.
-	if err := suite.state.DB.PutStatusFave(ctx, &gtsmodel.StatusFave{
- ID: id.NewULID(),
- AccountID: testAccount.ID,
- TargetAccountID: targetStatus.Account.ID,
- StatusID: targetStatus.ID,
- URI: "https://example.org/some/activity/path",
- }); err != nil {
- suite.FailNow(err.Error())
- }
-
- // Repeat call to get first status from the top.
- // Get first status from the top (no params).
- statuses, err = suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- if len(statuses) != 1 {
- suite.FailNow("couldn't get top status")
- }
-
- targetStatus = statuses[0].(*apimodel.Status)
-
- // We haven't yet uncached/unprepared the status,
- // we've only inserted the fave, so counts should
- // stay the same...
- suite.Equal(0, targetStatus.FavouritesCount)
- suite.False(targetStatus.Favourited)
-
- // Now call unprepare.
- suite.state.Timelines.Home.UnprepareItemFromAllTimelines(ctx, targetStatus.ID)
-
- // Now a Get should trigger a fresh prepare of the
- // target status, and the counts should be updated.
- // Repeat call to get first status from the top.
- // Get first status from the top (no params).
- statuses, err = suite.state.Timelines.Home.GetTimeline(
- ctx,
- testAccount.ID,
- maxID,
- sinceID,
- minID,
- limit,
- local,
- )
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- if len(statuses) != 1 {
- suite.FailNow("couldn't get top status")
- }
-
- targetStatus = statuses[0].(*apimodel.Status)
-
- suite.Equal(1, targetStatus.FavouritesCount)
- suite.True(targetStatus.Favourited)
-}
-
-func TestUnprepareTestSuite(t *testing.T) {
- suite.Run(t, new(UnprepareTestSuite))
-}
diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go
index bed683d27..728876129 100644
--- a/internal/transport/transport_test.go
+++ b/internal/transport/transport_test.go
@@ -25,14 +25,12 @@ import (
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/email"
"github.com/superseriousbusiness/gotosocial/internal/federation"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/transport"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -77,12 +75,6 @@ func (suite *TransportTestSuite) SetupTest() {
suite.storage = testrig.NewInMemoryStorage()
suite.state.Storage = suite.storage
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- typeutils.NewConverter(&suite.state),
- )
-
suite.mediaManager = testrig.NewTestMediaManager(&suite.state)
suite.federator = testrig.NewTestFederator(&suite.state, testrig.NewTestTransportController(&suite.state, testrig.NewMockHTTPClient(nil, "../../testrig/media")), suite.mediaManager)
suite.sentEmails = make(map[string]string)
diff --git a/internal/typeutils/astointernal.go b/internal/typeutils/astointernal.go
index 59c696f11..da4d2edb7 100644
--- a/internal/typeutils/astointernal.go
+++ b/internal/typeutils/astointernal.go
@@ -512,7 +512,9 @@ func (c *Converter) ASFollowToFollow(ctx context.Context, followable ap.Followab
	follow := &gtsmodel.Follow{
URI: uri,
+ Account: origin,
AccountID: origin.ID,
+ TargetAccount: target,
TargetAccountID: target.ID,
}
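
The hunk above now attaches the already-loaded origin and target accounts to the follow model alongside their IDs, saving callers a later lookup by ID. A rough standalone illustration of that construction pattern follows; the structs are simplified stand-ins, not the real gtsmodel types.

package main

import "fmt"

type account struct{ ID, Username string }

// follow keeps both the foreign keys and the already-loaded structs,
// so callers can use f.Account directly instead of fetching it again.
type follow struct {
	URI             string
	Account         *account
	AccountID       string
	TargetAccount   *account
	TargetAccountID string
}

func newFollow(uri string, origin, target *account) *follow {
	return &follow{
		URI:             uri,
		Account:         origin,
		AccountID:       origin.ID,
		TargetAccount:   target,
		TargetAccountID: target.ID,
	}
}

func main() {
	f := newFollow("https://example.org/follows/1",
		&account{ID: "01ORIGIN", Username: "alice"},
		&account{ID: "01TARGET", Username: "bob"})
	fmt.Println(f.Account.Username, "->", f.TargetAccount.Username)
}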
diff --git a/internal/typeutils/converter_test.go b/internal/typeutils/converter_test.go
index eb7673743..f04709af5 100644
--- a/internal/typeutils/converter_test.go
+++ b/internal/typeutils/converter_test.go
@@ -22,7 +22,6 @@ import (
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/admin"
"github.com/superseriousbusiness/gotosocial/internal/db"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/state"
@@ -529,12 +528,6 @@ func (suite *TypeUtilsTestSuite) TearDownTest() {
// GetProcessor is a utility function that instantiates a processor.
// Useful when a test in the test suite needs to change some state.
func (suite *TypeUtilsTestSuite) GetProcessor() *processing.Processor {
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.typeconverter,
- )
-
httpClient := testrig.NewMockHTTPClient(nil, "../../testrig/media")
transportController := testrig.NewTestTransportController(&suite.state, httpClient)
mediaManager := testrig.NewTestMediaManager(&suite.state)
diff --git a/internal/util/xslices/slices.go b/internal/util/xslices/slices.go
index 1c1c159b2..44235f0ca 100644
--- a/internal/util/xslices/slices.go
+++ b/internal/util/xslices/slices.go
@@ -21,6 +21,16 @@ import (
"slices"
)
+// ToAny converts a slice of any input type
+// to the abstract empty interface slice type.
+func ToAny[T any](in []T) []any {
+ out := make([]any, len(in))
+ for i, v := range in {
+ out[i] = v
+ }
+ return out
+}
+
// GrowJust increases slice capacity to guarantee
// extra room 'size', where in the case that it does
// need to allocate more it ONLY allocates 'size' extra.
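
The new xslices.ToAny helper added above converts a typed slice to []any, which is handy for variadic any APIs. A small usage sketch: the ToAny body is copied from the hunk above, everything else is illustrative.

package main

import "fmt"

// ToAny converts a slice of any input type to []any,
// mirroring the helper added in internal/util/xslices.
func ToAny[T any](in []T) []any {
	out := make([]any, len(in))
	for i, v := range in {
		out[i] = v
	}
	return out
}

func main() {
	ids := []string{"01A", "01B", "01C"}
	// fmt.Println is variadic over any, so the converted
	// slice can be expanded straight into it.
	fmt.Println(ToAny(ids)...) // 01A 01B 01C
}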
diff --git a/internal/webpush/realsender_test.go b/internal/webpush/realsender_test.go
index 28a5eae95..8cbe7b515 100644
--- a/internal/webpush/realsender_test.go
+++ b/internal/webpush/realsender_test.go
@@ -90,12 +90,6 @@ func (suite *RealSenderStandardTestSuite) SetupTest() {
suite.state.Storage = suite.storage
suite.typeconverter = typeutils.NewConverter(&suite.state)
- testrig.StartTimelines(
- &suite.state,
- visibility.NewFilter(&suite.state),
- suite.typeconverter,
- )
-
suite.httpClient = testrig.NewMockHTTPClient(nil, "../../testrig/media")
suite.httpClient.TestRemotePeople = testrig.NewTestFediPeople()
suite.httpClient.TestRemoteStatuses = testrig.NewTestFediStatuses()
diff --git a/testrig/teststructs.go b/testrig/teststructs.go
index f8eb1b3ed..d438ac2cd 100644
--- a/testrig/teststructs.go
+++ b/testrig/teststructs.go
@@ -69,12 +69,6 @@ func SetupTestStructs(
visFilter := visibility.NewFilter(&state)
intFilter := interaction.NewFilter(&state)
- StartTimelines(
- &state,
- visFilter,
- typeconverter,
- )
-
httpClient := NewMockHTTPClient(nil, rMediaPath)
httpClient.TestRemotePeople = NewTestFediPeople()
httpClient.TestRemoteStatuses = NewTestFediStatuses()
diff --git a/testrig/util.go b/testrig/util.go
index a4bf1bea4..2e9fc31a9 100644
--- a/testrig/util.go
+++ b/testrig/util.go
@@ -20,7 +20,6 @@ package testrig
import (
"bytes"
"context"
- "fmt"
"io"
"mime/multipart"
"net/url"
@@ -30,14 +29,10 @@ import (
"codeberg.org/gruf/go-byteutil"
"codeberg.org/gruf/go-kv/format"
- "github.com/superseriousbusiness/gotosocial/internal/filter/visibility"
"github.com/superseriousbusiness/gotosocial/internal/log"
"github.com/superseriousbusiness/gotosocial/internal/messages"
- tlprocessor "github.com/superseriousbusiness/gotosocial/internal/processing/timeline"
"github.com/superseriousbusiness/gotosocial/internal/processing/workers"
"github.com/superseriousbusiness/gotosocial/internal/state"
- "github.com/superseriousbusiness/gotosocial/internal/timeline"
- "github.com/superseriousbusiness/gotosocial/internal/typeutils"
)
// Starts workers on the provided state using noop processing functions.
@@ -96,28 +91,6 @@ func StopWorkers(state *state.State) {
state.Workers.WebPush.Stop()
}
-func StartTimelines(state *state.State, visFilter *visibility.Filter, converter *typeutils.Converter) {
- state.Timelines.Home = timeline.NewManager(
- tlprocessor.HomeTimelineGrab(state),
- tlprocessor.HomeTimelineFilter(state, visFilter),
- tlprocessor.HomeTimelineStatusPrepare(state, converter),
- tlprocessor.SkipInsert(),
- )
- if err := state.Timelines.Home.Start(); err != nil {
- panic(fmt.Sprintf("error starting home timeline: %s", err))
- }
-
- state.Timelines.List = timeline.NewManager(
- tlprocessor.ListTimelineGrab(state),
- tlprocessor.ListTimelineFilter(state, visFilter),
- tlprocessor.ListTimelineStatusPrepare(state, converter),
- tlprocessor.SkipInsert(),
- )
- if err := state.Timelines.List.Start(); err != nil {
- panic(fmt.Sprintf("error starting list timeline: %s", err))
- }
-}
-
// EqualRequestURIs checks whether inputs have equal request URIs,
// handling cases of url.URL{}, *url.URL{}, string, *string.
func EqualRequestURIs(u1, u2 any) bool {
diff --git a/vendor/codeberg.org/gruf/go-bytes/LICENSE b/vendor/codeberg.org/gruf/go-bytes/LICENSE
deleted file mode 100644
index b7c4417ac..000000000
--- a/vendor/codeberg.org/gruf/go-bytes/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-MIT License
-
-Copyright (c) 2021 gruf
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/codeberg.org/gruf/go-bytes/README.md b/vendor/codeberg.org/gruf/go-bytes/README.md
deleted file mode 100644
index fbdf99798..000000000
--- a/vendor/codeberg.org/gruf/go-bytes/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-drop-in replacement for standard "bytes" library
-
-contains alternative Buffer implementation that provides direct access to the
-underlying byte-slice, with some interesting alternative struct methods. provides
-no safety guards, if you pass bad values it will blow up in your face...
-
-and alternative `ToUpper()` and `ToLower()` implementations that use lookup
-tables for improved performance
-
-provides direct call-throughs to most of the "bytes" library functions to facilitate
-this being a direct drop-in. in some time, i may offer alternative implementations
-for other functions too
\ No newline at end of file
diff --git a/vendor/codeberg.org/gruf/go-bytes/buffer.go b/vendor/codeberg.org/gruf/go-bytes/buffer.go
deleted file mode 100644
index 5c3b2cb5b..000000000
--- a/vendor/codeberg.org/gruf/go-bytes/buffer.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package bytes
-
-import (
- "unicode/utf8"
-)
-
-// Buffer is a very simple buffer implementation that allows
-// access to and reslicing of the underlying byte slice.
-type Buffer struct {
- B []byte
-}
-
-func NewBuffer(b []byte) Buffer {
- return Buffer{
- B: b,
- }
-}
-
-func (b *Buffer) Write(p []byte) (int, error) {
- b.Grow(len(p))
- return copy(b.B[b.Len()-len(p):], p), nil
-}
-
-func (b *Buffer) WriteString(s string) (int, error) {
- b.Grow(len(s))
- return copy(b.B[b.Len()-len(s):], s), nil
-}
-
-func (b *Buffer) WriteByte(c byte) error {
- l := b.Len()
- b.Grow(1)
- b.B[l] = c
- return nil
-}
-
-func (b *Buffer) WriteRune(r rune) (int, error) {
- if r < utf8.RuneSelf {
- b.WriteByte(byte(r))
- return 1, nil
- }
-
- l := b.Len()
- b.Grow(utf8.UTFMax)
- n := utf8.EncodeRune(b.B[l:b.Len()], r)
- b.B = b.B[:l+n]
-
- return n, nil
-}
-
-func (b *Buffer) WriteAt(p []byte, start int64) (int, error) {
- b.Grow(len(p) - int(int64(b.Len())-start))
- return copy(b.B[start:], p), nil
-}
-
-func (b *Buffer) WriteStringAt(s string, start int64) (int, error) {
- b.Grow(len(s) - int(int64(b.Len())-start))
- return copy(b.B[start:], s), nil
-}
-
-func (b *Buffer) Truncate(size int) {
- b.B = b.B[:b.Len()-size]
-}
-
-func (b *Buffer) ShiftByte(index int) {
- copy(b.B[index:], b.B[index+1:])
-}
-
-func (b *Buffer) Shift(start int64, size int) {
- copy(b.B[start:], b.B[start+int64(size):])
-}
-
-func (b *Buffer) DeleteByte(index int) {
- b.ShiftByte(index)
- b.Truncate(1)
-}
-
-func (b *Buffer) Delete(start int64, size int) {
- b.Shift(start, size)
- b.Truncate(size)
-}
-
-func (b *Buffer) InsertByte(index int64, c byte) {
- l := b.Len()
- b.Grow(1)
- copy(b.B[index+1:], b.B[index:l])
- b.B[index] = c
-}
-
-func (b *Buffer) Insert(index int64, p []byte) {
- l := b.Len()
- b.Grow(len(p))
- copy(b.B[index+int64(len(p)):], b.B[index:l])
- copy(b.B[index:], p)
-}
-
-func (b *Buffer) Bytes() []byte {
- return b.B
-}
-
-func (b *Buffer) String() string {
- return string(b.B)
-}
-
-func (b *Buffer) StringPtr() string {
- return BytesToString(b.B)
-}
-
-func (b *Buffer) Cap() int {
- return cap(b.B)
-}
-
-func (b *Buffer) Len() int {
- return len(b.B)
-}
-
-func (b *Buffer) Reset() {
- b.B = b.B[:0]
-}
-
-func (b *Buffer) Grow(size int) {
- b.Guarantee(size)
- b.B = b.B[:b.Len()+size]
-}
-
-func (b *Buffer) Guarantee(size int) {
- if size > b.Cap()-b.Len() {
- nb := make([]byte, 2*b.Cap()+size)
- copy(nb, b.B)
- b.B = nb[:b.Len()]
- }
-}
diff --git a/vendor/codeberg.org/gruf/go-bytes/bytes.go b/vendor/codeberg.org/gruf/go-bytes/bytes.go
deleted file mode 100644
index 5fef75d56..000000000
--- a/vendor/codeberg.org/gruf/go-bytes/bytes.go
+++ /dev/null
@@ -1,261 +0,0 @@
-package bytes
-
-import (
- "bytes"
- "reflect"
- "unsafe"
-)
-
-var (
- _ Bytes = &Buffer{}
- _ Bytes = bytesType{}
-)
-
-// Bytes defines a standard way of retrieving the content of a
-// byte buffer of some-kind.
-type Bytes interface {
- // Bytes returns the byte slice content
- Bytes() []byte
-
- // String returns byte slice cast directly to string, this
- // will cause an allocation but comes with the safety of
- // being an immutable Go string
- String() string
-
- // StringPtr returns byte slice cast to string via the unsafe
- // package. This comes with the same caveats of accessing via
- // .Bytes() in that the content is liable change and is NOT
- // immutable, despite being a string type
- StringPtr() string
-}
-
-type bytesType []byte
-
-func (b bytesType) Bytes() []byte {
- return b
-}
-
-func (b bytesType) String() string {
- return string(b)
-}
-
-func (b bytesType) StringPtr() string {
- return BytesToString(b)
-}
-
-// ToBytes casts the provided byte slice as the simplest possible
-// Bytes interface implementation
-func ToBytes(b []byte) Bytes {
- return bytesType(b)
-}
-
-// Copy returns a new copy of slice b, does NOT maintain nil values
-func Copy(b []byte) []byte {
- p := make([]byte, len(b))
- copy(p, b)
- return p
-}
-
-// BytesToString returns byte slice cast to string via the "unsafe" package
-func BytesToString(b []byte) string {
- return *(*string)(unsafe.Pointer(&b))
-}
-
-// StringToBytes returns the string cast to string via the "unsafe" and "reflect" packages
-func StringToBytes(s string) []byte {
- // thank you to https://github.com/valyala/fasthttp/blob/master/bytesconv.go
- var b []byte
-
- // Get byte + string headers
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
-
- // Manually set bytes to string
- bh.Data = sh.Data
- bh.Len = sh.Len
- bh.Cap = sh.Len
-
- return b
-}
-
-// // InsertByte inserts the supplied byte into the slice at provided position
-// func InsertByte(b []byte, at int, c byte) []byte {
-// return append(append(b[:at], c), b[at:]...)
-// }
-
-// // Insert inserts the supplied byte slice into the slice at provided position
-// func Insert(b []byte, at int, s []byte) []byte {
-// return append(append(b[:at], s...), b[at:]...)
-// }
-
-// ToUpper offers a faster ToUpper implementation using a lookup table
-func ToUpper(b []byte) {
- for i := 0; i < len(b); i++ {
- c := &b[i]
- *c = toUpperTable[*c]
- }
-}
-
-// ToLower offers a faster ToLower implementation using a lookup table
-func ToLower(b []byte) {
- for i := 0; i < len(b); i++ {
- c := &b[i]
- *c = toLowerTable[*c]
- }
-}
-
-// HasBytePrefix returns whether b has the provided byte prefix
-func HasBytePrefix(b []byte, c byte) bool {
- return (len(b) > 0) && (b[0] == c)
-}
-
-// HasByteSuffix returns whether b has the provided byte suffix
-func HasByteSuffix(b []byte, c byte) bool {
- return (len(b) > 0) && (b[len(b)-1] == c)
-}
-
-// HasBytePrefix returns b without the provided leading byte
-func TrimBytePrefix(b []byte, c byte) []byte {
- if HasBytePrefix(b, c) {
- return b[1:]
- }
- return b
-}
-
-// TrimByteSuffix returns b without the provided trailing byte
-func TrimByteSuffix(b []byte, c byte) []byte {
- if HasByteSuffix(b, c) {
- return b[:len(b)-1]
- }
- return b
-}
-
-// Compare is a direct call-through to standard library bytes.Compare()
-func Compare(b, s []byte) int {
- return bytes.Compare(b, s)
-}
-
-// Contains is a direct call-through to standard library bytes.Contains()
-func Contains(b, s []byte) bool {
- return bytes.Contains(b, s)
-}
-
-// TrimPrefix is a direct call-through to standard library bytes.TrimPrefix()
-func TrimPrefix(b, s []byte) []byte {
- return bytes.TrimPrefix(b, s)
-}
-
-// TrimSuffix is a direct call-through to standard library bytes.TrimSuffix()
-func TrimSuffix(b, s []byte) []byte {
- return bytes.TrimSuffix(b, s)
-}
-
-// Equal is a direct call-through to standard library bytes.Equal()
-func Equal(b, s []byte) bool {
- return bytes.Equal(b, s)
-}
-
-// EqualFold is a direct call-through to standard library bytes.EqualFold()
-func EqualFold(b, s []byte) bool {
- return bytes.EqualFold(b, s)
-}
-
-// Fields is a direct call-through to standard library bytes.Fields()
-func Fields(b []byte) [][]byte {
- return bytes.Fields(b)
-}
-
-// FieldsFunc is a direct call-through to standard library bytes.FieldsFunc()
-func FieldsFunc(b []byte, fn func(rune) bool) [][]byte {
- return bytes.FieldsFunc(b, fn)
-}
-
-// HasPrefix is a direct call-through to standard library bytes.HasPrefix()
-func HasPrefix(b, s []byte) bool {
- return bytes.HasPrefix(b, s)
-}
-
-// HasSuffix is a direct call-through to standard library bytes.HasSuffix()
-func HasSuffix(b, s []byte) bool {
- return bytes.HasSuffix(b, s)
-}
-
-// Index is a direct call-through to standard library bytes.Index()
-func Index(b, s []byte) int {
- return bytes.Index(b, s)
-}
-
-// IndexByte is a direct call-through to standard library bytes.IndexByte()
-func IndexByte(b []byte, c byte) int {
- return bytes.IndexByte(b, c)
-}
-
-// IndexAny is a direct call-through to standard library bytes.IndexAny()
-func IndexAny(b []byte, s string) int {
- return bytes.IndexAny(b, s)
-}
-
-// IndexRune is a direct call-through to standard library bytes.IndexRune()
-func IndexRune(b []byte, r rune) int {
- return bytes.IndexRune(b, r)
-}
-
-// IndexFunc is a direct call-through to standard library bytes.IndexFunc()
-func IndexFunc(b []byte, fn func(rune) bool) int {
- return bytes.IndexFunc(b, fn)
-}
-
-// LastIndex is a direct call-through to standard library bytes.LastIndex()
-func LastIndex(b, s []byte) int {
- return bytes.LastIndex(b, s)
-}
-
-// LastIndexByte is a direct call-through to standard library bytes.LastIndexByte()
-func LastIndexByte(b []byte, c byte) int {
- return bytes.LastIndexByte(b, c)
-}
-
-// LastIndexAny is a direct call-through to standard library bytes.LastIndexAny()
-func LastIndexAny(b []byte, s string) int {
- return bytes.LastIndexAny(b, s)
-}
-
-// LastIndexFunc is a direct call-through to standard library bytes.LastIndexFunc()
-func LastIndexFunc(b []byte, fn func(rune) bool) int {
- return bytes.LastIndexFunc(b, fn)
-}
-
-// Replace is a direct call-through to standard library bytes.Replace()
-func Replace(b, s, r []byte, c int) []byte {
- return bytes.Replace(b, s, r, c)
-}
-
-// ReplaceAll is a direct call-through to standard library bytes.ReplaceAll()
-func ReplaceAll(b, s, r []byte) []byte {
- return bytes.ReplaceAll(b, s, r)
-}
-
-// Split is a direct call-through to standard library bytes.Split()
-func Split(b, s []byte) [][]byte {
- return bytes.Split(b, s)
-}
-
-// SplitAfter is a direct call-through to standard library bytes.SplitAfter()
-func SplitAfter(b, s []byte) [][]byte {
- return bytes.SplitAfter(b, s)
-}
-
-// SplitN is a direct call-through to standard library bytes.SplitN()
-func SplitN(b, s []byte, c int) [][]byte {
- return bytes.SplitN(b, s, c)
-}
-
-// SplitAfterN is a direct call-through to standard library bytes.SplitAfterN()
-func SplitAfterN(b, s []byte, c int) [][]byte {
- return bytes.SplitAfterN(b, s, c)
-}
-
-// NewReader is a direct call-through to standard library bytes.NewReader()
-func NewReader(b []byte) *bytes.Reader {
- return bytes.NewReader(b)
-}
diff --git a/vendor/codeberg.org/gruf/go-bytes/bytesconv_table.go b/vendor/codeberg.org/gruf/go-bytes/bytesconv_table.go
deleted file mode 100644
index 3d3058ada..000000000
--- a/vendor/codeberg.org/gruf/go-bytes/bytesconv_table.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package bytes
-
-// Code generated by go run bytesconv_table_gen.go; DO NOT EDIT.
-// See bytesconv_table_gen.go for more information about these tables.
-//
-// Source: https://github.com/valyala/fasthttp/blob/master/bytes_table_gen.go
-
-const (
- toLowerTable = "\x00\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@abcdefghijklmnopqrstuvwxyz[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\u007f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- toUpperTable = "\x00\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`ABCDEFGHIJKLMNOPQRSTUVWXYZ{|}~\u007f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
-)
diff --git a/vendor/codeberg.org/gruf/go-structr/index.go b/vendor/codeberg.org/gruf/go-structr/index.go
index 23d3ffaee..acb58f7a8 100644
--- a/vendor/codeberg.org/gruf/go-structr/index.go
+++ b/vendor/codeberg.org/gruf/go-structr/index.go
@@ -1,6 +1,7 @@
package structr
import (
+ "fmt"
"os"
"reflect"
"strings"
@@ -222,10 +223,10 @@ func (i *Index) get(key string, hook func(*indexed_item)) {
func (i *Index) key(buf *byteutil.Buffer, parts []unsafe.Pointer) string {
buf.B = buf.B[:0]
if len(parts) != len(i.fields) {
- panicf("incorrect number key parts: want=%d received=%d",
+ panic(fmt.Sprintf("incorrect number key parts: want=%d received=%d",
len(i.fields),
len(parts),
- )
+ ))
}
if !allow_zero(i.flags) {
for x, field := range i.fields {
diff --git a/vendor/codeberg.org/gruf/go-structr/runtime.go b/vendor/codeberg.org/gruf/go-structr/runtime.go
index b75dfeba0..1d379b70d 100644
--- a/vendor/codeberg.org/gruf/go-structr/runtime.go
+++ b/vendor/codeberg.org/gruf/go-structr/runtime.go
@@ -70,7 +70,7 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) {
name := names[0]
names = names[1:]
if !is_exported(name) {
- panicf("field is not exported: %s", name)
+ panic(fmt.Sprintf("field is not exported: %s", name))
}
return name
}
@@ -94,7 +94,7 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) {
// Check for valid struct type.
if t.Kind() != reflect.Struct {
- panicf("field %s is not struct (or ptr-to): %s", t, name)
+ panic(fmt.Sprintf("field %s is not struct (or ptr-to): %s", t, name))
}
var ok bool
@@ -102,7 +102,7 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) {
// Look for next field by name.
field, ok = t.FieldByName(name)
if !ok {
- panicf("unknown field: %s", name)
+ panic(fmt.Sprintf("unknown field: %s", name))
}
// Set next offset value.
@@ -258,11 +258,6 @@ func eface_data(a any) unsafe.Pointer {
return (*eface)(unsafe.Pointer(&a)).data
}
-// panicf provides a panic with string formatting.
-func panicf(format string, args ...any) {
- panic(fmt.Sprintf(format, args...))
-}
-
// assert can be called to indicated a block
// of code should not be able to be reached,
// it returns a BUG report with callsite.
diff --git a/vendor/codeberg.org/gruf/go-structr/timeline.go b/vendor/codeberg.org/gruf/go-structr/timeline.go
index 0eb1e3aa5..0014e69ee 100644
--- a/vendor/codeberg.org/gruf/go-structr/timeline.go
+++ b/vendor/codeberg.org/gruf/go-structr/timeline.go
@@ -190,7 +190,8 @@ func (t *Timeline[T, PK]) Select(min, max *PK, length *int, dir Direction) (valu
// Insert will insert the given values into the timeline,
// calling any set invalidate hook on each inserted value.
-func (t *Timeline[T, PK]) Insert(values ...T) {
+// Returns current list length after performing inserts.
+func (t *Timeline[T, PK]) Insert(values ...T) int {
// Acquire lock.
t.mutex.Lock()
@@ -269,6 +270,10 @@ func (t *Timeline[T, PK]) Insert(values ...T) {
// Get func ptrs.
invalid := t.invalid
+ // Get length AFTER
+ // insert to return.
+ len := t.list.len
+
// Done with lock.
t.mutex.Unlock()
@@ -279,6 +284,8 @@ func (t *Timeline[T, PK]) Insert(values ...T) {
invalid(value)
}
}
+
+ return len
}
// Invalidate invalidates all entries stored in index under given keys.
@@ -336,8 +343,8 @@ func (t *Timeline[T, PK]) Invalidate(index *Index, keys ...Key) {
//
// Please note that the entire Timeline{} will be locked for the duration of the range
// operation, i.e. from the beginning of the first yield call until the end of the last.
-func (t *Timeline[T, PK]) Range(dir Direction) func(yield func(T) bool) {
- return func(yield func(T) bool) {
+func (t *Timeline[T, PK]) Range(dir Direction) func(yield func(index int, value T) bool) {
+ return func(yield func(int, T) bool) {
if t.copy == nil {
panic("not initialized")
} else if yield == nil {
@@ -348,7 +355,9 @@ func (t *Timeline[T, PK]) Range(dir Direction) func(yield func(T) bool) {
t.mutex.Lock()
defer t.mutex.Unlock()
+ var i int
switch dir {
+
case Asc:
// Iterate through linked list from bottom (i.e. tail).
for prev := t.list.tail; prev != nil; prev = prev.prev {
@@ -360,9 +369,12 @@ func (t *Timeline[T, PK]) Range(dir Direction) func(yield func(T) bool) {
value := t.copy(item.data.(T))
// Pass to given function.
- if !yield(value) {
+ if !yield(i, value) {
break
}
+
+ // Iter
+ i++
}
case Desc:
@@ -376,9 +388,12 @@ func (t *Timeline[T, PK]) Range(dir Direction) func(yield func(T) bool) {
value := t.copy(item.data.(T))
// Pass to given function.
- if !yield(value) {
+ if !yield(i, value) {
break
}
+
+ // Iter
+ i++
}
}
}
@@ -390,8 +405,8 @@ func (t *Timeline[T, PK]) Range(dir Direction) func(yield func(T) bool) {
//
// Please note that the entire Timeline{} will be locked for the duration of the range
// operation, i.e. from the beginning of the first yield call until the end of the last.
-func (t *Timeline[T, PK]) RangeUnsafe(dir Direction) func(yield func(T) bool) {
- return func(yield func(T) bool) {
+func (t *Timeline[T, PK]) RangeUnsafe(dir Direction) func(yield func(index int, value T) bool) {
+ return func(yield func(int, T) bool) {
if t.copy == nil {
panic("not initialized")
} else if yield == nil {
@@ -402,7 +417,9 @@ func (t *Timeline[T, PK]) RangeUnsafe(dir Direction) func(yield func(T) bool) {
t.mutex.Lock()
defer t.mutex.Unlock()
+ var i int
switch dir {
+
case Asc:
// Iterate through linked list from bottom (i.e. tail).
for prev := t.list.tail; prev != nil; prev = prev.prev {
@@ -411,9 +428,12 @@ func (t *Timeline[T, PK]) RangeUnsafe(dir Direction) func(yield func(T) bool) {
item := (*timeline_item)(prev.data)
// Pass to given function.
- if !yield(item.data.(T)) {
+ if !yield(i, item.data.(T)) {
break
}
+
+ // Iter
+ i++
}
case Desc:
@@ -424,9 +444,12 @@ func (t *Timeline[T, PK]) RangeUnsafe(dir Direction) func(yield func(T) bool) {
item := (*timeline_item)(next.data)
// Pass to given function.
- if !yield(item.data.(T)) {
+ if !yield(i, item.data.(T)) {
break
}
+
+ // Iter
+ i++
}
}
}
@@ -1033,6 +1056,9 @@ indexing:
// checking for collisions.
if !idx.add(key, i_item) {
+ // This key already appears
+ // in this unique index. So
+ // drop new timeline item.
t.delete(t_item)
free_buffer(buf)
return last
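
The vendored go-structr bump above gives Timeline.Insert a return value (the list length after inserting) and adds an index parameter to the Range / RangeUnsafe yield callback. A hedged usage sketch follows, assuming a Timeline value has already been initialized elsewhere; the element and key types are placeholders, and only the Insert / Range signatures and the Asc / Desc directions visible in the hunks are relied on.

package timelineusage

import (
	"codeberg.org/gruf/go-structr"
)

// status is a hypothetical element type for this example; real callers
// instantiate Timeline with their own model and key types.
type status struct{ ID string }

// insertAndCollect inserts the given statuses into an already-initialized
// timeline, then walks it newest-first collecting IDs. Construction and
// configuration of the Timeline are deliberately omitted here.
func insertAndCollect(t *structr.Timeline[*status, string], in ...*status) (length int, ids []string) {
	// Insert now reports the list length after the inserts.
	length = t.Insert(in...)

	// Range now yields (index, value) pairs; Desc walks newest-first.
	t.Range(structr.Desc)(func(i int, s *status) bool {
		_ = i // position in the walk; 0 is the newest entry for Desc
		ids = append(ids, s.ID)
		return true // keep iterating
	})

	return length, ids
}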
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ba0a1c7e6..3824b0a5c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -215,9 +215,6 @@ code.superseriousbusiness.org/oauth2/v4/generates
code.superseriousbusiness.org/oauth2/v4/manage
code.superseriousbusiness.org/oauth2/v4/models
code.superseriousbusiness.org/oauth2/v4/server
-# codeberg.org/gruf/go-bytes v1.0.2
-## explicit; go 1.14
-codeberg.org/gruf/go-bytes
# codeberg.org/gruf/go-bytesize v1.0.3
## explicit; go 1.17
codeberg.org/gruf/go-bytesize
@@ -280,7 +277,7 @@ codeberg.org/gruf/go-storage/disk
codeberg.org/gruf/go-storage/internal
codeberg.org/gruf/go-storage/memory
codeberg.org/gruf/go-storage/s3
-# codeberg.org/gruf/go-structr v0.9.6
+# codeberg.org/gruf/go-structr v0.9.7
## explicit; go 1.22
codeberg.org/gruf/go-structr
# github.com/DmitriyVTitov/size v1.5.0