Mirror of https://github.com/superseriousbusiness/gotosocial.git (synced 2025-12-07 13:48:08 -06:00)
[chore] consolidate caching libraries (#704)
* add miekg/dns dependency
* set/validate accountDomain
* move finger to dereferencer
* totally break GetRemoteAccount
* start reworking finger func a bit
* start reworking getRemoteAccount a bit
* move mention parts to namestring
* rework webfingerget
* use util function to extract webfinger parts
* use accountDomain
* rework finger again, final form
* just a real nasty commit, the worst
* remove refresh from account
* use new ASRepToAccount signature
* fix incorrect debug call
* fix for new getRemoteAccount
* rework GetRemoteAccount
* start updating tests to remove repetition
* break a lot of tests

  Move shared test logic into the testrig, rather than having it scattered all over the place. This allows us to just mock the transport controller once, and have all tests use it (unless they need not to for some other reason).

* fix up tests to use main mock httpclient
* webfinger only if necessary
* cheeky linting with the lads
* update mentionName regex recognize instance accounts
* don't finger instance accounts
* test webfinger part extraction
* increase default worker count to 4 per cpu
* don't repeat regex parsing
* final search for discovered accountDomain
* be more permissive in namestring lookup
* add more extraction tests
* simplify GetParseMentionFunc
* skip long search if local account
* fix broken test
* consolidate to all use same caching libraries
* perform more caching in the database layer
* remove ASNote cache
* update cache library, improve db tracing hooks
* return ErrNoEntries if no account status IDs found, small formatting changes
* fix tests, thanks tobi!

Signed-off-by: kim <grufwub@gmail.com>
Co-authored-by: tsmethurst <tobi.smethurst@protonmail.com>
This commit is contained in:
parent
211266c072
commit
7cc40302a5
67 changed files with 3159 additions and 1244 deletions
114 internal/cache/account.go (vendored)

@@ -1,103 +1,62 @@
 package cache

 import (
-    "sync"
     "time"

-    "github.com/ReneKroon/ttlcache"
-    "github.com/sirupsen/logrus"
+    "codeberg.org/gruf/go-cache/v2"
     "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
 )

-// AccountCache is a wrapper around ttlcache.Cache to provide URL and URI lookups for gtsmodel.Account
+// AccountCache is a cache wrapper to provide URL and URI lookups for gtsmodel.Account
 type AccountCache struct {
-    cache *ttlcache.Cache   // map of IDs -> cached accounts
-    urls  map[string]string // map of account URLs -> IDs
-    uris  map[string]string // map of account URIs -> IDs
-    mutex sync.Mutex
+    cache cache.LookupCache[string, string, *gtsmodel.Account]
 }

 // NewAccountCache returns a new instantiated AccountCache object
 func NewAccountCache() *AccountCache {
-    c := AccountCache{
-        cache: ttlcache.NewCache(),
-        urls:  make(map[string]string, 100),
-        uris:  make(map[string]string, 100),
-        mutex: sync.Mutex{},
-    }
+    c := &AccountCache{}
+    c.cache = cache.NewLookup(cache.LookupCfg[string, string, *gtsmodel.Account]{
+        RegisterLookups: func(lm *cache.LookupMap[string, string]) {
+            lm.RegisterLookup("uri")
+            lm.RegisterLookup("url")
+        },

-    // Set callback to purge lookup maps on expiration
-    c.cache.SetExpirationCallback(func(key string, value interface{}) {
-        account, ok := value.(*gtsmodel.Account)
-        if !ok {
-            logrus.Panicf("AccountCache could not assert entry with key %s to *gtsmodel.Account", key)
-        }
+        AddLookups: func(lm *cache.LookupMap[string, string], acc *gtsmodel.Account) {
+            if uri := acc.URI; uri != "" {
+                lm.Set("uri", uri, acc.ID)
+            }
+            if url := acc.URL; url != "" {
+                lm.Set("url", url, acc.ID)
+            }
+        },

-        c.mutex.Lock()
-        delete(c.urls, account.URL)
-        delete(c.uris, account.URI)
-        c.mutex.Unlock()
+        DeleteLookups: func(lm *cache.LookupMap[string, string], acc *gtsmodel.Account) {
+            if uri := acc.URI; uri != "" {
+                lm.Delete("uri", uri)
+            }
+            if url := acc.URL; url != "" {
+                lm.Delete("url", url)
+            }
+        },
     })

-    return &c
+    c.cache.SetTTL(time.Minute*5, false)
+    c.cache.Start(time.Second * 10)
+    return c
 }

 // GetByID attempts to fetch a account from the cache by its ID, you will receive a copy for thread-safety
 func (c *AccountCache) GetByID(id string) (*gtsmodel.Account, bool) {
-    c.mutex.Lock()
-    account, ok := c.getByID(id)
-    c.mutex.Unlock()
-    return account, ok
+    return c.cache.Get(id)
 }

 // GetByURL attempts to fetch a account from the cache by its URL, you will receive a copy for thread-safety
 func (c *AccountCache) GetByURL(url string) (*gtsmodel.Account, bool) {
-    // Perform safe ID lookup
-    c.mutex.Lock()
-    id, ok := c.urls[url]
-
-    // Not found, unlock early
-    if !ok {
-        c.mutex.Unlock()
-        return nil, false
-    }
-
-    // Attempt account lookup
-    account, ok := c.getByID(id)
-    c.mutex.Unlock()
-    return account, ok
+    return c.cache.GetBy("url", url)
 }

 // GetByURI attempts to fetch a account from the cache by its URI, you will receive a copy for thread-safety
 func (c *AccountCache) GetByURI(uri string) (*gtsmodel.Account, bool) {
-    // Perform safe ID lookup
-    c.mutex.Lock()
-    id, ok := c.uris[uri]
-
-    // Not found, unlock early
-    if !ok {
-        c.mutex.Unlock()
-        return nil, false
-    }
-
-    // Attempt account lookup
-    account, ok := c.getByID(id)
-    c.mutex.Unlock()
-    return account, ok
-}
-
-// getByID performs an unsafe (no mutex locks) lookup of account by ID, returning a copy of account in cache
-func (c *AccountCache) getByID(id string) (*gtsmodel.Account, bool) {
-    v, ok := c.cache.Get(id)
-    if !ok {
-        return nil, false
-    }
-
-    a, ok := v.(*gtsmodel.Account)
-    if !ok {
-        panic("account cache entry was not an account")
-    }
-
-    return copyAccount(a), true
+    return c.cache.GetBy("uri", uri)
 }

 // Put places a account in the cache, ensuring that the object place is a copy for thread-safety

@@ -105,16 +64,7 @@ func (c *AccountCache) Put(account *gtsmodel.Account) {
     if account == nil || account.ID == "" {
         panic("invalid account")
     }

-    c.mutex.Lock()
     c.cache.Set(account.ID, copyAccount(account))
-    if account.URL != "" {
-        c.urls[account.URL] = account.ID
-    }
-    if account.URI != "" {
-        c.uris[account.URI] = account.ID
-    }
-    c.mutex.Unlock()
 }

 // copyAccount performs a surface-level copy of account, only keeping attached IDs intact, not the objects.
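To make the rewritten cache concrete, here is a minimal usage sketch of the AccountCache API kept by this file (NewAccountCache, Put, GetByURI), as it would be called from inside the GoToSocial module. The account values are made up for illustration only.

package main

import (
    "fmt"

    "github.com/superseriousbusiness/gotosocial/internal/cache"
    "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
)

func main() {
    // NewAccountCache now wires up a go-cache/v2 LookupCache internally,
    // with "uri" and "url" lookups and a 5 minute TTL.
    c := cache.NewAccountCache()

    // Put stores a copy of the account, so later mutations of this struct
    // do not leak into the cache. The values below are hypothetical.
    c.Put(&gtsmodel.Account{
        ID:  "01EXAMPLEACCOUNTID",
        URI: "https://example.org/users/someone",
        URL: "https://example.org/@someone",
    })

    // The getters return a copy and a found flag; GetByURI goes through
    // the "uri" lookup registered in NewAccountCache.
    if account, ok := c.GetByURI("https://example.org/users/someone"); ok {
        fmt.Println(account.ID)
    }
}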
45 internal/cache/cache.go (vendored)

@@ -1,45 +0,0 @@
-/*
-   GoToSocial
-   Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org
-
-   This program is free software: you can redistribute it and/or modify
-   it under the terms of the GNU Affero General Public License as published by
-   the Free Software Foundation, either version 3 of the License, or
-   (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-   GNU Affero General Public License for more details.
-
-   You should have received a copy of the GNU Affero General Public License
-   along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package cache
-
-import (
-    "time"
-
-    "github.com/ReneKroon/ttlcache"
-)
-
-// Cache defines an in-memory cache that is safe to be wiped when the application is restarted
-type Cache interface {
-    Store(k string, v interface{}) error
-    Fetch(k string) (interface{}, error)
-}
-
-type cache struct {
-    c *ttlcache.Cache
-}
-
-// New returns a new in-memory cache.
-func New() Cache {
-    c := ttlcache.NewCache()
-    c.SetTTL(5 * time.Minute)
-    cache := &cache{
-        c: c,
-    }
-    return cache
-}
27 internal/cache/error.go (vendored)

@@ -1,27 +0,0 @@
-/*
-   GoToSocial
-   Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org
-
-   This program is free software: you can redistribute it and/or modify
-   it under the terms of the GNU Affero General Public License as published by
-   the Free Software Foundation, either version 3 of the License, or
-   (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-   GNU Affero General Public License for more details.
-
-   You should have received a copy of the GNU Affero General Public License
-   along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package cache
-
-import "errors"
-
-// Error models an error returned by the in-memory cache.
-type Error error
-
-// ErrNotFound means that a value for the requested key was not found in the cache.
-var ErrNotFound = errors.New("value not found in cache")
28 internal/cache/fetch.go (vendored)

@@ -1,28 +0,0 @@
-/*
-   GoToSocial
-   Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org
-
-   This program is free software: you can redistribute it and/or modify
-   it under the terms of the GNU Affero General Public License as published by
-   the Free Software Foundation, either version 3 of the License, or
-   (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-   GNU Affero General Public License for more details.
-
-   You should have received a copy of the GNU Affero General Public License
-   along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package cache
-
-func (c *cache) Fetch(k string) (interface{}, error) {
-    i, stored := c.c.Get(k)
-    if !stored {
-        return nil, ErrNotFound
-    }
-
-    return i, nil
-}
114 internal/cache/status.go (vendored)

@@ -1,103 +1,62 @@
 package cache

 import (
-    "sync"
     "time"

-    "github.com/ReneKroon/ttlcache"
-    "github.com/sirupsen/logrus"
+    "codeberg.org/gruf/go-cache/v2"
    "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
 )

-// StatusCache is a wrapper around ttlcache.Cache to provide URL and URI lookups for gtsmodel.Status
+// StatusCache is a cache wrapper to provide URL and URI lookups for gtsmodel.Status
 type StatusCache struct {
-    cache *ttlcache.Cache   // map of IDs -> cached statuses
-    urls  map[string]string // map of status URLs -> IDs
-    uris  map[string]string // map of status URIs -> IDs
-    mutex sync.Mutex
+    cache cache.LookupCache[string, string, *gtsmodel.Status]
 }

 // NewStatusCache returns a new instantiated statusCache object
 func NewStatusCache() *StatusCache {
-    c := StatusCache{
-        cache: ttlcache.NewCache(),
-        urls:  make(map[string]string, 100),
-        uris:  make(map[string]string, 100),
-        mutex: sync.Mutex{},
-    }
+    c := &StatusCache{}
+    c.cache = cache.NewLookup(cache.LookupCfg[string, string, *gtsmodel.Status]{
+        RegisterLookups: func(lm *cache.LookupMap[string, string]) {
+            lm.RegisterLookup("uri")
+            lm.RegisterLookup("url")
+        },

-    // Set callback to purge lookup maps on expiration
-    c.cache.SetExpirationCallback(func(key string, value interface{}) {
-        status, ok := value.(*gtsmodel.Status)
-        if !ok {
-            logrus.Panicf("StatusCache could not assert entry with key %s to *gtsmodel.Status", key)
-        }
+        AddLookups: func(lm *cache.LookupMap[string, string], status *gtsmodel.Status) {
+            if uri := status.URI; uri != "" {
+                lm.Set("uri", uri, status.ID)
+            }
+            if url := status.URL; url != "" {
+                lm.Set("url", url, status.ID)
+            }
+        },

-        c.mutex.Lock()
-        delete(c.urls, status.URL)
-        delete(c.uris, status.URI)
-        c.mutex.Unlock()
+        DeleteLookups: func(lm *cache.LookupMap[string, string], status *gtsmodel.Status) {
+            if uri := status.URI; uri != "" {
+                lm.Delete("uri", uri)
+            }
+            if url := status.URL; url != "" {
+                lm.Delete("url", url)
+            }
+        },
     })

-    return &c
+    c.cache.SetTTL(time.Minute*5, false)
+    c.cache.Start(time.Second * 10)
+    return c
 }

 // GetByID attempts to fetch a status from the cache by its ID, you will receive a copy for thread-safety
 func (c *StatusCache) GetByID(id string) (*gtsmodel.Status, bool) {
-    c.mutex.Lock()
-    status, ok := c.getByID(id)
-    c.mutex.Unlock()
-    return status, ok
+    return c.cache.Get(id)
 }

 // GetByURL attempts to fetch a status from the cache by its URL, you will receive a copy for thread-safety
 func (c *StatusCache) GetByURL(url string) (*gtsmodel.Status, bool) {
-    // Perform safe ID lookup
-    c.mutex.Lock()
-    id, ok := c.urls[url]
-
-    // Not found, unlock early
-    if !ok {
-        c.mutex.Unlock()
-        return nil, false
-    }
-
-    // Attempt status lookup
-    status, ok := c.getByID(id)
-    c.mutex.Unlock()
-    return status, ok
+    return c.cache.GetBy("url", url)
 }

 // GetByURI attempts to fetch a status from the cache by its URI, you will receive a copy for thread-safety
 func (c *StatusCache) GetByURI(uri string) (*gtsmodel.Status, bool) {
-    // Perform safe ID lookup
-    c.mutex.Lock()
-    id, ok := c.uris[uri]
-
-    // Not found, unlock early
-    if !ok {
-        c.mutex.Unlock()
-        return nil, false
-    }
-
-    // Attempt status lookup
-    status, ok := c.getByID(id)
-    c.mutex.Unlock()
-    return status, ok
-}
-
-// getByID performs an unsafe (no mutex locks) lookup of status by ID, returning a copy of status in cache
-func (c *StatusCache) getByID(id string) (*gtsmodel.Status, bool) {
-    v, ok := c.cache.Get(id)
-    if !ok {
-        return nil, false
-    }
-
-    s, ok := v.(*gtsmodel.Status)
-    if !ok {
-        panic("status cache entry was not a status")
-    }
-
-    return copyStatus(s), true
+    return c.cache.GetBy("uri", uri)
 }

 // Put places a status in the cache, ensuring that the object place is a copy for thread-safety

@@ -105,16 +64,7 @@ func (c *StatusCache) Put(status *gtsmodel.Status) {
     if status == nil || status.ID == "" {
        panic("invalid status")
     }

-    c.mutex.Lock()
     c.cache.Set(status.ID, copyStatus(status))
-    if status.URL != "" {
-        c.urls[status.URL] = status.ID
-    }
-    if status.URI != "" {
-        c.uris[status.URI] = status.ID
-    }
-    c.mutex.Unlock()
 }

 // copyStatus performs a surface-level copy of status, only keeping attached IDs intact, not the objects.
24 internal/cache/store.go (vendored)

@@ -1,24 +0,0 @@
-/*
-   GoToSocial
-   Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org
-
-   This program is free software: you can redistribute it and/or modify
-   it under the terms of the GNU Affero General Public License as published by
-   the Free Software Foundation, either version 3 of the License, or
-   (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-   GNU Affero General Public License for more details.
-
-   You should have received a copy of the GNU Affero General Public License
-   along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-package cache
-
-func (c *cache) Store(k string, v interface{}) error {
-    c.c.Set(k, v)
-    return nil
-}
@@ -35,8 +35,9 @@
 )

 type accountDB struct {
-    conn  *DBConn
-    cache *cache.AccountCache
+    conn   *DBConn
+    cache  *cache.AccountCache
+    status *statusDB
 }

 func (a *accountDB) newAccountQ(account *gtsmodel.Account) *bun.SelectQuery {

@@ -232,11 +233,12 @@ func (a *accountDB) CountAccountStatuses(ctx context.Context, accountID string)
 }

 func (a *accountDB) GetAccountStatuses(ctx context.Context, accountID string, limit int, excludeReplies bool, excludeReblogs bool, maxID string, minID string, pinnedOnly bool, mediaOnly bool, publicOnly bool) ([]*gtsmodel.Status, db.Error) {
-    statuses := []*gtsmodel.Status{}
+    statusIDs := []string{}

     q := a.conn.
         NewSelect().
-        Model(&statuses).
+        Table("statuses").
+        Column("id").
         Order("id DESC")

     if accountID != "" {

@@ -295,14 +297,30 @@ func (a *accountDB) GetAccountStatuses(ctx context.Context, accountID string, li
         q = q.Where("visibility = ?", gtsmodel.VisibilityPublic)
     }

-    if err := q.Scan(ctx); err != nil {
+    if err := q.Scan(ctx, &statusIDs); err != nil {
         return nil, a.conn.ProcessError(err)
     }

-    if len(statuses) == 0 {
+    // Catch case of no statuses early
+    if len(statusIDs) == 0 {
         return nil, db.ErrNoEntries
     }

+    // Allocate return slice (will be at most len statusIDS)
+    statuses := make([]*gtsmodel.Status, 0, len(statusIDs))
+
+    for _, id := range statusIDs {
+        // Fetch from status from database by ID
+        status, err := a.status.GetStatusByID(ctx, id)
+        if err != nil {
+            logrus.Errorf("GetAccountStatuses: error getting status %q: %v", id, err)
+            continue
+        }
+
+        // Append to return slice
+        statuses = append(statuses, status)
+    }
+
     return statuses, nil
 }
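The GetAccountStatuses rewrite above is the pattern this PR applies throughout the DB layer: select bare IDs, then hydrate each ID through a per-row getter that consults a cache before hitting the database. Below is a stripped-down, self-contained sketch of that shape; the Status type, loader, and cache here are stand-ins, not the real GoToSocial API.

package main

import (
    "fmt"
    "sync"
)

type Status struct{ ID, Content string }

// hydrate turns a list of IDs into full objects, preferring the cache and
// falling back to a loader (e.g. a database query) on a miss. Failed rows
// are logged and skipped rather than failing the whole page, mirroring the
// rewritten GetAccountStatuses.
func hydrate(ids []string, cache *sync.Map, load func(string) (*Status, error)) []*Status {
    out := make([]*Status, 0, len(ids))
    for _, id := range ids {
        if v, ok := cache.Load(id); ok {
            out = append(out, v.(*Status))
            continue
        }
        s, err := load(id)
        if err != nil {
            fmt.Printf("hydrate: error getting status %q: %v\n", id, err)
            continue
        }
        cache.Store(id, s)
        out = append(out, s)
    }
    return out
}

func main() {
    var c sync.Map
    statuses := hydrate([]string{"01AAA", "01BBB"}, &c, func(id string) (*Status, error) {
        return &Status{ID: id, Content: "loaded from db"}, nil
    })
    fmt.Println(len(statuses)) // 2
}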
@@ -31,7 +31,6 @@ import (
     "strings"
     "time"

-    "github.com/ReneKroon/ttlcache"
     "github.com/jackc/pgx/v4"
     "github.com/jackc/pgx/v4/stdlib"
     "github.com/sirupsen/logrus"

@@ -46,6 +45,7 @@ import (
     "github.com/uptrace/bun/dialect/sqlitedialect"
     "github.com/uptrace/bun/migrate"

+    grufcache "codeberg.org/gruf/go-cache/v2"
     "modernc.org/sqlite"
 )

@@ -136,11 +136,8 @@ func NewBunDBService(ctx context.Context) (db.DB, error) {
         return nil, fmt.Errorf("database type %s not supported for bundb", dbType)
     }

-    // add a hook to log queries and the time they take
-    // only do this for logging where performance isn't 1st concern
-    if logrus.GetLevel() >= logrus.DebugLevel && config.GetLogDbQueries() {
-        conn.DB.AddQueryHook(newDebugQueryHook())
-    }
+    // Add database query hook
+    conn.DB.AddQueryHook(queryHook{})

     // table registration is needed for many-to-many, see:
     // https://bun.uptrace.dev/orm/many-to-many-relation/

@@ -155,7 +152,27 @@ func NewBunDBService(ctx context.Context) (db.DB, error) {
         return nil, fmt.Errorf("db migration error: %s", err)
     }

+    // Create DB structs that require ptrs to each other
+    accounts := &accountDB{conn: conn, cache: cache.NewAccountCache()}
+    status := &statusDB{conn: conn, cache: cache.NewStatusCache()}
+    timeline := &timelineDB{conn: conn}
+
+    // Setup DB cross-referencing
+    accounts.status = status
+    status.accounts = accounts
+    timeline.status = status
+
+    // Prepare mentions cache
+    // TODO: move into internal/cache
+    mentionCache := grufcache.New[string, *gtsmodel.Mention]()
+    mentionCache.SetTTL(time.Minute*5, false)
+    mentionCache.Start(time.Second * 10)
+
+    // Prepare notifications cache
+    // TODO: move into internal/cache
+    notifCache := grufcache.New[string, *gtsmodel.Notification]()
+    notifCache.SetTTL(time.Minute*5, false)
+    notifCache.Start(time.Second * 10)
+
     ps := &bunDBService{
         Account: accounts,

@@ -179,11 +196,11 @@ func NewBunDBService(ctx context.Context) (db.DB, error) {
         },
         Mention: &mentionDB{
             conn:  conn,
-            cache: ttlcache.NewCache(),
+            cache: mentionCache,
         },
         Notification: &notificationDB{
             conn:  conn,
-            cache: ttlcache.NewCache(),
+            cache: notifCache,
         },
         Relationship: &relationshipDB{
             conn: conn,

@@ -191,15 +208,9 @@ func NewBunDBService(ctx context.Context) (db.DB, error) {
         Session: &sessionDB{
             conn: conn,
         },
-        Status: &statusDB{
-            conn:     conn,
-            cache:    cache.NewStatusCache(),
-            accounts: accounts,
-        },
-        Timeline: &timelineDB{
-            conn: conn,
-        },
-        conn: conn,
+        Status:   status,
+        Timeline: timeline,
+        conn:     conn,
     }

     // we can confidently return this useable service now
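The mention and notification caches above are plain key/value caches from codeberg.org/gruf/go-cache/v2, configured with the same 5 minute TTL and 10 second sweep interval as in NewBunDBService. Here is a standalone sketch using only the calls that appear in this diff (New, SetTTL, Start, Set, Get); the Mention type below is a stand-in for gtsmodel.Mention.

package main

import (
    "fmt"
    "time"

    cache "codeberg.org/gruf/go-cache/v2"
)

type Mention struct{ ID string }

func main() {
    // Same configuration as the mention/notification caches in NewBunDBService.
    mentionCache := cache.New[string, *Mention]()
    mentionCache.SetTTL(time.Minute*5, false) // entries live for 5 minutes
    mentionCache.Start(time.Second * 10)      // background sweep every 10 seconds

    mentionCache.Set("01MENTIONID", &Mention{ID: "01MENTIONID"})

    if m, ok := mentionCache.Get("01MENTIONID"); ok {
        fmt.Println(m.ID)
    }
}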
@@ -11,13 +11,11 @@ import (

 // DBConn wrapps a bun.DB conn to provide SQL-type specific additional functionality
 type DBConn struct {
-    // TODO: move *Config here, no need to be in each struct type
-
     errProc func(error) db.Error // errProc is the SQL-type specific error processor
     *bun.DB                      // DB is the underlying bun.DB connection
 }

-// WrapDBConn @TODO
+// WrapDBConn wraps a bun DB connection to provide our own error processing dependent on DB dialect.
 func WrapDBConn(dbConn *bun.DB) *DBConn {
     var errProc func(error) db.Error
     switch dbConn.Dialect().Name() {

@@ -36,21 +34,31 @@ func WrapDBConn(dbConn *bun.DB) *DBConn {

 // RunInTx wraps execution of the supplied transaction function.
 func (conn *DBConn) RunInTx(ctx context.Context, fn func(bun.Tx) error) db.Error {
-    // Acquire a new transaction
-    tx, err := conn.BeginTx(ctx, nil)
-    if err != nil {
-        return conn.ProcessError(err)
-    }
+    return conn.ProcessError(func() error {
+        // Acquire a new transaction
+        tx, err := conn.BeginTx(ctx, nil)
+        if err != nil {
+            return err
+        }

-    // Perform supplied transaction
-    if err = fn(tx); err != nil {
-        tx.Rollback() //nolint
-        return conn.ProcessError(err)
-    }
+        var done bool

-    // Finally, commit transaction
-    err = tx.Commit()
-    return conn.ProcessError(err)
+        defer func() {
+            if !done {
+                _ = tx.Rollback()
+            }
+        }()
+
+        // Perform supplied transaction
+        if err := fn(tx); err != nil {
+            return err
+        }
+
+        // Finally, commit
+        err = tx.Commit()
+        done = true
+        return err
+    }())
 }

 // ProcessError processes an error to replace any known values with our own db.Error types,

@@ -83,7 +91,6 @@ func (conn *DBConn) Exists(ctx context.Context, query *bun.SelectQuery) (bool, d

 // NotExists is the functional opposite of conn.Exists()
 func (conn *DBConn) NotExists(ctx context.Context, query *bun.SelectQuery) (bool, db.Error) {
-    // Simply inverse of conn.exists()
     exists, err := conn.Exists(ctx, query)
     return !exists, err
 }
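With the rewritten RunInTx, callers only supply the transactional body: returning an error triggers the deferred rollback, returning nil lets the transaction commit, and ProcessError is applied once to whatever comes out. A hypothetical caller inside the bundb package might look like the sketch below; renameAccount, the table, and the column names are illustrative, not part of this PR.

package bundb

import (
    "context"

    "github.com/superseriousbusiness/gotosocial/internal/db"
    "github.com/uptrace/bun"
)

// renameAccount is a made-up example showing the caller side of RunInTx:
// return an error to roll back, return nil to commit.
func (a *accountDB) renameAccount(ctx context.Context, id string, username string) db.Error {
    return a.conn.RunInTx(ctx, func(tx bun.Tx) error {
        _, err := tx.NewUpdate().
            Table("accounts").
            Set("username = ?", username).
            Where("id = ?", id).
            Exec(ctx)
        return err
    })
}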
@@ -74,6 +74,5 @@ func (d *domainDB) AreURIsBlocked(ctx context.Context, uris []*url.URL) (bool, d
     for _, uri := range uris {
         domains = append(domains, uri.Hostname())
     }
-
     return d.AreDomainsBlocked(ctx, domains)
 }
@@ -26,35 +26,33 @@ import (
     "github.com/uptrace/bun"
 )

-func newDebugQueryHook() bun.QueryHook {
-    return &debugQueryHook{}
-}
+// queryHook implements bun.QueryHook
+type queryHook struct{}

-// debugQueryHook implements bun.QueryHook
-type debugQueryHook struct {
-}
-
-func (q *debugQueryHook) BeforeQuery(ctx context.Context, _ *bun.QueryEvent) context.Context {
-    // do nothing
-    return ctx
+func (queryHook) BeforeQuery(ctx context.Context, _ *bun.QueryEvent) context.Context {
+    return ctx // do nothing
 }

-// AfterQuery logs the time taken to query, the operation (select, update, etc), and the query itself as translated by bun.
-func (q *debugQueryHook) AfterQuery(_ context.Context, event *bun.QueryEvent) {
-    dur := time.Since(event.StartTime).Round(time.Microsecond)
-    l := logrus.WithFields(logrus.Fields{
-        "duration":  dur,
-        "operation": event.Operation(),
-    })
+func (queryHook) AfterQuery(_ context.Context, event *bun.QueryEvent) {
+    // Get the DB query duration
+    dur := time.Since(event.StartTime)

-    if dur > 1*time.Second {
-        l.Warnf("SLOW DATABASE QUERY [%s] %s", dur, event.Query)
-        return
+    log := func(lvl logrus.Level, msg string) {
+        logrus.WithFields(logrus.Fields{
+            "duration":  dur,
+            "operation": event.Operation(),
+            "query":     event.Query,
+        }).Log(lvl, msg)
     }

-    if logrus.GetLevel() == logrus.TraceLevel {
-        l.Tracef("[%s] %s", dur, event.Query)
-    } else {
-        l.Debugf("[%s] %s", dur, event.Operation())
+    switch {
+    // Warn on slow database queries
+    case dur > time.Second:
+        log(logrus.WarnLevel, "SLOW DATABASE QUERY")
+
+    // On trace, we log query information
+    case logrus.GetLevel() == logrus.TraceLevel:
+        log(logrus.TraceLevel, "database query")
     }
 }
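For reference, this is roughly what bun requires of a query hook like the one above: a type with BeforeQuery and AfterQuery methods, registered once via (*bun.DB).AddQueryHook (the diff does this in NewBunDBService with conn.DB.AddQueryHook(queryHook{})). The sketch below prints instead of using logrus and is not the GoToSocial implementation.

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/uptrace/bun"
)

// timingHook is a minimal bun.QueryHook in the same shape as queryHook above.
type timingHook struct{}

// Compile-time check that timingHook satisfies bun.QueryHook.
var _ bun.QueryHook = timingHook{}

func (timingHook) BeforeQuery(ctx context.Context, _ *bun.QueryEvent) context.Context {
    return ctx // nothing to do before the query runs
}

func (timingHook) AfterQuery(_ context.Context, event *bun.QueryEvent) {
    dur := time.Since(event.StartTime)
    if dur > time.Second {
        fmt.Printf("SLOW DATABASE QUERY [%s] %s\n", dur, event.Query)
    }
}

func main() {
    // On any *bun.DB instance this would be registered with:
    //   db.AddQueryHook(timingHook{})
    fmt.Println("register with (*bun.DB).AddQueryHook(timingHook{})")
}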
@@ -21,7 +21,8 @@ package bundb
 import (
     "context"

-    "github.com/ReneKroon/ttlcache"
+    "codeberg.org/gruf/go-cache/v2"
+    "github.com/sirupsen/logrus"
     "github.com/superseriousbusiness/gotosocial/internal/db"
     "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
     "github.com/uptrace/bun"

@@ -29,7 +30,7 @@ import (

 type mentionDB struct {
     conn  *DBConn
-    cache *ttlcache.Cache
+    cache cache.Cache[string, *gtsmodel.Mention]
 }

 func (m *mentionDB) newMentionQ(i interface{}) *bun.SelectQuery {

@@ -41,40 +42,24 @@ func (m *mentionDB) newMentionQ(i interface{}) *bun.SelectQuery {
         Relation("TargetAccount")
 }

-func (m *mentionDB) getMentionCached(id string) (*gtsmodel.Mention, bool) {
-    v, ok := m.cache.Get(id)
-    if !ok {
-        return nil, false
-    }
-
-    mention, ok := v.(*gtsmodel.Mention)
-    if !ok {
-        panic("mention cache entry was not a mention")
-    }
-
-    return mention, true
-}
-
-func (m *mentionDB) putMentionCache(mention *gtsmodel.Mention) {
-    m.cache.Set(mention.ID, mention)
-}
-
 func (m *mentionDB) getMentionDB(ctx context.Context, id string) (*gtsmodel.Mention, db.Error) {
-    mention := &gtsmodel.Mention{}
+    mention := gtsmodel.Mention{}

-    q := m.newMentionQ(mention).
+    q := m.newMentionQ(&mention).
         Where("mention.id = ?", id)

     if err := q.Scan(ctx); err != nil {
         return nil, m.conn.ProcessError(err)
     }

-    m.putMentionCache(mention)
-    return mention, nil
+    copy := mention
+    m.cache.Set(mention.ID, &copy)
+
+    return &mention, nil
 }

 func (m *mentionDB) GetMention(ctx context.Context, id string) (*gtsmodel.Mention, db.Error) {
-    if mention, cached := m.getMentionCached(id); cached {
+    if mention, ok := m.cache.Get(id); ok {
         return mention, nil
     }
     return m.getMentionDB(ctx, id)

@@ -84,16 +69,11 @@ func (m *mentionDB) GetMentions(ctx context.Context, ids []string) ([]*gtsmodel.
     mentions := make([]*gtsmodel.Mention, 0, len(ids))

     for _, id := range ids {
-        // Attempt fetch from cache
-        mention, cached := m.getMentionCached(id)
-        if cached {
-            mentions = append(mentions, mention)
-        }
-
-        // Attempt fetch from DB
-        mention, err := m.getMentionDB(ctx, id)
+        mention, err := m.GetMention(ctx, id)
         if err != nil {
-            return nil, err
+            logrus.Errorf("GetMentions: error getting mention %q: %v", id, err)
+            continue
         }

         // Append mention
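The "copy := mention; m.cache.Set(mention.ID, &copy)" idiom above keeps the cached pointer distinct from the pointer handed back to the caller, so callers cannot mutate the cached entry in place. A tiny illustration of the same idea with stand-in types:

package main

import "fmt"

type Mention struct{ ID, NameString string }

var cached = map[string]*Mention{}

// storeCopy mirrors the pattern above: the cache keeps its own *Mention,
// so whatever the caller does with the returned pointer cannot reach the
// cached entry.
func storeCopy(m Mention) *Mention {
    copy := m
    cached[m.ID] = &copy
    return &m
}

func main() {
    returned := storeCopy(Mention{ID: "01M", NameString: "@someone@example.org"})
    returned.NameString = "mutated by caller"
    fmt.Println(cached["01M"].NameString) // still "@someone@example.org"
}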
@@ -21,37 +21,39 @@ package bundb
 import (
     "context"

-    "github.com/ReneKroon/ttlcache"
+    "codeberg.org/gruf/go-cache/v2"
+    "github.com/sirupsen/logrus"
     "github.com/superseriousbusiness/gotosocial/internal/db"
     "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
     "github.com/uptrace/bun"
 )

 type notificationDB struct {
     conn  *DBConn
-    cache *ttlcache.Cache
-}
-
-func (n *notificationDB) newNotificationQ(i interface{}) *bun.SelectQuery {
-    return n.conn.
-        NewSelect().
-        Model(i).
-        Relation("OriginAccount").
-        Relation("TargetAccount").
-        Relation("Status")
-}
+    cache cache.Cache[string, *gtsmodel.Notification]
 }

 func (n *notificationDB) GetNotification(ctx context.Context, id string) (*gtsmodel.Notification, db.Error) {
-    if notification, cached := n.getNotificationCache(id); cached {
+    if notification, ok := n.cache.Get(id); ok {
         return notification, nil
     }

-    notif := &gtsmodel.Notification{}
-    err := n.getNotificationDB(ctx, id, notif)
-    if err != nil {
-        return nil, err
+    dst := gtsmodel.Notification{ID: id}
+
+    q := n.conn.NewSelect().
+        Model(&dst).
+        Relation("OriginAccount").
+        Relation("TargetAccount").
+        Relation("Status").
+        WherePK()
+
+    if err := q.Scan(ctx); err != nil {
+        return nil, n.conn.ProcessError(err)
     }
-    return notif, nil
+
+    copy := dst
+    n.cache.Set(id, &copy)
+
+    return &dst, nil
 }

 func (n *notificationDB) GetNotifications(ctx context.Context, accountID string, limit int, maxID string, sinceID string) ([]*gtsmodel.Notification, db.Error) {

@@ -61,11 +63,11 @@ func (n *notificationDB) GetNotifications(ctx context.Context, accountID string,
     }

     // Make a guess for slice size
-    notifications := make([]*gtsmodel.Notification, 0, limit)
+    notifIDs := make([]string, 0, limit)

     q := n.conn.
         NewSelect().
-        Model(&notifications).
+        Table("notifications").
+        Column("id")

     if maxID != "" {

@@ -84,56 +86,25 @@ func (n *notificationDB) GetNotifications(ctx context.Context, accountID string,
         q = q.Limit(limit)
     }

-    err := q.Scan(ctx)
-    if err != nil {
+    if err := q.Scan(ctx, &notifIDs); err != nil {
         return nil, n.conn.ProcessError(err)
     }

+    notifs := make([]*gtsmodel.Notification, 0, limit)
+
     // now we have the IDs, select the notifs one by one
     // reason for this is that for each notif, we can instead get it from our cache if it's cached
-    for i, notif := range notifications {
-        // Check cache for notification
-        nn, cached := n.getNotificationCache(notif.ID)
-        if cached {
-            notifications[i] = nn
+    for _, id := range notifIDs {
+        // Attempt fetch from DB
+        notif, err := n.GetNotification(ctx, id)
+        if err != nil {
+            logrus.Errorf("GetNotifications: error getting notification %q: %v", id, err)
+            continue
         }

-        // Check DB for notification
-        err := n.getNotificationDB(ctx, notif.ID, notif)
-        if err != nil {
-            return nil, err
-        }
+        // Append notification
+        notifs = append(notifs, notif)
     }

-    return notifications, nil
-}
-
-func (n *notificationDB) getNotificationCache(id string) (*gtsmodel.Notification, bool) {
-    v, ok := n.cache.Get(id)
-    if !ok {
-        return nil, false
-    }
-
-    notif, ok := v.(*gtsmodel.Notification)
-    if !ok {
-        panic("notification cache entry was not a notification")
-    }
-
-    return notif, true
-}
-
-func (n *notificationDB) putNotificationCache(notif *gtsmodel.Notification) {
-    n.cache.Set(notif.ID, notif)
-}
-
-func (n *notificationDB) getNotificationDB(ctx context.Context, id string, dst *gtsmodel.Notification) error {
-    q := n.newNotificationQ(dst).WherePK()
-
-    if err := q.Scan(ctx); err != nil {
-        return n.conn.ProcessError(err)
-    }
-
-    n.putNotificationCache(dst)
-    return nil
+    return notifs, nil
 }
@@ -21,6 +21,7 @@ package bundb
 import (
     "container/list"
     "context"
+    "database/sql"
     "time"

     "github.com/sirupsen/logrus"

@@ -219,21 +220,32 @@ func (s *statusDB) GetStatusChildren(ctx context.Context, status *gtsmodel.Statu
 }

 func (s *statusDB) statusChildren(ctx context.Context, status *gtsmodel.Status, foundStatuses *list.List, onlyDirect bool, minID string) {
-    immediateChildren := []*gtsmodel.Status{}
+    childIDs := []string{}

     q := s.conn.
         NewSelect().
-        Model(&immediateChildren).
+        Table("statuses").
+        Column("id").
         Where("in_reply_to_id = ?", status.ID)
     if minID != "" {
-        q = q.Where("status.id > ?", minID)
+        q = q.Where("id > ?", minID)
     }

-    if err := q.Scan(ctx); err != nil {
+    if err := q.Scan(ctx, &childIDs); err != nil {
+        if err != sql.ErrNoRows {
+            logrus.Errorf("statusChildren: error getting children for %q: %v", status.ID, err)
+        }
         return
     }

-    for _, child := range immediateChildren {
+    for _, id := range childIDs {
+        // Fetch child with ID from database
+        child, err := s.GetStatusByID(ctx, id)
+        if err != nil {
+            logrus.Errorf("statusChildren: error getting child status %q: %v", id, err)
+            continue
+        }
+
     insertLoop:
         for e := foundStatuses.Front(); e != nil; e = e.Next() {
             entry, ok := e.Value.(*gtsmodel.Status)
@@ -20,55 +20,52 @@ package bundb

 import (
     "context"
-    "database/sql"
-    "sort"

+    "github.com/sirupsen/logrus"
     "github.com/superseriousbusiness/gotosocial/internal/db"
     "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
     "github.com/uptrace/bun"
+    "golang.org/x/exp/slices"
 )

 type timelineDB struct {
-    conn *DBConn
+    conn   *DBConn
+    status *statusDB
 }

 func (t *timelineDB) GetHomeTimeline(ctx context.Context, accountID string, maxID string, sinceID string, minID string, limit int, local bool) ([]*gtsmodel.Status, db.Error) {
     // Ensure reasonable
     if limit < 0 {
         limit = 0
     }

     // Make educated guess for slice size
-    statuses := make([]*gtsmodel.Status, 0, limit)
+    statusIDs := make([]string, 0, limit)

     q := t.conn.
         NewSelect().
-        Model(&statuses)
+        Table("statuses").

-    q = q.ColumnExpr("status.*").
+        // Select only IDs from table
+        Column("statuses.id").
         // Find out who accountID follows.
-        Join("LEFT JOIN follows AS f ON f.target_account_id = status.account_id").
+        Join("LEFT JOIN follows ON follows.target_account_id = statuses.account_id AND follows.account_id = ?", accountID).
         // Sort by highest ID (newest) to lowest ID (oldest)
-        Order("status.id DESC")
+        Order("statuses.id DESC")

     if maxID != "" {
         // return only statuses LOWER (ie., older) than maxID
-        q = q.Where("status.id < ?", maxID)
+        q = q.Where("statuses.id < ?", maxID)
     }

     if sinceID != "" {
         // return only statuses HIGHER (ie., newer) than sinceID
-        q = q.Where("status.id > ?", sinceID)
+        q = q.Where("statuses.id > ?", sinceID)
     }

     if minID != "" {
         // return only statuses HIGHER (ie., newer) than minID
-        q = q.Where("status.id > ?", minID)
+        q = q.Where("statuses.id > ?", minID)
     }

     if local {
         // return only statuses posted by local account havers
-        q = q.Where("status.local = ?", local)
+        q = q.Where("statuses.local = ?", local)
     }

     if limit > 0 {

@@ -83,15 +80,30 @@ func (t *timelineDB) GetHomeTimeline(ctx context.Context, accountID string, maxI
     // See: https://bun.uptrace.dev/guide/queries.html#select
     whereGroup := func(*bun.SelectQuery) *bun.SelectQuery {
         return q.
-            WhereOr("f.account_id = ?", accountID).
-            WhereOr("status.account_id = ?", accountID)
+            WhereOr("follows.account_id = ?", accountID).
+            WhereOr("statuses.account_id = ?", accountID)
     }

     q = q.WhereGroup(" AND ", whereGroup)

-    if err := q.Scan(ctx); err != nil {
+    if err := q.Scan(ctx, &statusIDs); err != nil {
         return nil, t.conn.ProcessError(err)
     }

+    statuses := make([]*gtsmodel.Status, 0, len(statusIDs))
+
+    for _, id := range statusIDs {
+        // Fetch status from db for ID
+        status, err := t.status.GetStatusByID(ctx, id)
+        if err != nil {
+            logrus.Errorf("GetHomeTimeline: error fetching status %q: %v", id, err)
+            continue
+        }
+
+        // Append status to slice
+        statuses = append(statuses, status)
+    }
+
     return statuses, nil
 }

@@ -102,40 +114,56 @@ func (t *timelineDB) GetPublicTimeline(ctx context.Context, accountID string, ma
     }

     // Make educated guess for slice size
-    statuses := make([]*gtsmodel.Status, 0, limit)
+    statusIDs := make([]string, 0, limit)

     q := t.conn.
         NewSelect().
-        Model(&statuses).
-        Where("visibility = ?", gtsmodel.VisibilityPublic).
-        WhereGroup(" AND ", whereEmptyOrNull("in_reply_to_id")).
-        WhereGroup(" AND ", whereEmptyOrNull("in_reply_to_uri")).
-        WhereGroup(" AND ", whereEmptyOrNull("boost_of_id")).
-        Order("status.id DESC")
+        Table("statuses").
+        Column("statuses.id").
+        Where("statuses.visibility = ?", gtsmodel.VisibilityPublic).
+        WhereGroup(" AND ", whereEmptyOrNull("statuses.in_reply_to_id")).
+        WhereGroup(" AND ", whereEmptyOrNull("statuses.in_reply_to_uri")).
+        WhereGroup(" AND ", whereEmptyOrNull("statuses.boost_of_id")).
+        Order("statuses.id DESC")

     if maxID != "" {
-        q = q.Where("status.id < ?", maxID)
+        q = q.Where("statuses.id < ?", maxID)
     }

     if sinceID != "" {
-        q = q.Where("status.id > ?", sinceID)
+        q = q.Where("statuses.id > ?", sinceID)
     }

     if minID != "" {
-        q = q.Where("status.id > ?", minID)
+        q = q.Where("statuses.id > ?", minID)
     }

     if local {
-        q = q.Where("status.local = ?", local)
+        q = q.Where("statuses.local = ?", local)
     }

     if limit > 0 {
         q = q.Limit(limit)
     }

-    if err := q.Scan(ctx); err != nil {
+    if err := q.Scan(ctx, &statusIDs); err != nil {
         return nil, t.conn.ProcessError(err)
     }

+    statuses := make([]*gtsmodel.Status, 0, len(statusIDs))
+
+    for _, id := range statusIDs {
+        // Fetch status from db for ID
+        status, err := t.status.GetStatusByID(ctx, id)
+        if err != nil {
+            logrus.Errorf("GetPublicTimeline: error fetching status %q: %v", id, err)
+            continue
+        }
+
+        // Append status to slice
+        statuses = append(statuses, status)
+    }
+
     return statuses, nil
 }

@@ -170,46 +198,32 @@ func (t *timelineDB) GetFavedTimeline(ctx context.Context, accountID string, max

     err := fq.Scan(ctx)
     if err != nil {
-        if err == sql.ErrNoRows {
-            return nil, "", "", db.ErrNoEntries
-        }
-        return nil, "", "", err
+        return nil, "", "", t.conn.ProcessError(err)
     }

     if len(faves) == 0 {
         return nil, "", "", db.ErrNoEntries
     }

-    // map[statusID]faveID -- we need this to sort statuses by fave ID rather than status ID
-    statusesFavesMap := make(map[string]string, len(faves))
-    statusIDs := make([]string, 0, len(faves))
-    for _, f := range faves {
-        statusesFavesMap[f.StatusID] = f.ID
-        statusIDs = append(statusIDs, f.StatusID)
-    }
-
-    statuses := make([]*gtsmodel.Status, 0, len(statusIDs))
-
-    err = t.conn.
-        NewSelect().
-        Model(&statuses).
-        Where("id IN (?)", bun.In(statusIDs)).
-        Scan(ctx)
-    if err != nil {
-        return nil, "", "", t.conn.ProcessError(err)
-    }
-
-    if len(statuses) == 0 {
-        return nil, "", "", db.ErrNoEntries
-    }
-
-    // arrange statuses by fave ID
-    sort.Slice(statuses, func(i int, j int) bool {
-        statusI := statuses[i]
-        statusJ := statuses[j]
-        return statusesFavesMap[statusI.ID] < statusesFavesMap[statusJ.ID]
+    // Sort by favourite ID rather than status ID
+    slices.SortFunc(faves, func(a, b *gtsmodel.StatusFave) bool {
+        return a.ID < b.ID
     })

+    statuses := make([]*gtsmodel.Status, 0, len(faves))
+
+    for _, fave := range faves {
+        // Fetch status from db for corresponding favourite
+        status, err := t.status.GetStatusByID(ctx, fave.StatusID)
+        if err != nil {
+            logrus.Errorf("GetFavedTimeline: error fetching status for fave %q: %v", fave.ID, err)
+            continue
+        }
+
+        // Append status to slice
+        statuses = append(statuses, status)
+    }
+
     nextMaxID := faves[len(faves)-1].ID
     prevMinID := faves[0].ID
     return statuses, nextMaxID, prevMinID, nil
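GetFavedTimeline now sorts the fave rows themselves with x/exp/slices.SortFunc instead of sorting fetched statuses through a lookup map. A minimal sketch of that call shape follows; note it uses the older x/exp/slices API seen in this diff, where the comparator returns a bool (later versions of the package take an int-returning cmp function), and the fave values are made up.

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

type StatusFave struct{ ID, StatusID string }

func main() {
    faves := []*StatusFave{
        {ID: "01C", StatusID: "01STATUS3"},
        {ID: "01A", StatusID: "01STATUS1"},
        {ID: "01B", StatusID: "01STATUS2"},
    }

    // Same shape as in GetFavedTimeline: order faves by their own ULID,
    // so paging tokens (nextMaxID/prevMinID) come from fave IDs.
    slices.SortFunc(faves, func(a, b *StatusFave) bool {
        return a.ID < b.ID
    })

    fmt.Println(faves[0].ID, faves[len(faves)-1].ID) // 01A 01C
}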
@@ -145,11 +145,12 @@ selectStatusesLoop:
     for {
         statuses, err := p.db.GetAccountStatuses(ctx, account.ID, 20, false, false, maxID, "", false, false, false)
         if err != nil {
-            if err == db.ErrNoEntries {
+            if errors.Is(err, db.ErrNoEntries) {
                 // no statuses left for this instance so we're done
                 l.Infof("Delete: done iterating through statuses for account %s", account.Username)
                 break selectStatusesLoop
             }

             // an actual error has occurred
             l.Errorf("Delete: db error selecting statuses for account %s: %s", account.Username, err)
             break selectStatusesLoop

@@ -158,6 +159,7 @@ selectStatusesLoop:
         for i, s := range statuses {
             // pass the status delete through the client api channel for processing
+            s.Account = account

             l.Debug("putting status in the client api channel")
             p.clientWorker.Queue(messages.FromClientAPI{
                 APObjectType: ap.ObjectNote,

@@ -168,20 +170,20 @@ selectStatusesLoop:
             })

             if err := p.db.DeleteByID(ctx, s.ID, s); err != nil {
-                if err != db.ErrNoEntries {
+                if !errors.Is(err, db.ErrNoEntries) {
                     // actual error has occurred
-                    l.Errorf("Delete: db error status %s for account %s: %s", s.ID, account.Username, err)
-                    break selectStatusesLoop
+                    l.Errorf("Delete: db error deleting status %s for account %s: %s", s.ID, account.Username, err)
+                    continue
                 }
             }

             // if there are any boosts of this status, delete them as well
             boosts := []*gtsmodel.Status{}
             if err := p.db.GetWhere(ctx, []db.Where{{Key: "boost_of_id", Value: s.ID}}, &boosts); err != nil {
-                if err != db.ErrNoEntries {
+                if !errors.Is(err, db.ErrNoEntries) {
                     // an actual error has occurred
                     l.Errorf("Delete: db error selecting boosts of status %s for account %s: %s", s.ID, account.Username, err)
-                    break selectStatusesLoop
+                    continue
                 }
             }

@@ -189,8 +191,10 @@ selectStatusesLoop:
                 if b.Account == nil {
                     bAccount, err := p.db.GetAccountByID(ctx, b.AccountID)
                     if err != nil {
+                        l.Errorf("Delete: db error populating boosted status account: %v", err)
                         continue
                     }
+
                     b.Account = bAccount
                 }

@@ -207,7 +211,7 @@ selectStatusesLoop:
                 if err != db.ErrNoEntries {
                     // actual error has occurred
                     l.Errorf("Delete: db error deleting boost with id %s: %s", b.ID, err)
-                    break selectStatusesLoop
+                    continue
                 }
             }
         }
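The switch from "err == db.ErrNoEntries" to errors.Is matters as soon as the sentinel error gets wrapped anywhere along the chain. A small self-contained illustration, with errNoEntries standing in for db.ErrNoEntries:

package main

import (
    "errors"
    "fmt"
)

// errNoEntries stands in for db.ErrNoEntries; the wrapping below is what
// makes errors.Is preferable to a plain == comparison.
var errNoEntries = errors.New("no entries")

func loadSomething() error {
    return fmt.Errorf("selecting statuses: %w", errNoEntries)
}

func main() {
    err := loadSomething()
    fmt.Println(err == errNoEntries)          // false: the sentinel is wrapped
    fmt.Println(errors.Is(err, errNoEntries)) // true: Is unwraps the chain
}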
@@ -57,10 +57,14 @@ func (suite *AccountTestSuite) TestAccountDeleteLocal() {
         DeleteOriginID: deletingAccount.ID,
     })
     suite.NoError(errWithCode)
-    time.Sleep(1 * time.Second) // wait a sec for the delete to process

     // the delete should be federated outwards to the following account's inbox
-    sent, ok := suite.httpClient.SentMessages[followingAccount.InboxURI]
+    var sent []byte
+    var ok bool
+    for !ok {
+        sent, ok = suite.httpClient.SentMessages[followingAccount.InboxURI]
+    }
+
     suite.True(ok)
     delete := &struct {
         Actor string `json:"actor"`

@@ -79,6 +83,9 @@ func (suite *AccountTestSuite) TestAccountDeleteLocal() {
     suite.Equal(pub.PublicActivityPubIRI, delete.CC)
     suite.Equal("Delete", delete.Type)

+    // wait for the delete to go through
+    time.Sleep(1 * time.Second)
+
     // the deleted account should be deleted
     dbAccount, err := suite.db.GetAccountByID(ctx, deletingAccount.ID)
     suite.NoError(err)
@@ -76,10 +76,10 @@ func (suite *IndexTestSuite) TestIndexBeforeLowID() {

     postID, err := suite.timeline.OldestIndexedItemID(context.Background())
     suite.NoError(err)
-    suite.Equal("01F8MHC0H0A7XHTVH5F596ZKBM", postID)
+    suite.Equal("01F8MHBQCBTDKN6X5VHGMMN4MA", postID)

     indexLength := suite.timeline.ItemIndexLength(context.Background())
-    suite.Equal(9, indexLength)
+    suite.Equal(10, indexLength)
 }

 func (suite *IndexTestSuite) TestIndexBeforeHighID() {

@@ -107,9 +107,8 @@ func (suite *IndexTestSuite) TestIndexBehindHighID() {
     suite.NoError(err)
     suite.Equal("01G36SF3V6Y6V5BF9P4R7PQG7G", postID)

-    // indexLength should be 9 because that's all this user has hometimelineable
     indexLength := suite.timeline.ItemIndexLength(context.Background())
-    suite.Equal(9, indexLength)
+    suite.Equal(10, indexLength)
 }

 func (suite *IndexTestSuite) TestIndexBehindLowID() {
@@ -62,88 +62,90 @@ func (suite *ManagerTestSuite) TearDownTest() {
 }

 func (suite *ManagerTestSuite) TestManagerIntegration() {
+    ctx := context.Background()
+
     testAccount := suite.testAccounts["local_account_1"]

     // should start at 0
-    indexedLen := suite.manager.GetIndexedLength(context.Background(), testAccount.ID)
+    indexedLen := suite.manager.GetIndexedLength(ctx, testAccount.ID)
     suite.Equal(0, indexedLen)

     // oldestIndexed should be empty string since there's nothing indexed
-    oldestIndexed, err := suite.manager.GetOldestIndexedID(context.Background(), testAccount.ID)
+    oldestIndexed, err := suite.manager.GetOldestIndexedID(ctx, testAccount.ID)
     suite.NoError(err)
     suite.Empty(oldestIndexed)

     // trigger status preparation
-    err = suite.manager.PrepareXFromTop(context.Background(), testAccount.ID, 20)
+    err = suite.manager.PrepareXFromTop(ctx, testAccount.ID, 20)
     suite.NoError(err)

-    // local_account_1 can see 15 statuses out of the testrig statuses in its home timeline
-    indexedLen = suite.manager.GetIndexedLength(context.Background(), testAccount.ID)
-    suite.Equal(15, indexedLen)
+    // local_account_1 can see 16 statuses out of the testrig statuses in its home timeline
+    indexedLen = suite.manager.GetIndexedLength(ctx, testAccount.ID)
+    suite.Equal(16, indexedLen)

     // oldest should now be set
-    oldestIndexed, err = suite.manager.GetOldestIndexedID(context.Background(), testAccount.ID)
+    oldestIndexed, err = suite.manager.GetOldestIndexedID(ctx, testAccount.ID)
     suite.NoError(err)
-    suite.Equal("01F8MH82FYRXD2RC6108DAJ5HB", oldestIndexed)
+    suite.Equal("01F8MH75CBF9JFX4ZAD54N0W0R", oldestIndexed)

     // get hometimeline
-    statuses, err := suite.manager.GetTimeline(context.Background(), testAccount.ID, "", "", "", 20, false)
+    statuses, err := suite.manager.GetTimeline(ctx, testAccount.ID, "", "", "", 20, false)
     suite.NoError(err)
-    suite.Len(statuses, 15)
+    suite.Len(statuses, 16)

     // now wipe the last status from all timelines, as though it had been deleted by the owner
-    err = suite.manager.WipeItemFromAllTimelines(context.Background(), "01F8MH82FYRXD2RC6108DAJ5HB")
+    err = suite.manager.WipeItemFromAllTimelines(ctx, "01F8MH75CBF9JFX4ZAD54N0W0R")
     suite.NoError(err)

     // timeline should be shorter
-    indexedLen = suite.manager.GetIndexedLength(context.Background(), testAccount.ID)
-    suite.Equal(14, indexedLen)
+    indexedLen = suite.manager.GetIndexedLength(ctx, testAccount.ID)
+    suite.Equal(15, indexedLen)

     // oldest should now be different
-    oldestIndexed, err = suite.manager.GetOldestIndexedID(context.Background(), testAccount.ID)
+    oldestIndexed, err = suite.manager.GetOldestIndexedID(ctx, testAccount.ID)
     suite.NoError(err)
-    suite.Equal("01F8MHAAY43M6RJ473VQFCVH37", oldestIndexed)
+    suite.Equal("01F8MH82FYRXD2RC6108DAJ5HB", oldestIndexed)

     // delete the new oldest status specifically from this timeline, as though local_account_1 had muted or blocked it
-    removed, err := suite.manager.Remove(context.Background(), testAccount.ID, "01F8MHAAY43M6RJ473VQFCVH37")
+    removed, err := suite.manager.Remove(ctx, testAccount.ID, "01F8MH82FYRXD2RC6108DAJ5HB")
     suite.NoError(err)
     suite.Equal(2, removed) // 1 status should be removed, but from both indexed and prepared, so 2 removals total

     // timeline should be shorter
-    indexedLen = suite.manager.GetIndexedLength(context.Background(), testAccount.ID)
-    suite.Equal(13, indexedLen)
+    indexedLen = suite.manager.GetIndexedLength(ctx, testAccount.ID)
+    suite.Equal(14, indexedLen)

     // oldest should now be different
-    oldestIndexed, err = suite.manager.GetOldestIndexedID(context.Background(), testAccount.ID)
+    oldestIndexed, err = suite.manager.GetOldestIndexedID(ctx, testAccount.ID)
     suite.NoError(err)
-    suite.Equal("01F8MHAMCHF6Y650WCRSCP4WMY", oldestIndexed)
+    suite.Equal("01F8MHAAY43M6RJ473VQFCVH37", oldestIndexed)

     // now remove all entries by local_account_2 from the timeline
-    err = suite.manager.WipeItemsFromAccountID(context.Background(), testAccount.ID, suite.testAccounts["local_account_2"].ID)
+    err = suite.manager.WipeItemsFromAccountID(ctx, testAccount.ID, suite.testAccounts["local_account_2"].ID)
     suite.NoError(err)

     // timeline should be shorter
-    indexedLen = suite.manager.GetIndexedLength(context.Background(), testAccount.ID)
-    suite.Equal(6, indexedLen)
+    indexedLen = suite.manager.GetIndexedLength(ctx, testAccount.ID)
+    suite.Equal(7, indexedLen)

     // ingest 1 into the timeline
     status1 := suite.testStatuses["admin_account_status_1"]
-    ingested, err := suite.manager.Ingest(context.Background(), status1, testAccount.ID)
+    ingested, err := suite.manager.Ingest(ctx, status1, testAccount.ID)
     suite.NoError(err)
     suite.True(ingested)

     // ingest and prepare another one into the timeline
     status2 := suite.testStatuses["local_account_2_status_1"]
-    ingested, err = suite.manager.IngestAndPrepare(context.Background(), status2, testAccount.ID)
+    ingested, err = suite.manager.IngestAndPrepare(ctx, status2, testAccount.ID)
     suite.NoError(err)
     suite.True(ingested)

     // timeline should be longer now
-    indexedLen = suite.manager.GetIndexedLength(context.Background(), testAccount.ID)
-    suite.Equal(8, indexedLen)
+    indexedLen = suite.manager.GetIndexedLength(ctx, testAccount.ID)
+    suite.Equal(9, indexedLen)

     // try to ingest status 2 again
-    ingested, err = suite.manager.IngestAndPrepare(context.Background(), status2, testAccount.ID)
+    ingested, err = suite.manager.IngestAndPrepare(ctx, status2, testAccount.ID)
     suite.NoError(err)
     suite.False(ingested) // should be false since it's a duplicate
 }
@@ -25,7 +25,6 @@ import (
     "github.com/superseriousbusiness/activity/streams/vocab"
     "github.com/superseriousbusiness/gotosocial/internal/ap"
     "github.com/superseriousbusiness/gotosocial/internal/api/model"
-    "github.com/superseriousbusiness/gotosocial/internal/cache"
     "github.com/superseriousbusiness/gotosocial/internal/db"
     "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
 )

@@ -194,14 +193,10 @@ type TypeConverter interface {
 }

 type converter struct {
-    db      db.DB
-    asCache cache.Cache
+    db db.DB
 }

 // NewConverter returns a new Converter
 func NewConverter(db db.DB) TypeConverter {
-    return &converter{
-        db:      db,
-        asCache: cache.New(),
-    }
+    return &converter{db: db}
 }
@@ -348,14 +348,6 @@ func (c *converter) AccountToASMinimal(ctx context.Context, a *gtsmodel.Account)
 }

 func (c *converter) StatusToAS(ctx context.Context, s *gtsmodel.Status) (vocab.ActivityStreamsNote, error) {
-    // first check if we have this note in our asCache already
-    if noteI, err := c.asCache.Fetch(s.ID); err == nil {
-        if note, ok := noteI.(vocab.ActivityStreamsNote); ok {
-            // we have it, so just return it as-is
-            return note, nil
-        }
-    }
-
     // ensure prerequisites here before we get stuck in

     // check if author account is already attached to status and attach it if not

@@ -550,11 +542,6 @@ func (c *converter) StatusToAS(ctx context.Context, s *gtsmodel.Status) (vocab.A
     sensitiveProp.AppendXMLSchemaBoolean(s.Sensitive)
     status.SetActivityStreamsSensitive(sensitiveProp)

-    // put the note in our cache in case we need it again soon
-    if err := c.asCache.Store(s.ID, status); err != nil {
-        return nil, err
-    }
-
     return status, nil
 }