mirror of
https://github.com/superseriousbusiness/gotosocial.git
synced 2025-10-29 23:12:26 -05:00
much improved status timeline code comments
This commit is contained in:
parent
cc86a08f15
commit
00d8a1f8ac
3 changed files with 277 additions and 240 deletions
129
internal/cache/timeline/preload.go
vendored
Normal file
129
internal/cache/timeline/preload.go
vendored
Normal file
|
|
@ -0,0 +1,129 @@
|
||||||
|
// GoToSocial
|
||||||
|
// Copyright (C) GoToSocial Authors admin@gotosocial.org
|
||||||
|
// SPDX-License-Identifier: AGPL-3.0-or-later
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package timeline
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/superseriousbusiness/gotosocial/internal/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// preloader provides a means of synchronising the
// initial fill, or "preload", of a timeline cache.
//
// It has 4 possible states in the atomic pointer:
// - preloading    = &(interface{}(*sync.WaitGroup))
// - preloaded     = &(interface{}(nil))
// - needs preload = &(interface{}(false))
// - brand-new     = nil (functionally same as 'needs preload')
type preloader struct{ p atomic.Pointer[any] }
|
||||||
|
|
||||||
|
// Check will concurrency-safely check the preload
// state, and if needed call the provided function.
// If a preload is currently in progress, it will
// wait until that preload is complete before return.
func (p *preloader) Check(preload func()) {
	for {
		// Get state ptr.
		ptr := p.p.Load()

		if ptr == nil || *ptr == false {
			// Needs preloading (either brand-new
			// or explicitly cleared), start it.
			ok := p.start(ptr, preload)

			if !ok {
				// Failed to acquire start,
				// other thread beat us to it.
				// Loop to re-check new state.
				continue
			}

			// Success!
			return
		}

		// Check for a preload currently in progress.
		if wg, _ := (*ptr).(*sync.WaitGroup); wg != nil {
			// Wait for it to complete, then loop to
			// re-check whatever state it left behind.
			wg.Wait()
			continue
		}

		// Anything else (i.e. a stored nil,
		// the "preloaded" state) means success.
		return
	}
}
|
||||||
|
|
||||||
|
// start attempts to start the given preload function, by
|
||||||
|
// performing a CAS operation with 'old'. return is success.
|
||||||
|
func (p *preloader) start(old *any, preload func()) bool {
|
||||||
|
|
||||||
|
// Optimistically setup a
|
||||||
|
// new waitgroup to set as
|
||||||
|
// the preload waiter.
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(1)
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
// Wrap waitgroup in
|
||||||
|
// 'any' for pointer.
|
||||||
|
new := any(&wg)
|
||||||
|
|
||||||
|
// Attempt CAS operation to claim start.
|
||||||
|
started := p.p.CompareAndSwap(old, &new)
|
||||||
|
if !started {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start.
|
||||||
|
preload()
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// done marks state as preloaded,
|
||||||
|
// i.e. no more preload required.
|
||||||
|
func (p *preloader) done() {
|
||||||
|
old := p.p.Swap(new(any))
|
||||||
|
if old == nil { // was brand-new
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch t := (*old).(type) {
|
||||||
|
case *sync.WaitGroup: // was preloading
|
||||||
|
default:
|
||||||
|
log.Errorf(nil, "BUG: invalid preloader state: %#v", t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// clear will clear the state, marking a "preload" as required.
// i.e. next call to Check() will call provided preload func.
func (p *preloader) clear() {
	// Prepare our own "needs preload"
	// state to store, i.e. a wrapped 'false'.
	b := false
	a := any(b)
	for {
		// Swap in "needs preload" state,
		// checking what was stored before.
		old := p.p.Swap(&a)
		if old == nil { // was brand-new
			return
		}
		switch t := (*old).(type) {
		case nil: // was preloaded
			return
		case bool: // was cleared
			return
		case *sync.WaitGroup: // was preloading
			// Wait for the in-progress preload to
			// finish, then loop to swap again: its
			// done() call will have overwritten the
			// state we just stored with "preloaded".
			t.Wait()
		}
	}
}
|
||||||
337
internal/cache/timeline/status.go
vendored
337
internal/cache/timeline/status.go
vendored
|
|
@ -20,8 +20,6 @@ package timeline
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"slices"
|
"slices"
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"codeberg.org/gruf/go-structr"
|
"codeberg.org/gruf/go-structr"
|
||||||
|
|
||||||
|
|
@ -68,22 +66,44 @@ type StatusMeta struct {
|
||||||
loaded *gtsmodel.Status
|
loaded *gtsmodel.Status
|
||||||
}
|
}
|
||||||
|
|
||||||
// StatusTimeline provides a concurrency-safe timeline
|
// StatusTimeline provides a concurrency-safe sliding-window
|
||||||
// cache of status information. Internally only StatusMeta{}
|
// cache of the freshest statuses in a timeline. Internally,
|
||||||
// objects are stored, and the statuses themselves are loaded
|
// only StatusMeta{} objects themselves are stored, loading
|
||||||
// as-needed, caching prepared frontend representations where
|
// the actual statuses when necessary, but caching prepared
|
||||||
// possible. This is largely wrapping code for our own codebase
|
// frontend API models where possible.
|
||||||
// to be able to smoothly interact with structr.Timeline{}.
|
//
|
||||||
|
// Notes on design:
|
||||||
// ...
|
//
|
||||||
|
// Previously, and initially when designing this newer type,
|
||||||
|
// we had status timeline caches that would dynamically fill
|
||||||
|
// themselves with statuses on call to Load() with statuses
|
||||||
|
// at *any* location in the timeline, while simultaneously
|
||||||
|
// accepting new input of statuses from the background workers.
|
||||||
|
// This unfortunately can lead to situations where posts need
|
||||||
|
// to be fetched from the database, but the cache isn't aware
|
||||||
|
// they exist and instead returns an incomplete selection.
|
||||||
|
// This problem is best outlined by the follow simple example:
|
||||||
|
//
|
||||||
|
// "what if my timeline cache contains posts 0-to-6 and 8-to-12,
|
||||||
|
// and i make a request for posts between 4-and-10 with no limit,
|
||||||
|
// how is it to know that it's missing post 7?"
|
||||||
|
//
|
||||||
|
// The solution is to unfortunately remove a lot of the caching
|
||||||
|
// of "older areas" of the timeline, and instead just have it
|
||||||
|
// be a sliding window of the freshest posts of that timeline.
|
||||||
|
// It gets preloaded initially on start / first-call, and kept
|
||||||
|
// up-to-date with new posts by streamed inserts from background
|
||||||
|
// workers. Any requests for posts outside this we know therefore
|
||||||
|
// must hit the database, (which we then *don't* cache).
|
||||||
type StatusTimeline struct {
|
type StatusTimeline struct {
|
||||||
|
|
||||||
// underlying timeline cache of *StatusMeta{},
|
// underlying timeline cache of *StatusMeta{},
|
||||||
// primary-keyed by ID, with extra indices below.
|
// primary-keyed by ID, with extra indices below.
|
||||||
cache structr.Timeline[*StatusMeta, string]
|
cache structr.Timeline[*StatusMeta, string]
|
||||||
|
|
||||||
// ...
|
// preloader synchronizes preload
|
||||||
preload atomic.Pointer[any]
|
// state of the timeline cache.
|
||||||
|
preloader preloader
|
||||||
|
|
||||||
// fast-access cache indices.
|
// fast-access cache indices.
|
||||||
idx_ID *structr.Index //nolint:revive
|
idx_ID *structr.Index //nolint:revive
|
||||||
|
|
@ -150,79 +170,41 @@ func (t *StatusTimeline) Init(cap int) {
|
||||||
t.max = cap
|
t.max = cap
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *StatusTimeline) startPreload(
|
// Preload will fill with StatusTimeline{} cache with
|
||||||
ctx context.Context,
|
// the latest sliding window of status metadata for the
|
||||||
old *any, // old 'preload' ptr
|
// timeline type returned by database 'loadPage' function.
|
||||||
loadPage func(page *paging.Page) ([]*gtsmodel.Status, error),
|
//
|
||||||
filter func(*gtsmodel.Status) (bool, error),
|
// This function is concurrency-safe and repeated calls to
|
||||||
|
// it when already preloaded will be no-ops. To trigger a
|
||||||
|
// preload as being required, call .Clear().
|
||||||
|
func (t *StatusTimeline) Preload(
|
||||||
|
|
||||||
|
// loadPage should load the timeline of given page for cache hydration.
|
||||||
|
loadPage func(page *paging.Page) (statuses []*gtsmodel.Status, err error),
|
||||||
|
|
||||||
|
// filter can be used to perform filtering of returned
|
||||||
|
// statuses BEFORE insert into cache. i.e. this will effect
|
||||||
|
// what actually gets stored in the timeline cache.
|
||||||
|
filter func(each *gtsmodel.Status) (delete bool, err error),
|
||||||
) (
|
) (
|
||||||
started bool,
|
n int,
|
||||||
err error,
|
err error,
|
||||||
) {
|
) {
|
||||||
// Optimistically setup a
|
t.preloader.Check(func() {
|
||||||
// new waitgroup to set as
|
n, err = t.preload(loadPage, filter)
|
||||||
// the preload waiter.
|
if err != nil {
|
||||||
var wg sync.WaitGroup
|
return
|
||||||
wg.Add(1)
|
}
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
// Wrap waitgroup in
|
// Mark preloaded.
|
||||||
// 'any' for pointer.
|
t.preloader.done()
|
||||||
new := any(&wg)
|
})
|
||||||
|
|
||||||
// Attempt CAS operation to claim preload start.
|
|
||||||
started = t.preload.CompareAndSwap(old, &new)
|
|
||||||
if !started {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Begin the preload.
|
|
||||||
_, err = t.Preload(ctx,
|
|
||||||
loadPage,
|
|
||||||
filter,
|
|
||||||
)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *StatusTimeline) checkPreload(
|
// preload contains the core logic of
|
||||||
ctx context.Context,
|
// Preload(), without t.preloader checks.
|
||||||
loadPage func(page *paging.Page) ([]*gtsmodel.Status, error),
|
func (t *StatusTimeline) preload(
|
||||||
filter func(*gtsmodel.Status) (bool, error),
|
|
||||||
) error {
|
|
||||||
for {
|
|
||||||
// Get preload state.
|
|
||||||
p := t.preload.Load()
|
|
||||||
|
|
||||||
if p == nil || *p == false {
|
|
||||||
// Timeline needs preloading, start this process.
|
|
||||||
ok, err := t.startPreload(ctx, p, loadPage, filter)
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
// Failed to acquire start,
|
|
||||||
// other thread beat us to it.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return
|
|
||||||
// result.
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for a preload currently in progress.
|
|
||||||
if wg, _ := (*p).(*sync.WaitGroup); wg != nil {
|
|
||||||
wg.Wait()
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Anything else means
|
|
||||||
// timeline is ready.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Preload ...
|
|
||||||
func (t *StatusTimeline) Preload(
|
|
||||||
ctx context.Context,
|
|
||||||
|
|
||||||
// loadPage should load the timeline of given page for cache hydration.
|
// loadPage should load the timeline of given page for cache hydration.
|
||||||
loadPage func(page *paging.Page) (statuses []*gtsmodel.Status, err error),
|
loadPage func(page *paging.Page) (statuses []*gtsmodel.Status, err error),
|
||||||
|
|
@ -311,27 +293,15 @@ func (t *StatusTimeline) Preload(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mark timeline as preloaded.
|
|
||||||
old := t.preload.Swap(new(any))
|
|
||||||
if old != nil {
|
|
||||||
switch t := (*old).(type) {
|
|
||||||
case *sync.WaitGroup:
|
|
||||||
default:
|
|
||||||
log.Errorf(ctx, "BUG: invalid timeline preload state: %#v", t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load will load timeline statuses according to given
|
// Load will load given page of timeline statuses. First it
|
||||||
// page, using provided callbacks to load extra data when
|
// will prioritize fetching statuses from the sliding window
|
||||||
// necessary, and perform fine-grained filtering loaded
|
// that is the timeline cache of latest statuses, else it will
|
||||||
// database models before eventual return to the user. The
|
// fall back to loading from the database using callback funcs.
|
||||||
// returned strings are the lo, hi ID paging values, used
|
// The returned string values are the low / high status ID
|
||||||
// for generation of next, prev page links in the response.
|
// paging values, used in calculating next / prev page links.
|
||||||
|
|
||||||
// Load ...
|
|
||||||
func (t *StatusTimeline) Load(
|
func (t *StatusTimeline) Load(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
page *paging.Page,
|
page *paging.Page,
|
||||||
|
|
@ -354,13 +324,7 @@ func (t *StatusTimeline) Load(
|
||||||
string, // hi
|
string, // hi
|
||||||
error,
|
error,
|
||||||
) {
|
) {
|
||||||
// Ensure timeline is loaded.
|
var err error
|
||||||
if err := t.checkPreload(ctx,
|
|
||||||
loadPage,
|
|
||||||
filter,
|
|
||||||
); err != nil {
|
|
||||||
return nil, "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get paging details.
|
// Get paging details.
|
||||||
lo := page.Min.Value
|
lo := page.Min.Value
|
||||||
|
|
@ -376,62 +340,73 @@ func (t *StatusTimeline) Load(
|
||||||
nextPg.Min.Value = lo
|
nextPg.Min.Value = lo
|
||||||
nextPg.Max.Value = hi
|
nextPg.Max.Value = hi
|
||||||
|
|
||||||
// First we attempt to load status
|
// Interstitial meta objects.
|
||||||
// metadata entries from the timeline
|
var metas []*StatusMeta
|
||||||
// cache, up to given limit.
|
|
||||||
metas := t.cache.Select(
|
|
||||||
util.PtrIf(lo),
|
|
||||||
util.PtrIf(hi),
|
|
||||||
util.PtrIf(limit),
|
|
||||||
dir,
|
|
||||||
)
|
|
||||||
|
|
||||||
// We now reset the lo,hi values to
|
|
||||||
// represent the lowest and highest
|
|
||||||
// index values of loaded statuses.
|
|
||||||
//
|
|
||||||
// We continually update these while
|
|
||||||
// building up statuses to return, for
|
|
||||||
// caller to build next / prev page
|
|
||||||
// response values.
|
|
||||||
lo, hi = "", ""
|
|
||||||
|
|
||||||
|
// Returned frontend API statuses.
|
||||||
var apiStatuses []*apimodel.Status
|
var apiStatuses []*apimodel.Status
|
||||||
|
|
||||||
if len(metas) > 0 {
|
// TODO: we can remove this nil
|
||||||
// Before we can do any filtering, we need
|
// check when we've updated all
|
||||||
// to load status models for cached entries.
|
// our timeline endpoints to have
|
||||||
err := loadStatuses(metas, loadIDs)
|
// streamed timeline caches.
|
||||||
|
if t != nil {
|
||||||
|
|
||||||
|
// Ensure timeline has been preloaded.
|
||||||
|
_, err = t.Preload(loadPage, filter)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", "", gtserror.Newf("error loading statuses: %w", err)
|
return nil, "", "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set initial lo, hi values.
|
// First we attempt to load status
|
||||||
lo = metas[len(metas)-1].ID
|
// metadata entries from the timeline
|
||||||
hi = metas[0].ID
|
// cache, up to given limit.
|
||||||
|
metas = t.cache.Select(
|
||||||
// Update paging parameters used for next database query.
|
util.PtrIf(lo),
|
||||||
nextPageParams(nextPg, metas[len(metas)-1].ID, order)
|
util.PtrIf(hi),
|
||||||
|
util.PtrIf(limit),
|
||||||
// Allocate slice of expected required API models.
|
dir,
|
||||||
apiStatuses = make([]*apimodel.Status, 0, len(metas))
|
|
||||||
|
|
||||||
// Prepare frontend API models for
|
|
||||||
// the cached statuses. For now this
|
|
||||||
// also does its own extra filtering.
|
|
||||||
apiStatuses = prepareStatuses(ctx,
|
|
||||||
metas,
|
|
||||||
prepareAPI,
|
|
||||||
apiStatuses,
|
|
||||||
limit,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Reset lo, hi values from being
|
||||||
|
// used as input arguments, ready
|
||||||
|
// to be updated for return.
|
||||||
|
lo, hi = "", ""
|
||||||
|
|
||||||
|
if len(metas) > 0 {
|
||||||
|
// Before we can do any filtering, we need
|
||||||
|
// to load status models for cached entries.
|
||||||
|
err = loadStatuses(metas, loadIDs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", "", gtserror.Newf("error loading statuses: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set initial lo, hi values.
|
||||||
|
lo = metas[len(metas)-1].ID
|
||||||
|
hi = metas[0].ID
|
||||||
|
|
||||||
|
// Update paging parameters used for next database query.
|
||||||
|
nextPageParams(nextPg, metas[len(metas)-1].ID, order)
|
||||||
|
|
||||||
|
// Allocate slice of expected required API models.
|
||||||
|
apiStatuses = make([]*apimodel.Status, 0, len(metas))
|
||||||
|
|
||||||
|
// Prepare frontend API models for
|
||||||
|
// the cached statuses. For now this
|
||||||
|
// also does its own extra filtering.
|
||||||
|
apiStatuses = prepareStatuses(ctx,
|
||||||
|
metas,
|
||||||
|
prepareAPI,
|
||||||
|
apiStatuses,
|
||||||
|
limit,
|
||||||
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If no cached timeline statuses
|
// If no cached timeline statuses
|
||||||
// were found for page, we need to
|
// were found for page, we need to
|
||||||
// call through to the database.
|
// call through to the database.
|
||||||
if len(apiStatuses) == 0 {
|
if len(apiStatuses) == 0 {
|
||||||
var err error
|
|
||||||
|
|
||||||
// Pass through to main timeline db load function.
|
// Pass through to main timeline db load function.
|
||||||
apiStatuses, lo, hi, err = loadStatusTimeline(ctx,
|
apiStatuses, lo, hi, err = loadStatusTimeline(ctx,
|
||||||
|
|
@ -460,63 +435,13 @@ func (t *StatusTimeline) Load(
|
||||||
return apiStatuses, lo, hi, nil
|
return apiStatuses, lo, hi, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadStatusTimeline is a function that may be used to load a timeline
|
// loadStatusTimeline encapsulates the logic of iteratively
|
||||||
// page in a functionally similar way to StatusTimeline{}.Load(), but without
|
// attempting to load a status timeline page from the database,
|
||||||
// actually having access to a StatusTimeline{}. For example, for timelines that
|
// that is in the form of given callback functions. these will
|
||||||
// we want to share code, but without yet implementing a cache for them. Note this
|
// then be prepared to frontend API models for return.
|
||||||
// function may be removed in the future when un-needed.
|
|
||||||
func LoadStatusTimeline(
|
|
||||||
ctx context.Context,
|
|
||||||
page *paging.Page,
|
|
||||||
loadPage func(page *paging.Page) (statuses []*gtsmodel.Status, err error),
|
|
||||||
filter func(each *gtsmodel.Status) (delete bool, err error),
|
|
||||||
prepareAPI func(status *gtsmodel.Status) (apiStatus *apimodel.Status, err error),
|
|
||||||
) (
|
|
||||||
[]*apimodel.Status,
|
|
||||||
string, // lo
|
|
||||||
string, // hi
|
|
||||||
error,
|
|
||||||
) {
|
|
||||||
// Use a copy of current page so
|
|
||||||
// we can repeatedly update it.
|
|
||||||
nextPg := new(paging.Page)
|
|
||||||
*nextPg = *page
|
|
||||||
|
|
||||||
// Pass through to main timeline db load function.
|
|
||||||
apiStatuses, lo, hi, err := loadStatusTimeline(ctx,
|
|
||||||
nextPg,
|
|
||||||
nil,
|
|
||||||
nil,
|
|
||||||
loadPage,
|
|
||||||
filter,
|
|
||||||
prepareAPI,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if page.Order().Ascending() {
|
|
||||||
// The caller always expects the statuses
|
|
||||||
// to be returned in DESC order, but we
|
|
||||||
// build the status slice in paging order.
|
|
||||||
// If paging ASC, we need to reverse the
|
|
||||||
// returned statuses and paging values.
|
|
||||||
slices.Reverse(apiStatuses)
|
|
||||||
lo, hi = hi, lo
|
|
||||||
}
|
|
||||||
|
|
||||||
return apiStatuses, lo, hi, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadStatusTimeline encapsulates most of the main
|
|
||||||
// timeline-load-from-database logic, allowing both
|
|
||||||
// the temporary LoadStatusTimeline() function AND
|
|
||||||
// the main StatusTimeline{}.Load() function to share
|
|
||||||
// as much logic as possible.
|
|
||||||
//
|
//
|
||||||
// TODO: it may be worth moving this into StatusTimeline{}.Load()
|
// in time it may make sense to move this logic
|
||||||
// once the temporary function above has been removed. Or it may
|
// into the StatusTimeline{}.Load() function.
|
||||||
// still be worth keeping *some* database logic separate.
|
|
||||||
func loadStatusTimeline(
|
func loadStatusTimeline(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
nextPg *paging.Page,
|
nextPg *paging.Page,
|
||||||
|
|
@ -793,13 +718,7 @@ func (t *StatusTimeline) Trim() { t.cache.Trim(t.cut, structr.Asc) }
|
||||||
|
|
||||||
// Clear will mark the entire timeline as requiring preload,
// which will trigger a clear and reload of the entire thing.
// (see preloader.clear() for the underlying state handling).
func (t *StatusTimeline) Clear() { t.preloader.clear() }
|
|
||||||
|
|
||||||
// prepareStatuses takes a slice of cached (or, freshly loaded!) StatusMeta{}
|
// prepareStatuses takes a slice of cached (or, freshly loaded!) StatusMeta{}
|
||||||
// models, and use given function to return prepared frontend API models.
|
// models, and use given function to return prepared frontend API models.
|
||||||
|
|
|
||||||
|
|
@ -128,42 +128,31 @@ func (p *Processor) getStatusTimeline(
|
||||||
return apiStatus, nil
|
return apiStatus, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if timeline != nil {
|
// Load status page via timeline cache, also
|
||||||
// Load status page via timeline cache, also
|
// getting lo, hi values for next, prev pages.
|
||||||
// getting lo, hi values for next, prev pages.
|
apiStatuses, lo, hi, err = timeline.Load(ctx,
|
||||||
apiStatuses, lo, hi, err = timeline.Load(ctx,
|
|
||||||
|
|
||||||
// Status page
|
// Status page
|
||||||
// to load.
|
// to load.
|
||||||
page,
|
page,
|
||||||
|
|
||||||
// Caller provided database
|
// Caller provided database
|
||||||
// status page loading function.
|
// status page loading function.
|
||||||
loadPage,
|
loadPage,
|
||||||
|
|
||||||
// Status load function for cached timeline entries.
|
// Status load function for cached timeline entries.
|
||||||
func(ids []string) ([]*gtsmodel.Status, error) {
|
func(ids []string) ([]*gtsmodel.Status, error) {
|
||||||
return p.state.DB.GetStatusesByIDs(ctx, ids)
|
return p.state.DB.GetStatusesByIDs(ctx, ids)
|
||||||
},
|
},
|
||||||
|
|
||||||
// Filtering function,
|
// Filtering function,
|
||||||
// i.e. filter before caching.
|
// i.e. filter before caching.
|
||||||
filter,
|
filter,
|
||||||
|
|
||||||
// Frontend API model
|
// Frontend API model
|
||||||
// preparation function.
|
// preparation function.
|
||||||
prepare,
|
prepare,
|
||||||
)
|
)
|
||||||
} else {
|
|
||||||
// Load status page without a receiving timeline cache.
|
|
||||||
// TODO: remove this code path when all support caching.
|
|
||||||
apiStatuses, lo, hi, err = timelinepkg.LoadStatusTimeline(ctx,
|
|
||||||
page,
|
|
||||||
loadPage,
|
|
||||||
filter,
|
|
||||||
prepare,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err := gtserror.Newf("error loading timeline: %w", err)
|
err := gtserror.Newf("error loading timeline: %w", err)
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue