further work rewriting timeline caching

kim 2025-02-03 17:00:33 +00:00
commit 49d9a008d9
6 changed files with 230 additions and 327 deletions


@@ -18,13 +18,10 @@
package cache

import (
	"maps"
	"slices"
	"sync/atomic"

	"codeberg.org/gruf/go-cache/v3/simple"
	"codeberg.org/gruf/go-structr"

	"github.com/superseriousbusiness/gotosocial/internal/paging"
)
// SliceCache wraps a simple.Cache to provide simple loader-callback
@@ -193,218 +190,3 @@ func (c *StructCache[T]) InvalidateIDs(index string, ids []string) {
	// Pass to main invalidate func.
	c.Cache.Invalidate(i, keys...)
}

// TimelineCache wraps a structr.Timeline{} to provide lookup
// of its indices by name, along with a configured maximum size
// used when trimming the timeline.
type TimelineCache[T any] struct {
	structr.Timeline[T, string]
	index map[string]*structr.Index
	maxSz int
}

// Init initializes the underlying timeline with given configuration,
// storing each configured index by its field name for later lookup.
func (t *TimelineCache[T]) Init(config structr.TimelineConfig[T, string], maxSz int) {
	t.index = make(map[string]*structr.Index, len(config.Indices))
	t.Timeline = structr.Timeline[T, string]{}
	t.Timeline.Init(config)
	for _, cfg := range config.Indices {
		t.index[cfg.Fields] = t.Timeline.Index(cfg.Fields)
	}
	t.maxSz = maxSz
}

// toDirection converts a paging order to the
// equivalent structr direction, panicking on
// an unrecognized order value.
func toDirection(order paging.Order) structr.Direction {
	switch order {
	case paging.OrderAscending:
		return structr.Asc
	case paging.OrderDescending:
		return structr.Desc
	default:
		panic("invalid order")
	}
}

func (t *TimelineCache[T]) Select(page *paging.Page) []T {
	min, max := page.Min.Value, page.Max.Value
	lim, dir := page.Limit, toDirection(page.Order())
	return t.Timeline.Select(min, max, lim, dir)
}

func (t *TimelineCache[T]) Invalidate(index string, keyParts ...any) {
	i := t.index[index]
	t.Timeline.Invalidate(i, i.Key(keyParts...))
}

func (t *TimelineCache[T]) Trim(perc float64) {
	t.Timeline.Trim(perc, t.maxSz, structr.Asc)
}

func (t *TimelineCache[T]) InvalidateIDs(index string, ids []string) {
	i := t.index[index]
	if i == nil {
		// we only perform this check here as
		// we're going to use the index before
		// passing it to cache in main .Load().
		panic("missing index for cache type")
	}

	// Generate cache keys for ID types.
	keys := make([]structr.Key, len(ids))
	for x, id := range ids {
		keys[x] = i.Key(id)
	}

	// Pass to main invalidate func.
	t.Timeline.Invalidate(i, keys...)
}

// TimelinesCache provides a cache of TimelineCache{}
// objects, keyed by string and concurrency safe, optimized
// almost entirely for reads. On each creation of a new key
// in the cache, the entire internal map will be cloned, BUT
// all reads are only a single atomic operation, no mutex locks!
type TimelinesCache[T any] struct {
	cfg structr.TimelineConfig[T, string]
	ptr atomic.Pointer[map[string]*TimelineCache[T]] // ronly except by CAS
	max int
}
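
To make the copy-on-write trade-off described above concrete, here is a minimal, self-contained sketch of the same pattern using only the standard library. The cowMap type and its Get/Put methods are illustrative names only; they are not part of this commit or of go-structr.

package main

import (
	"fmt"
	"maps"
	"sync/atomic"
)

// cowMap demonstrates the copy-on-write approach: reads are a single
// atomic pointer load, while writes clone the map and swap it in via
// compare-and-swap, retrying if another writer got there first.
type cowMap struct {
	ptr atomic.Pointer[map[string]int]
}

// Get performs a lock-free read of the current map snapshot.
func (c *cowMap) Get(key string) (int, bool) {
	if p := c.ptr.Load(); p != nil {
		v, ok := (*p)[key]
		return v, ok
	}
	return 0, false
}

// Put clones the current map, adds the entry, then attempts to
// publish the clone; on CAS failure it reloads and tries again.
func (c *cowMap) Put(key string, val int) {
	for {
		cur := c.ptr.Load()
		var m map[string]int
		if cur != nil {
			m = maps.Clone(*cur)
		} else {
			m = make(map[string]int)
		}
		m[key] = val
		if c.ptr.CompareAndSwap(cur, &m) {
			return
		}
	}
}

func main() {
	var c cowMap
	c.Put("home:account1", 1)
	fmt.Println(c.Get("home:account1")) // 1 true
}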

// Init initializes the cache with given timeline
// configuration and maximum size per timeline.
func (t *TimelinesCache[T]) Init(config structr.TimelineConfig[T, string], max int) {
	// Create new test timeline to validate.
	(&TimelineCache[T]{}).Init(config, max)

	// Invalidate
	// timeline maps.
	t.ptr.Store(nil)

	// Set config.
	t.cfg = config
	t.max = max
}

// Get fetches a timeline with given ID from cache, creating it if required.
func (t *TimelinesCache[T]) Get(id string) *TimelineCache[T] {
	var tt *TimelineCache[T]

	for {
		// Load current ptr.
		cur := t.ptr.Load()

		// Get timeline map to work on.
		var m map[string]*TimelineCache[T]

		if cur != nil {
			// Look for existing
			// timeline in cache.
			tt = (*cur)[id]
			if tt != nil {
				return tt
			}

			// Get clone of current
			// before modifications.
			m = maps.Clone(*cur)
		} else {
			// Allocate new timeline map for below.
			m = make(map[string]*TimelineCache[T])
		}

		if tt == nil {
			// Allocate new timeline.
			tt = new(TimelineCache[T])
			tt.Init(t.cfg, t.max)
		}

		// Store timeline
		// in new map.
		m[id] = tt

		// Attempt to update the map ptr.
		if !t.ptr.CompareAndSwap(cur, &m) {
			// We failed the
			// CAS, reloop.
			continue
		}

		// Successfully inserted
		// new timeline model.
		return tt
	}
}

// Delete removes timeline with ID from cache.
func (t *TimelinesCache[T]) Delete(id string) {
	for {
		// Load current ptr.
		cur := t.ptr.Load()

		// Check for empty map / not in map.
		if cur == nil || (*cur)[id] == nil {
			return
		}

		// Get clone of current
		// before modifications.
		m := maps.Clone(*cur)

		// Delete ID.
		delete(m, id)

		// Attempt to update the map ptr.
		if !t.ptr.CompareAndSwap(cur, &m) {
			// We failed the
			// CAS, reloop.
			continue
		}

		// Successfully
		// deleted ID.
		return
	}
}

// Insert inserts the given values into every timeline currently in the cache.
func (t *TimelinesCache[T]) Insert(values ...T) {
	if p := t.ptr.Load(); p != nil {
		for _, timeline := range *p {
			timeline.Insert(values...)
		}
	}
}

// InsertInto inserts the given values into the timeline with given ID, creating it if required.
func (t *TimelinesCache[T]) InsertInto(id string, values ...T) {
	t.Get(id).Insert(values...)
}

// Invalidate invalidates the given index key in every timeline currently in the cache.
func (t *TimelinesCache[T]) Invalidate(index string, keyParts ...any) {
	if p := t.ptr.Load(); p != nil {
		for _, timeline := range *p {
			timeline.Invalidate(index, keyParts...)
		}
	}
}

// InvalidateFrom invalidates the given index key in the timeline with given ID.
func (t *TimelinesCache[T]) InvalidateFrom(id string, index string, keyParts ...any) {
	t.Get(id).Invalidate(index, keyParts...)
}

// InvalidateIDs invalidates the given IDs under index in every timeline currently in the cache.
func (t *TimelinesCache[T]) InvalidateIDs(index string, ids []string) {
	if p := t.ptr.Load(); p != nil {
		for _, timeline := range *p {
			timeline.InvalidateIDs(index, ids)
		}
	}
}

// InvalidateIDsFrom invalidates the given IDs under index in the timeline with given ID.
func (t *TimelinesCache[T]) InvalidateIDsFrom(id string, index string, ids []string) {
	t.Get(id).InvalidateIDs(index, ids)
}

// Trim trims every timeline currently in the cache by given percentage.
func (t *TimelinesCache[T]) Trim(perc float64) {
	if p := t.ptr.Load(); p != nil {
		for _, timeline := range *p {
			timeline.Trim(perc)
		}
	}
}

// Clear empties the timeline with given ID.
func (t *TimelinesCache[T]) Clear(id string) { t.Get(id).Clear() }
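
For a rough sense of how callers might drive the type above, here is a hypothetical helper written as if it lived in the same cache package. It is not part of this commit; it assumes a TimelinesCache that was already initialized via Init elsewhere, and a timeline configuration that defines an "ID" index (otherwise InvalidateIDs panics, as TimelineCache.InvalidateIDs above shows).

// refreshTimeline is a hypothetical helper, not part of this commit.
// It assumes tc was already initialized via tc.Init(config, max) and
// that the config defines an "ID" index.
func refreshTimeline[T any](
	tc *TimelinesCache[T],
	timelineID string,
	page *paging.Page,
	stale []string,
	fresh ...T,
) []T {
	// Drop stale entries from every cached timeline under the "ID" index.
	tc.InvalidateIDs("ID", stale)

	// Insert fresh entries only into the timeline being viewed;
	// Get() lazily creates the per-ID timeline on first use.
	tc.InsertInto(timelineID, fresh...)

	// Read back a page of entries; Select delegates to the
	// underlying structr.Timeline with converted paging params.
	return tc.Get(timelineID).Select(page)
}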