Mirror of https://github.com/superseriousbusiness/gotosocial.git

* start work rewriting timeline cache type
* further work rewriting timeline caching
* more work integrating new timeline code
* remove old code
* add local timeline, fix up merge conflicts
* remove old use of go-bytes
* implement new timeline code into more areas of codebase, pull in latest go-mangler, go-mutexes, go-structr
* remove old timeline package, add local timeline cache
* remove references to old timeline types that needed starting up in tests
* start adding page validation
* fix test-identified timeline cache package issues
* fix up more tests, fix missing required changes, etc
* add exclusion for test.out in gitignore
* clarify some things better in code comments
* tweak cache size limits
* fix list timeline cache fetching
* further list timeline fixes
* linter, ssssssssshhhhhhhhhhhh please
* fix linter hints
* reslice the output if it's beyond length of 'lim'
* remove old timeline initialization code, bump go-structr to v0.9.4
* continued from previous commit
* improved code comments
* don't allow multiple entries for BoostOfID values to prevent repeated boosts of same boosts
* finish writing more code comments
* some variable renaming, for ease of following
* change the way we update lo,hi paging values during timeline load
* improved code comments for updated / returned lo, hi paging values
* finish writing code comments for the StatusTimeline{} type itself
* fill in more code comments
* update go-structr version to latest with changed timeline unique indexing logic
* have a local and public timeline *per user*
* rewrite calls to public / local timeline calls
* remove the zero length check, as lo, hi values might still be set
* simplify timeline cache loading, fix lo/hi returns, fix timeline invalidation side-effects missing for some federated actions
* swap the lo, hi values 🤦
* add (now) missing slice reverse of tag timeline statuses when paging ASC
* remove local / public caches (is out of scope for this work), share more timeline code
* remove unnecessary change
* again, remove more unused code
* remove unused function to appease the linter
* move boost checking to prepare function
* fix use of timeline.lastOrder, fix incorrect range functions used
* remove comments for repeat code
* remove the boost logic from prepare function
* do a maximum of 5 loads, not 10
* add repeat boost filtering logic, update go-structr, general improvements
* more code comments
* add important note
* fix timeline tests now that timelines are returned in page order
* remove unused field
* add StatusTimeline{} tests
* add more status timeline tests
* start adding preloading support
* ensure repeat boosts are marked in preloaded entries
* share a bunch of the database load code in timeline cache, don't clear timelines on relationship change
* add logic to allow dynamic clear / preloading of timelines
* comment out unused functions, but leave in place as we might end up using them
* fix timeline preload state check
* much improved status timeline code comments
* more code comments, don't bother inserting statuses if timeline not preloaded
* shift around some logic to make sure things aren't accidentally left set
* finish writing code comments
* remove trim-after-insert behaviour
* fix-up some comments referring to old logic
* remove unsetting of lo, hi
* fix preload repeatBoost checking logic
* don't return on status filter errors, these are usually transient
* better concurrency safety in Clear() and Done()
* fix test broken due to addition of preloader
* fix repeatBoost logic that doesn't account for already-hidden repeatBoosts
* ensure edit submodels are dropped on cache insertion
* update code-comment to expand CAS acronym
* use a plus1hULID() instead of 24h
* remove unused functions
* add note that public / local timeline requester can be nil
* fix incorrect visibility filtering of tag timeline statuses
* ensure we filter home timeline statuses on local only
* some small re-orderings to confirm query params in correct places
* fix the local only home timeline filter func

469 lines · 10 KiB · Go

package structr

import (
	"fmt"
	"os"
	"reflect"
	"strings"
	"sync"
	"unsafe"

	"codeberg.org/gruf/go-byteutil"
)

// IndexConfig defines config variables
// for initializing a struct index.
type IndexConfig struct {

	// Fields should contain a comma-separated
	// list of struct fields used when generating
	// keys for this index. Nested fields should
	// be specified using periods. An example:
	// "Username,Favorites.Color"
	//
	// Note that nested fields where the nested
	// struct field is a ptr are supported, but
	// nil ptr values in nesting will result in
	// that particular value NOT being indexed.
	// e.g. with "Favorites.Color" if *Favorites
	// is nil then it will not be indexed.
	//
	// Field types supported include any of those
	// supported by the `go-mangler` library.
	Fields string

	// Multiple indicates whether to accept multiple
	// possible values for any single index key. The
	// default behaviour is to only accept one value
	// and overwrite existing on any write operation.
	Multiple bool

	// AllowZero indicates whether to accept zero
	// value fields in index keys. i.e. whether to
	// index structs for this set of field values
	// IF any one of those field values is the zero
	// value for that type. The default behaviour
	// is to skip indexing structs for this lookup
	// when any of the indexing fields are zero.
	AllowZero bool
}

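// Illustrative example (editorial note, not in the original source):
// an IndexConfig selecting a two-part key with a nested field, per
// the Fields format described above:
//
//	structr.IndexConfig{
//		Fields:    "Username,Favorites.Color", // nested field via '.'
//		Multiple:  false,                      // unique: writes overwrite
//		AllowZero: false,                      // skip keys w/ zero fields
//	}
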
// Index is an exposed Cache internal model, used to
// extract struct keys, generate hash checksums for them,
// and store struct results according to the config
// defined at init. This model is exposed to provide
// faster lookups in the case that you would like to
// manually provide the used index via the Cache.___By()
// series of functions, or access the underlying index
// key generator.
type Index struct {

	// ptr is a pointer to
	// the source Cache/Queue
	// this index is attached to.
	ptr unsafe.Pointer

	// name is the actual name of this
	// index, which is the unparsed
	// string value of contained fields.
	name string

	// backing data store of the index, containing
	// the cached results contained within wrapping
	// index_entry{} which also contains the exact
	// key each result is stored under. the hash map
	// only keys by the xxh3 hash checksum for speed.
	data hashmap

	// struct fields encompassed by
	// keys (+ hashes) of this index.
	fields []struct_field

	// index flags:
	// - 1 << 0 = unique
	// - 1 << 1 = allow zero
	flags uint8
}

// Name returns the receiving Index name.
func (i *Index) Name() string {
	return i.name
}

// Key generates Key{} from given parts for
// the type of lookup this Index uses in cache.
// NOTE: panics on incorrect no. parts / types given.
func (i *Index) Key(parts ...any) Key {
	ptrs := make([]unsafe.Pointer, len(parts))
	for x, part := range parts {
		ptrs[x] = eface_data(part)
	}
	buf := new_buffer()
	key := i.key(buf, ptrs)
	free_buffer(buf)
	return Key{
		raw: parts,
		key: key,
	}
}

// Keys generates []Key{} from given (multiple) parts
// for the type of lookup this Index uses in the cache.
// NOTE: panics on incorrect no. parts / types given.
func (i *Index) Keys(parts ...[]any) []Key {
	keys := make([]Key, 0, len(parts))
	buf := new_buffer()
	for _, parts := range parts {
		ptrs := make([]unsafe.Pointer, len(parts))
		for x, part := range parts {
			ptrs[x] = eface_data(part)
		}
		key := i.key(buf, ptrs)
		if key == "" {
			continue
		}
		keys = append(keys, Key{
			raw: parts,
			key: key,
		})
	}
	free_buffer(buf)
	return keys
}

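// Illustrative example (editorial note, not in the original source):
// parts must match the configured index fields in order and type,
// e.g. for an index on "Username,Favorites.Color":
//
//	key := idx.Key("alice", "blue")
//	keys := idx.Keys(
//		[]any{"alice", "blue"},
//		[]any{"bob", "red"},
//	)
//
// Note Keys() silently drops part-sets that mangle to the empty
// key (see key() below), while Key() always returns a Key{}.
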
// init will initialize the index with given type, config and capacity.
func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
	switch {
	// The only 2 types we support are
	// structs, and ptrs to a struct.
	case t.Kind() == reflect.Struct:
	case t.Kind() == reflect.Pointer &&
		t.Elem().Kind() == reflect.Struct:
	default:
		panic("index only supports struct{} and *struct{}")
	}

	// Set name from the raw
	// struct fields string.
	i.name = cfg.Fields

	// Set struct flags.
	if cfg.AllowZero {
		set_allow_zero(&i.flags)
	}
	if !cfg.Multiple {
		set_is_unique(&i.flags)
	}

	// Split to get containing struct fields.
	fields := strings.Split(cfg.Fields, ",")

	// Preallocate expected struct field slice.
	i.fields = make([]struct_field, len(fields))
	for x, name := range fields {

		// Split name to account for nesting.
		names := strings.Split(name, ".")

		// Look for usable struct field.
		i.fields[x] = find_field(t, names)
	}

	// Initialize store for
	// index_entry lists.
	i.data.Init(cap)
}

// get_one will fetch one indexed item under key.
func (i *Index) get_one(key Key) *indexed_item {
	// Get list at hash.
	l := i.data.Get(key.key)
	if l == nil {
		return nil
	}

	// Extract entry from first list elem.
	entry := (*index_entry)(l.head.data)

	return entry.item
}

// get will fetch all indexed items under key, passing each to hook.
func (i *Index) get(key string, hook func(*indexed_item)) {
	if hook == nil {
		panic("nil hook")
	}

	// Get list at hash.
	l := i.data.Get(key)
	if l == nil {
		return
	}

	// Iterate the list.
	for elem := l.head; elem != nil; {

		// Get next before
		// any modification.
		next := elem.next

		// Extract element entry + item.
		entry := (*index_entry)(elem.data)
		item := entry.item

		// Pass to hook.
		hook(item)

		// Set next.
		elem = next
	}
}

// key generates a raw string key from the given
// parts, using the index's per-field manglers.
func (i *Index) key(buf *byteutil.Buffer, parts []unsafe.Pointer) string {
	buf.B = buf.B[:0]
	if len(parts) != len(i.fields) {
		panic(fmt.Sprintf("incorrect number of key parts: want=%d received=%d",
			len(i.fields),
			len(parts),
		))
	}
	if !allow_zero(i.flags) {
		for x, field := range i.fields {
			before := len(buf.B)
			buf.B = field.mangle(buf.B, parts[x])
			if string(buf.B[before:]) == field.zerostr {
				return ""
			}
			buf.B = append(buf.B, '.')
		}
	} else {
		for x, field := range i.fields {
			buf.B = field.mangle(buf.B, parts[x])
			buf.B = append(buf.B, '.')
		}
	}
	return string(buf.B)
}

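// Editorial note: the generated key is each field's go-mangler
// encoding joined by a trailing '.', so for an index on
// "Username,Favorites.Color" with parts ("alice", "blue") the key
// is conceptually "alice.blue." (exact bytes depend on the mangler).
// With AllowZero unset, any zero-valued field instead yields "",
// which callers treat as "skip indexing for this part-set".
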
// add will attempt to add given index entry to appropriate
// doubly-linked-list in index hashmap. in the case of an
// existing entry in a "unique" index, it will return false.
func (i *Index) add(key string, item *indexed_item) bool {
	// Look for existing.
	l := i.data.Get(key)

	if l == nil {

		// Allocate new.
		l = new_list()
		i.data.Put(key, l)

	} else if is_unique(i.flags) {

		// Collision!
		return false
	}

	// Prepare new index entry.
	entry := new_index_entry()
	entry.item = item
	entry.key = key
	entry.index = i

	// Add ourselves to item's index tracker.
	item.indexed = append(item.indexed, entry)

	// Add entry to index list.
	l.push_front(&entry.elem)
	return true
}

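// Editorial note: add() and append() differ only in unique-index
// collision handling: add() keeps the existing entry and reports
// false, while append() evicts the existing entry, returning the
// evicted item if the eviction left it unindexed everywhere.
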
// append will append the given index entry to appropriate
// doubly-linked-list in index hashmap. this handles the case
// of overwriting "unique" index entries, returning the
// previously stored item if the overwrite leaves it no
// longer indexed anywhere.
func (i *Index) append(key string, item *indexed_item) (evicted *indexed_item) {
	// Look for existing.
	l := i.data.Get(key)

	if l == nil {

		// Allocate new.
		l = new_list()
		i.data.Put(key, l)

	} else if is_unique(i.flags) {

		// Remove head.
		elem := l.head
		l.remove(elem)

		// Drop index from inner item,
		// catching the evicted item.
		e := (*index_entry)(elem.data)
		evicted = e.item
		evicted.drop_index(e)

		// Free unused entry.
		free_index_entry(e)

		if len(evicted.indexed) != 0 {
			// Evicted is still stored
			// under index, don't return.
			evicted = nil
		}
	}

	// Prepare new index entry.
	entry := new_index_entry()
	entry.item = item
	entry.key = key
	entry.index = i

	// Add ourselves to item's index tracker.
	item.indexed = append(item.indexed, entry)

	// Add entry to index list.
	l.push_front(&entry.elem)
	return
}

// delete will remove all indexed items under key, passing each to hook.
func (i *Index) delete(key string, hook func(*indexed_item)) {
	if hook == nil {
		panic("nil hook")
	}

	// Get list at hash.
	l := i.data.Get(key)
	if l == nil {
		return
	}

	// Delete at hash.
	i.data.Delete(key)

	// Iterate the list.
	for elem := l.head; elem != nil; {

		// Get next before
		// any modification.
		next := elem.next

		// Remove elem.
		l.remove(elem)

		// Extract element entry + item.
		entry := (*index_entry)(elem.data)
		item := entry.item

		// Drop index from item.
		item.drop_index(entry)

		// Free now-unused entry.
		free_index_entry(entry)

		// Pass to hook.
		hook(item)

		// Set next.
		elem = next
	}

	// Release list.
	free_list(l)
}

// delete_entry deletes the given index entry.
func (i *Index) delete_entry(entry *index_entry) {
	// Get list at hash sum.
	l := i.data.Get(entry.key)
	if l == nil {
		return
	}

	// Remove list entry.
	l.remove(&entry.elem)

	if l.len == 0 {
		// Remove entry from map.
		i.data.Delete(entry.key)

		// Release list.
		free_list(l)
	}

	// Drop this index from item.
	entry.item.drop_index(entry)
}

// index_entry represents a single entry
// in an Index{}, where it will be accessible
// by Key{} pointing to a containing list{}.
type index_entry struct {

	// list elem that entry is stored
	// within, under containing index.
	// elem.data is ptr to index_entry.
	elem list_elem

	// index this is stored in.
	index *Index

	// underlying indexed item.
	item *indexed_item

	// raw cache key
	// for this entry.
	key string
}

var index_entry_pool sync.Pool

// new_index_entry returns a new prepared index_entry.
func new_index_entry() *index_entry {
	v := index_entry_pool.Get()
	if v == nil {
		e := new(index_entry)
		e.elem.data = unsafe.Pointer(e)
		v = e
	}
	entry := v.(*index_entry)
	return entry
}

// free_index_entry releases the index_entry.
func free_index_entry(entry *index_entry) {
	if entry.elem.next != nil ||
		entry.elem.prev != nil {
		msg := assert("entry not in use")
		os.Stderr.WriteString(msg + "\n")
		return
	}
	entry.key = ""
	entry.index = nil
	entry.item = nil
	index_entry_pool.Put(entry)
}

func is_unique(f uint8) bool {
	const mask = uint8(1) << 0
	return f&mask != 0
}

func set_is_unique(f *uint8) {
	const mask = uint8(1) << 0
	(*f) |= mask
}

func allow_zero(f uint8) bool {
	const mask = uint8(1) << 1
	return f&mask != 0
}

func set_allow_zero(f *uint8) {
	const mask = uint8(1) << 1
	(*f) |= mask
}
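
A minimal usage sketch tying together the IndexConfig{}, Index.Key() and unique-index semantics documented in the file above. This is an editorial illustration only: the Cache type's Init / Index / GetOne / Put method names and the CacheConfig fields shown are assumptions about go-structr's public surface, not taken from this file, and should be checked against the library version in use.

package main

import (
	"fmt"

	"codeberg.org/gruf/go-structr"
)

// User is a hypothetical cached struct; the IndexConfig
// Fields strings below refer to these field names.
type User struct {
	ID       string
	Username string
}

func main() {
	var cache structr.Cache[*User]

	// Assumed config shape: one unique index per field
	// (Multiple defaults to false, i.e. unique).
	cache.Init(structr.CacheConfig[*User]{
		Indices: []structr.IndexConfig{
			{Fields: "ID"},
			{Fields: "Username"},
		},
		// Copy is assumed required here, so the cache can
		// hand out copies rather than shared mutable pointers.
		Copy: func(u *User) *User {
			u2 := *u
			return &u2
		},
	})

	// Put indexes the value under every configured index.
	cache.Put(&User{ID: "01AAA", Username: "alice"})

	// Fetch the precomputed Index once, then build keys with
	// Index.Key() (panics on wrong part count / types).
	byUsername := cache.Index("Username")
	if u, ok := cache.GetOne(byUsername, byUsername.Key("alice")); ok {
		fmt.Println(u.ID)
	}
}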