mirror of https://github.com/superseriousbusiness/gotosocial.git
more work integrating new timeline code
parent 49d9a008d9
commit 771fbe2d5e

14 changed files with 419 additions and 606 deletions
							
								
								
									
23  internal/cache/db.go (vendored)
							|  | @ -1311,29 +1311,6 @@ func (c *Caches) initStatus() { | |||
| 
 | ||||
| 	log.Infof(nil, "cache size = %d", cap) | ||||
| 
 | ||||
| 	copyF := func(s1 *gtsmodel.Status) *gtsmodel.Status { | ||||
| 		s2 := new(gtsmodel.Status) | ||||
| 		*s2 = *s1 | ||||
| 
 | ||||
| 		// Don't include ptr fields that | ||||
| 		// will be populated separately. | ||||
| 		// See internal/db/bundb/status.go. | ||||
| 		s2.Account = nil | ||||
| 		s2.InReplyTo = nil | ||||
| 		s2.InReplyToAccount = nil | ||||
| 		s2.BoostOf = nil | ||||
| 		s2.BoostOfAccount = nil | ||||
| 		s2.Poll = nil | ||||
| 		s2.Attachments = nil | ||||
| 		s2.Tags = nil | ||||
| 		s2.Mentions = nil | ||||
| 		s2.Emojis = nil | ||||
| 		s2.CreatedWithApplication = nil | ||||
| 		s2.Edits = nil | ||||
| 
 | ||||
| 		return s2 | ||||
| 	} | ||||
| 
 | ||||
| 	c.DB.Status.Init(structr.CacheConfig[*gtsmodel.Status]{ | ||||
| 		Indices: []structr.IndexConfig{ | ||||
| 			{Fields: "ID"}, | ||||
|  |  | |||
							
								
								
									
299  internal/cache/timeline/status.go (vendored)
							|  | @ -30,6 +30,7 @@ import ( | |||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/log" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/paging" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/util/xslices" | ||||
| ) | ||||
| 
 | ||||
| // StatusMeta contains minimum viable metadata | ||||
|  | @ -59,6 +60,12 @@ type StatusMeta struct { | |||
| 	loaded *gtsmodel.Status | ||||
| } | ||||
| 
 | ||||
| // isLoaded is a small utility func that can fill | ||||
| // the slices.DeleteFunc() signature requirements. | ||||
| func (m *StatusMeta) isLoaded() bool { | ||||
| 	return m.loaded == nil | ||||
| } | ||||
| 
 | ||||
| // StatusTimelines ... | ||||
| type StatusTimelines struct { | ||||
| 	ptr atomic.Pointer[map[string]*StatusTimeline] // ronly except by CAS | ||||
|  | @ -266,7 +273,7 @@ func (t *StatusTimeline) Init(cap int) { | |||
| 				AccountID:        s.AccountID, | ||||
| 				BoostOfID:        s.BoostOfID, | ||||
| 				BoostOfAccountID: s.BoostOfAccountID, | ||||
| 				loaded:           nil, // NEVER copied | ||||
| 				loaded:           nil, // NEVER stored | ||||
| 				prepared:         prepared, | ||||
| 			} | ||||
| 		}, | ||||
|  | @ -285,25 +292,28 @@ func (t *StatusTimeline) Load( | |||
| 	page *paging.Page, | ||||
| 
 | ||||
| 	// loadPage should load the timeline of given page for cache hydration. | ||||
| 	loadPage func(page *paging.Page) ([]*gtsmodel.Status, error), | ||||
| 	loadPage func(page *paging.Page) (statuses []*gtsmodel.Status, err error), | ||||
| 
 | ||||
| 	// loadIDs should load status models with given IDs. | ||||
| 	loadIDs func([]string) ([]*gtsmodel.Status, error), | ||||
| 	// loadIDs should load status models with given IDs, this is used | ||||
| 	// to load status models of already cached entries in the timeline. | ||||
| 	loadIDs func(ids []string) (statuses []*gtsmodel.Status, err error), | ||||
| 
 | ||||
| 	// preFilter can be used to perform filtering of returned | ||||
| 	// statuses BEFORE insert into cache, i.e. this will affect | ||||
| 	// what actually gets stored in the timeline cache. | ||||
| 	preFilter func(*gtsmodel.Status) (bool, error), | ||||
| 	preFilter func(each *gtsmodel.Status) (delete bool, err error), | ||||
| 
 | ||||
| 	// postFilter can be used to perform filtering of returned | ||||
| 	// statuses AFTER insert into cache, i.e. this will not affect | ||||
| 	// what actually gets stored in the timeline cache. | ||||
| 	postFilter func(*StatusMeta) bool, | ||||
| 	postFilter func(each *gtsmodel.Status) (delete bool, err error), | ||||
| 
 | ||||
| 	// prepareAPI should prepare internal status model to frontend API model. | ||||
| 	prepareAPI func(*gtsmodel.Status) (*apimodel.Status, error), | ||||
| 	prepareAPI func(status *gtsmodel.Status) (apiStatus *apimodel.Status, err error), | ||||
| ) ( | ||||
| 	[]*apimodel.Status, | ||||
| 	string, // lo | ||||
| 	string, // hi | ||||
| 	error, | ||||
| ) { | ||||
| 	switch { | ||||
|  | @ -320,18 +330,54 @@ func (t *StatusTimeline) Load( | |||
| 	ord := page.Order() | ||||
| 	dir := toDirection(ord) | ||||
| 
 | ||||
| 	// Load cached timeline entries for page. | ||||
| 	meta := t.cache.Select(min, max, lim, dir) | ||||
| 	// First we attempt to load status metadata | ||||
| 	// entries from the timeline cache, up to lim. | ||||
| 	metas := t.cache.Select(min, max, lim, dir) | ||||
| 
 | ||||
| 	// Perform any timeline post-filtering. | ||||
| 	meta = doPostFilter(meta, postFilter) | ||||
| 	// Set the starting lo / hi ID paging | ||||
| 	// values. We continually update these | ||||
| 	// for further timeline selections and | ||||
| 	// for returning final next / prev pgs. | ||||
| 	lo, hi := min, max | ||||
| 
 | ||||
| 	// ... | ||||
| 	if need := len(meta) - lim; need > 0 { | ||||
| 	if len(metas) > 0 { | ||||
| 		// Update paging values | ||||
| 		// based on returned data. | ||||
| 		lo, hi = nextPageParams( | ||||
| 			lo, hi, | ||||
| 			metas[len(metas)-1].ID, | ||||
| 			metas[0].ID, | ||||
| 			ord, | ||||
| 		) | ||||
| 
 | ||||
| 		// Set first page | ||||
| 		// query to load. | ||||
| 		nextPg := page | ||||
| 		// Before we can do any filtering, we need | ||||
| 		// to load status models for cached entries. | ||||
| 		err := loadStatuses(ctx, metas, loadIDs) | ||||
| 		if err != nil { | ||||
| 			return nil, "", "", gtserror.Newf("error loading statuses: %w", err) | ||||
| 		} | ||||
| 
 | ||||
| 		// Drop all entries we failed to load statuses for. | ||||
| 		metas = slices.DeleteFunc(metas, (*StatusMeta).isLoaded) | ||||
| 
 | ||||
| 		// Perform any post-filtering on cached status entries. | ||||
| 		metas, _, err = doStatusPostFilter(metas, postFilter) | ||||
| 		if err != nil { | ||||
| 			return nil, "", "", gtserror.Newf("error post-filtering statuses: %w", err) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	var filtered []*StatusMeta | ||||
| 
 | ||||
| 	// Check whether loaded enough from cache. | ||||
| 	if need := len(metas) - lim; need > 0 { | ||||
| 
 | ||||
| 		// Use a copy of current page so | ||||
| 		// we can repeatedly update it. | ||||
| 		nextPg := new(paging.Page) | ||||
| 		*nextPg = *page | ||||
| 		nextPg.Min.Value = lo | ||||
| 		nextPg.Max.Value = hi | ||||
| 
 | ||||
| 		// Perform a maximum of 5 | ||||
| 		// load attempts fetching | ||||
|  | @ -341,7 +387,7 @@ func (t *StatusTimeline) Load( | |||
| 			// Load next timeline statuses. | ||||
| 			statuses, err := loadPage(nextPg) | ||||
| 			if err != nil { | ||||
| 				return nil, gtserror.Newf("error loading timeline: %w", err) | ||||
| 				return nil, "", "", gtserror.Newf("error loading timeline: %w", err) | ||||
| 			} | ||||
| 
 | ||||
| 			// No more statuses from | ||||
|  | @ -350,59 +396,65 @@ func (t *StatusTimeline) Load( | |||
| 				break | ||||
| 			} | ||||
| 
 | ||||
| 			// Get the lowest and highest | ||||
| 			// ID values, used for next pg. | ||||
| 			// Done BEFORE status filtering. | ||||
| 			lo := statuses[len(statuses)-1].ID | ||||
| 			hi := statuses[0].ID | ||||
| 			// Update paging values | ||||
| 			// based on returned data. | ||||
| 			lo, hi = nextPageParams( | ||||
| 				lo, hi, | ||||
| 				statuses[len(statuses)-1].ID, | ||||
| 				statuses[0].ID, | ||||
| 				ord, | ||||
| 			) | ||||
| 
 | ||||
| 			// Perform any status timeline pre-filtering. | ||||
| 			statuses, err = doPreFilter(statuses, preFilter) | ||||
| 			// Update paging params. | ||||
| 			nextPg.Min.Value = lo | ||||
| 			nextPg.Max.Value = hi | ||||
| 
 | ||||
| 			// Perform any pre-filtering on newly loaded statuses. | ||||
| 			statuses, err = doStatusPreFilter(statuses, preFilter) | ||||
| 			if err != nil { | ||||
| 				return nil, gtserror.Newf("error pre-filtering timeline: %w", err) | ||||
| 				return nil, "", "", gtserror.Newf("error pre-filtering statuses: %w", err) | ||||
| 			} | ||||
| 
 | ||||
| 			// After filtering no more | ||||
| 			// statuses remain, retry. | ||||
| 			if len(statuses) == 0 { | ||||
| 				continue | ||||
| 			} | ||||
| 
 | ||||
| 			// Convert to our cache type, | ||||
| 			// these will get inserted into | ||||
| 			// the cache in prepare() below. | ||||
| 			m := toStatusMeta(statuses) | ||||
| 			uncached := toStatusMeta(statuses) | ||||
| 
 | ||||
| 			// Perform any post-filtering. | ||||
| 			// and append to main meta slice. | ||||
| 			m = slices.DeleteFunc(m, postFilter) | ||||
| 			meta = append(meta, m...) | ||||
| 			// Perform any post-filtering on recently loaded timeline entries. | ||||
| 			newMetas, newFiltered, err := doStatusPostFilter(uncached, postFilter) | ||||
| 			if err != nil { | ||||
| 				return nil, "", "", gtserror.Newf("error post-filtering statuses: %w", err) | ||||
| 			} | ||||
| 
 | ||||
| 			// Append the meta to their relevant slices. | ||||
| 			filtered = append(filtered, newFiltered...) | ||||
| 			metas = append(metas, newMetas...) | ||||
| 
 | ||||
| 			// Check if we reached | ||||
| 			// requested page limit. | ||||
| 			if len(meta) >= lim { | ||||
| 			if len(metas) >= lim { | ||||
| 				break | ||||
| 			} | ||||
| 
 | ||||
| 			// Set next paging value. | ||||
| 			nextPg = nextPg.Next(lo, hi) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// Using meta and given funcs, prepare frontend API models. | ||||
| 	apiStatuses, err := t.prepare(ctx, meta, loadIDs, prepareAPI) | ||||
| 	// Using meta and funcs, prepare frontend API models. | ||||
| 	apiStatuses, err := t.prepare(ctx, metas, prepareAPI) | ||||
| 	if err != nil { | ||||
| 		return nil, gtserror.Newf("error preparing api statuses: %w", err) | ||||
| 		return nil, "", "", gtserror.Newf("error preparing api statuses: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Ensure the returned statuses are ALWAYS in descending order. | ||||
| 	slices.SortFunc(apiStatuses, func(s1, s2 *apimodel.Status) int { | ||||
| 		const k = +1 | ||||
| 		switch { | ||||
| 		case s1.ID > s2.ID: | ||||
| 			return +k | ||||
| 		case s1.ID < s2.ID: | ||||
| 			return -k | ||||
| 		default: | ||||
| 			return 0 | ||||
| 		} | ||||
| 	}) | ||||
| 	// Even if we don't return them, insert | ||||
| 	// the excess (post-filtered) into cache. | ||||
| 	t.cache.Insert(filtered...) | ||||
| 
 | ||||
| 	return apiStatuses, nil | ||||
| 	return apiStatuses, lo, hi, nil | ||||
| } | ||||
| 
 | ||||
| // Insert ... | ||||
|  | @ -543,15 +595,12 @@ func (t *StatusTimeline) Clear() { t.cache.Clear() } | |||
| func (t *StatusTimeline) prepare( | ||||
| 	ctx context.Context, | ||||
| 	meta []*StatusMeta, | ||||
| 	loadIDs func([]string) ([]*gtsmodel.Status, error), | ||||
| 	prepareAPI func(*gtsmodel.Status) (*apimodel.Status, error), | ||||
| ) ( | ||||
| 	[]*apimodel.Status, | ||||
| 	error, | ||||
| ) { | ||||
| 	switch { | ||||
| 	case loadIDs == nil: | ||||
| 		panic("nil load fn") | ||||
| 	case prepareAPI == nil: | ||||
| 		panic("nil prepare fn") | ||||
| 	} | ||||
|  | @ -569,39 +618,16 @@ func (t *StatusTimeline) prepare( | |||
| 
 | ||||
| 	// If there were no unprepared | ||||
| 	// StatusMeta objects, then we | ||||
| 	// gathered everything we need! | ||||
| 	// gathered everything we can! | ||||
| 	if len(unprepared) == 0 { | ||||
| 		return apiStatuses, nil | ||||
| 	} | ||||
| 
 | ||||
| 	// Of the StatusMeta objects missing a prepared | ||||
| 	// frontend model, find those without a recently | ||||
| 	// fetched database model and store their IDs, | ||||
| 	// as well mapping them for faster update below. | ||||
| 	toLoadIDs := make([]string, len(unprepared)) | ||||
| 	loadedMap := make(map[string]*StatusMeta, len(unprepared)) | ||||
| 	for i, meta := range unprepared { | ||||
| 		if meta.loaded == nil { | ||||
| 			toLoadIDs[i] = meta.ID | ||||
| 			loadedMap[meta.ID] = meta | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// Load statuses with given IDs. | ||||
| 	loaded, err := loadIDs(toLoadIDs) | ||||
| 	if err != nil { | ||||
| 		return nil, gtserror.Newf("error loading statuses: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Update returned StatusMeta objects | ||||
| 	// with newly loaded statuses by IDs. | ||||
| 	for i := range loaded { | ||||
| 		status := loaded[i] | ||||
| 		meta := loadedMap[status.ID] | ||||
| 		meta.loaded = status | ||||
| 	} | ||||
| 
 | ||||
| 	// By this point all status objects should | ||||
| 	// be fully populated with loaded models, | ||||
| 	// since they are required for filtering. | ||||
| 	for i := 0; i < len(unprepared); { | ||||
| 
 | ||||
| 		// Get meta at index. | ||||
| 		meta := unprepared[i] | ||||
| 
 | ||||
|  | @ -632,28 +658,61 @@ func (t *StatusTimeline) prepare( | |||
| 	return apiStatuses, nil | ||||
| } | ||||
| 
 | ||||
| // loadStatuses ... | ||||
| func loadStatuses( | ||||
| 	ctx context.Context, | ||||
| 	metas []*StatusMeta, | ||||
| 	loadIDs func([]string) ([]*gtsmodel.Status, error), | ||||
| ) error { | ||||
| 	// ... | ||||
| 	toLoadIDs := make([]string, len(metas)) | ||||
| 	loadedMap := make(map[string]*StatusMeta, len(metas)) | ||||
| 	for i, meta := range metas { | ||||
| 		if meta.loaded == nil { | ||||
| 			toLoadIDs[i] = meta.ID | ||||
| 			loadedMap[meta.ID] = meta | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// Load statuses with given IDs. | ||||
| 	loaded, err := loadIDs(toLoadIDs) | ||||
| 	if err != nil { | ||||
| 		return gtserror.Newf("error loading statuses: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Update returned StatusMeta objects | ||||
| 	// with newly loaded statuses by IDs. | ||||
| 	for i := range loaded { | ||||
| 		status := loaded[i] | ||||
| 		meta := loadedMap[status.ID] | ||||
| 		meta.loaded = status | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // toStatusMeta converts a slice of database model statuses | ||||
| // into our cache wrapper type, a slice of []StatusMeta{}. | ||||
| func toStatusMeta(statuses []*gtsmodel.Status) []*StatusMeta { | ||||
| 	meta := make([]*StatusMeta, len(statuses)) | ||||
| 	for i := range statuses { | ||||
| 		status := statuses[i] | ||||
| 		meta[i] = &StatusMeta{ | ||||
| 			ID:               status.ID, | ||||
| 			AccountID:        status.AccountID, | ||||
| 			BoostOfID:        status.BoostOfID, | ||||
| 			BoostOfAccountID: status.BoostOfAccountID, | ||||
| 			Local:            *status.Local, | ||||
| 			loaded:           status, | ||||
| 	return xslices.Gather(nil, statuses, func(s *gtsmodel.Status) *StatusMeta { | ||||
| 		return &StatusMeta{ | ||||
| 			ID:               s.ID, | ||||
| 			AccountID:        s.AccountID, | ||||
| 			BoostOfID:        s.BoostOfID, | ||||
| 			BoostOfAccountID: s.BoostOfAccountID, | ||||
| 			Local:            *s.Local, | ||||
| 			loaded:           s, | ||||
| 			prepared:         nil, | ||||
| 		} | ||||
| 	} | ||||
| 	return meta | ||||
| 	}) | ||||
| } | ||||
| 
 | ||||
| // doPreFilter acts similarly to slices.DeleteFunc but it accepts function with error return, or nil, returning early if so. | ||||
| func doPreFilter(statuses []*gtsmodel.Status, preFilter func(*gtsmodel.Status) (bool, error)) ([]*gtsmodel.Status, error) { | ||||
| 	if preFilter == nil { | ||||
| // ... | ||||
| func doStatusPreFilter(statuses []*gtsmodel.Status, filter func(*gtsmodel.Status) (bool, error)) ([]*gtsmodel.Status, error) { | ||||
| 
 | ||||
| 	// Check for provided | ||||
| 	// filter function. | ||||
| 	if filter == nil { | ||||
| 		return statuses, nil | ||||
| 	} | ||||
| 
 | ||||
|  | @ -662,7 +721,7 @@ func doPreFilter(statuses []*gtsmodel.Status, preFilter func(*gtsmodel.Status) ( | |||
| 		status := statuses[i] | ||||
| 
 | ||||
| 		// Pass through filter func. | ||||
| 		ok, err := preFilter(status) | ||||
| 		ok, err := filter(status) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | @ -680,22 +739,38 @@ func doPreFilter(statuses []*gtsmodel.Status, preFilter func(*gtsmodel.Status) ( | |||
| 	return statuses, nil | ||||
| } | ||||
| 
 | ||||
| // doPostFilter acts similarly to slices.DeleteFunc but it handles case of a nil function. | ||||
| func doPostFilter(statuses []*StatusMeta, postFilter func(*StatusMeta) bool) []*StatusMeta { | ||||
| 	if postFilter == nil { | ||||
| 		return statuses | ||||
| 	} | ||||
| 	return slices.DeleteFunc(statuses, postFilter) | ||||
| // ... | ||||
| func doStatusPostFilter(metas []*StatusMeta, filter func(*gtsmodel.Status) (bool, error)) ([]*StatusMeta, []*StatusMeta, error) { | ||||
| 
 | ||||
| 	// Check for provided | ||||
| 	// filter function. | ||||
| 	if filter == nil { | ||||
| 		return metas, nil, nil | ||||
| 	} | ||||
| 
 | ||||
| // toDirection converts page order to timeline direction. | ||||
| func toDirection(o paging.Order) structr.Direction { | ||||
| 	switch o { | ||||
| 	case paging.OrderAscending: | ||||
| 		return structr.Asc | ||||
| 	case paging.OrderDescending: | ||||
| 		return structr.Desc | ||||
| 	default: | ||||
| 		return false | ||||
| 	// Prepare a slice to store filtered statuses. | ||||
| 	filtered := make([]*StatusMeta, 0, len(metas)) | ||||
| 
 | ||||
| 	// Iterate through input metas. | ||||
| 	for i := 0; i < len(metas); { | ||||
| 		meta := metas[i] | ||||
| 
 | ||||
| 		// Pass through filter func. | ||||
| 		ok, err := filter(meta.loaded) | ||||
| 		if err != nil { | ||||
| 			return nil, nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		if ok { | ||||
| 			// Delete meta and add to filtered. | ||||
| 			metas = slices.Delete(metas, i, i+1) | ||||
| 			filtered = append(filtered, meta) | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		// Iter. | ||||
| 		i++ | ||||
| 	} | ||||
| 
 | ||||
| 	return metas, filtered, nil | ||||
| } | ||||
|  |  | |||
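For orientation, a rough caller sketch of the reworked StatusTimeline.Load signature follows. This is not part of the commit: the function name sketchLoad is made up, the closure bodies are placeholders rather than GoToSocial's real filtering logic, and imports of the internal timeline, paging, gtsmodel and apimodel packages are assumed.

func sketchLoad(
	ctx context.Context,
	tl *timeline.StatusTimeline,
	page *paging.Page,
	loadPage func(*paging.Page) ([]*gtsmodel.Status, error),
	loadIDs func([]string) ([]*gtsmodel.Status, error),
	prepare func(*gtsmodel.Status) (*apimodel.Status, error),
) ([]*apimodel.Status, string, string, error) {

	// Pre-filter: decides what gets stored in the
	// timeline cache at all (e.g. visibility checks).
	preFilter := func(s *gtsmodel.Status) (bool, error) {
		return false, nil // placeholder: keep everything
	}

	// Post-filter: per-request filtering applied after
	// caching, so dropped statuses stay in the cache.
	postFilter := func(s *gtsmodel.Status) (bool, error) {
		return false, nil // placeholder: keep everything
	}

	// Load returns prepared API statuses plus the lo / hi
	// status IDs used to build next / prev page links.
	return tl.Load(ctx, page, loadPage, loadIDs, preFilter, postFilter, prepare)
}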
							
								
								
									
30  internal/cache/timeline/timeline.go (vendored, new file)
							|  | @ -0,0 +1,30 @@ | |||
| package timeline | ||||
| 
 | ||||
| import ( | ||||
| 	"codeberg.org/gruf/go-structr" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/paging" | ||||
| ) | ||||
| 
 | ||||
| func nextPageParams( | ||||
| 	curLo, curHi string, // current page params | ||||
| 	nextLo, nextHi string, // next lo / hi values | ||||
| 	order paging.Order, | ||||
| ) (lo string, hi string) { | ||||
| 	if order.Ascending() { | ||||
| 
 | ||||
| 	} else /* i.e. descending */ { | ||||
| 
 | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // toDirection converts page order to timeline direction. | ||||
| func toDirection(o paging.Order) structr.Direction { | ||||
| 	switch o { | ||||
| 	case paging.OrderAscending: | ||||
| 		return structr.Asc | ||||
| 	case paging.OrderDescending: | ||||
| 		return structr.Desc | ||||
| 	default: | ||||
| 		return false | ||||
| 	} | ||||
| } | ||||
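Note that nextPageParams lands in this commit with empty branches, so the new file will not compile as shown; that matches the in-progress nature of the commit. Below is a plausible completion, purely an assumption and not code from the commit, inferred from how the function is called in status.go: keep the boundary the query started from and advance the other boundary to the data seen so far.

func nextPageParams(
	curLo, curHi string, // current page params
	nextLo, nextHi string, // next lo / hi values
	order paging.Order,
) (lo string, hi string) {
	if order.Ascending() {
		// Ascending: keep the current upper bound, move the
		// lower bound up to the highest ID seen so far.
		return nextHi, curHi
	} else /* i.e. descending */ {
		// Descending: keep the current lower bound, move the
		// upper bound down to the lowest ID seen so far.
		return curLo, nextLo
	}
}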
|  | @ -87,7 +87,7 @@ func (l *listDB) getList(ctx context.Context, lookup string, dbQuery func(*gtsmo | |||
| } | ||||
| 
 | ||||
| func (l *listDB) GetListsByAccountID(ctx context.Context, accountID string) ([]*gtsmodel.List, error) { | ||||
| 	listIDs, err := l.getListIDsByAccountID(ctx, accountID) | ||||
| 	listIDs, err := l.GetListIDsByAccountID(ctx, accountID) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | @ -95,7 +95,7 @@ func (l *listDB) GetListsByAccountID(ctx context.Context, accountID string) ([]* | |||
| } | ||||
| 
 | ||||
| func (l *listDB) CountListsByAccountID(ctx context.Context, accountID string) (int, error) { | ||||
| 	listIDs, err := l.getListIDsByAccountID(ctx, accountID) | ||||
| 	listIDs, err := l.GetListIDsByAccountID(ctx, accountID) | ||||
| 	return len(listIDs), err | ||||
| } | ||||
| 
 | ||||
|  | @ -176,9 +176,8 @@ func (l *listDB) UpdateList(ctx context.Context, list *gtsmodel.List, columns .. | |||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := l.state.Timelines.List.RemoveTimeline(ctx, list.ID); err != nil { | ||||
| 		log.Errorf(ctx, "error invalidating list timeline: %q", err) | ||||
| 	} | ||||
| 	// Clear cached timeline associated with list ID. | ||||
| 	l.state.Caches.Timelines.List.Clear(list.ID) | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
|  | @ -220,10 +219,13 @@ func (l *listDB) DeleteListByID(ctx context.Context, id string) error { | |||
| 	// Invalidate all related entry caches for this list. | ||||
| 	l.invalidateEntryCaches(ctx, []string{id}, followIDs) | ||||
| 
 | ||||
| 	// Delete the cached timeline of list. | ||||
| 	l.state.Caches.Timelines.List.Delete(id) | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (l *listDB) getListIDsByAccountID(ctx context.Context, accountID string) ([]string, error) { | ||||
| func (l *listDB) GetListIDsByAccountID(ctx context.Context, accountID string) ([]string, error) { | ||||
| 	return l.state.Caches.DB.ListIDs.Load("a"+accountID, func() ([]string, error) { | ||||
| 		var listIDs []string | ||||
| 
 | ||||
|  | @ -460,10 +462,8 @@ func (l *listDB) invalidateEntryCaches(ctx context.Context, listIDs, followIDs [ | |||
| 			"f"+listID, | ||||
| 		) | ||||
| 
 | ||||
| 		// Invalidate the timeline for the list this entry belongs to. | ||||
| 		if err := l.state.Timelines.List.RemoveTimeline(ctx, listID); err != nil { | ||||
| 			log.Errorf(ctx, "error invalidating list timeline: %q", err) | ||||
| 		} | ||||
| 		// Invalidate list timeline cache by ID. | ||||
| 		l.state.Caches.Timelines.List.Clear(listID) | ||||
| 	} | ||||
| 
 | ||||
| 	// Invalidate ListedID slice cache entries. | ||||
|  |  | |||
|  | @ -34,6 +34,9 @@ type List interface { | |||
| 	// GetListsByAccountID gets all lists owned by the given accountID. | ||||
| 	GetListsByAccountID(ctx context.Context, accountID string) ([]*gtsmodel.List, error) | ||||
| 
 | ||||
| 	// GetListIDsByAccountID gets the IDs of all lists owned by the given accountID. | ||||
| 	GetListIDsByAccountID(ctx context.Context, accountID string) ([]string, error) | ||||
| 
 | ||||
| 	// CountListsByAccountID counts the number of lists owned by the given accountID. | ||||
| 	CountListsByAccountID(ctx context.Context, accountID string) (int, error) | ||||
| 
 | ||||
|  |  | |||
|  | @ -306,25 +306,10 @@ func (p *Processor) InvalidateTimelinedStatus(ctx context.Context, accountID str | |||
| 		return gtserror.Newf("db error getting lists for account %s: %w", accountID, err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Start new log entry with | ||||
| 	// the above calling func's name. | ||||
| 	l := log. | ||||
| 		WithContext(ctx). | ||||
| 		WithField("caller", log.Caller(3)). | ||||
| 		WithField("accountID", accountID). | ||||
| 		WithField("statusID", statusID) | ||||
| 
 | ||||
| 	// Unprepare item from home + list timelines, just log | ||||
| 	// if something goes wrong since this is not a showstopper. | ||||
| 
 | ||||
| 	if err := p.state.Timelines.Home.UnprepareItem(ctx, accountID, statusID); err != nil { | ||||
| 		l.Errorf("error unpreparing item from home timeline: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Unprepare item from home + list timelines. | ||||
| 	p.state.Caches.Timelines.Home.MustGet(accountID).UnprepareByStatusIDs(statusID) | ||||
| 	for _, list := range lists { | ||||
| 		if err := p.state.Timelines.List.UnprepareItem(ctx, list.ID, statusID); err != nil { | ||||
| 			l.Errorf("error unpreparing item from list timeline %s: %v", list.ID, err) | ||||
| 		} | ||||
| 		p.state.Caches.Timelines.List.MustGet(list.ID).UnprepareByStatusIDs(statusID) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
|  |  | |||
|  | @ -19,11 +19,9 @@ package timeline | |||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"slices" | ||||
| 	"net/url" | ||||
| 
 | ||||
| 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||
| 	statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtserror" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | ||||
|  | @ -40,16 +38,14 @@ func (p *Processor) HomeTimelineGet( | |||
| 	*apimodel.PageableResponse, | ||||
| 	gtserror.WithCode, | ||||
| ) { | ||||
| 
 | ||||
| 	// Load timeline data. | ||||
| 	return p.getTimeline(ctx, | ||||
| 	return p.getStatusTimeline(ctx, | ||||
| 
 | ||||
| 		// Auth'd | ||||
| 		// account. | ||||
| 		requester, | ||||
| 
 | ||||
| 		// Home timeline cache for authorized account. | ||||
| 		p.state.Caches.Timelines.Home.Get(requester.ID), | ||||
| 		// Per-account home timeline cache. | ||||
| 		p.state.Caches.Timelines.Home.MustGet(requester.ID), | ||||
| 
 | ||||
| 		// Current | ||||
| 		// page. | ||||
|  | @ -58,70 +54,45 @@ func (p *Processor) HomeTimelineGet( | |||
| 		// Home timeline endpoint. | ||||
| 		"/api/v1/timelines/home", | ||||
| 
 | ||||
| 		// No page | ||||
| 		// query. | ||||
| 		nil, | ||||
| 		// Set local-only timeline | ||||
| 		// page query flag, (this map | ||||
| 		// later gets copied before | ||||
| 		// any further usage). | ||||
| 		func() url.Values { | ||||
| 			if local { | ||||
| 				return localOnlyTrue | ||||
| 			} | ||||
| 			return localOnlyFalse | ||||
| 		}(), | ||||
| 
 | ||||
| 		// Status filter context. | ||||
| 		statusfilter.FilterContextHome, | ||||
| 
 | ||||
| 		// Timeline cache load function, used to further hydrate cache where necessary. | ||||
| 		func(page *paging.Page) (statuses []*gtsmodel.Status, next *paging.Page, err error) { | ||||
| 
 | ||||
| 			// Fetch requesting account's home timeline page. | ||||
| 			statuses, err = p.state.DB.GetHomeTimeline(ctx, | ||||
| 				requester.ID, | ||||
| 				page, | ||||
| 			) | ||||
| 			if err != nil && !errors.Is(err, db.ErrNoEntries) { | ||||
| 				return nil, nil, gtserror.Newf("error getting statuses: %w", err) | ||||
| 			} | ||||
| 
 | ||||
| 			if len(statuses) == 0 { | ||||
| 				// No more to load. | ||||
| 				return nil, nil, nil | ||||
| 			} | ||||
| 
 | ||||
| 			// Get the lowest and highest | ||||
| 			// ID values, used for next pg. | ||||
| 			lo := statuses[len(statuses)-1].ID | ||||
| 			hi := statuses[0].ID | ||||
| 
 | ||||
| 			// Set next paging value. | ||||
| 			page = page.Next(lo, hi) | ||||
| 
 | ||||
| 			for i := 0; i < len(statuses); { | ||||
| 				// Get status at idx. | ||||
| 				status := statuses[i] | ||||
| 
 | ||||
| 				// Check whether status should be show on home timeline. | ||||
| 				visible, err := p.visFilter.StatusHomeTimelineable(ctx, | ||||
| 					requester, | ||||
| 					status, | ||||
| 				) | ||||
| 				if err != nil { | ||||
| 					return nil, nil, gtserror.Newf("error checking visibility: %w", err) | ||||
| 				} | ||||
| 
 | ||||
| 				if !visible { | ||||
| 					// Status not visible to home timeline. | ||||
| 					statuses = slices.Delete(statuses, i, i+1) | ||||
| 					continue | ||||
| 				} | ||||
| 
 | ||||
| 				// Iter. | ||||
| 				i++ | ||||
| 			} | ||||
| 
 | ||||
| 			return | ||||
| 		// Database load function. | ||||
| 		func(pg *paging.Page) (statuses []*gtsmodel.Status, err error) { | ||||
| 			return p.state.DB.GetHomeTimeline(ctx, requester.ID, pg) | ||||
| 		}, | ||||
| 
 | ||||
| 		// Per-request filtering function. | ||||
| 		func(s *gtsmodel.Status) bool { | ||||
| 			if local { | ||||
| 				return !*s.Local | ||||
| 		// Pre-filtering function, | ||||
| 		// i.e. filter before caching. | ||||
| 		func(s *gtsmodel.Status) (bool, error) { | ||||
| 
 | ||||
| 			// Check the visibility of passed status to requesting user. | ||||
| 			ok, err := p.visFilter.StatusHomeTimelineable(ctx, requester, s) | ||||
| 			return !ok, err | ||||
| 		}, | ||||
| 
 | ||||
| 		// Post-filtering function, | ||||
| 		// i.e. filter after caching. | ||||
| 		func(s *gtsmodel.Status) (bool, error) { | ||||
| 
 | ||||
| 			// Remove any non-local statuses | ||||
| 			// if requester wants local-only. | ||||
| 			if local && !*s.Local { | ||||
| 				return true, nil | ||||
| 			} | ||||
| 			return false | ||||
| 
 | ||||
| 			return false, nil | ||||
| 		}, | ||||
| 	) | ||||
| } | ||||
|  |  | |||
|  | @ -20,7 +20,6 @@ package timeline | |||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"slices" | ||||
| 
 | ||||
| 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||
|  | @ -65,21 +64,21 @@ func (p *Processor) ListTimelineGet( | |||
| 		return nil, gtserror.NewErrorNotFound(err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Load timeline data. | ||||
| 	return p.getTimeline(ctx, | ||||
| 	// Fetch status timeline for list. | ||||
| 	return p.getStatusTimeline(ctx, | ||||
| 
 | ||||
| 		// Auth'd | ||||
| 		// account. | ||||
| 		requester, | ||||
| 
 | ||||
| 		// List timeline cache for list with ID. | ||||
| 		p.state.Caches.Timelines.List.Get(listID), | ||||
| 		// Per-list status timeline cache. | ||||
| 		p.state.Caches.Timelines.List.MustGet(listID), | ||||
| 
 | ||||
| 		// Current | ||||
| 		// page. | ||||
| 		page, | ||||
| 
 | ||||
| 		// List timeline endpoint. | ||||
| 		// List timeline endpoint. | ||||
| 		"/api/v1/timelines/list/"+listID, | ||||
| 
 | ||||
| 		// No page | ||||
|  | @ -89,59 +88,22 @@ func (p *Processor) ListTimelineGet( | |||
| 		// Status filter context. | ||||
| 		statusfilter.FilterContextHome, | ||||
| 
 | ||||
| 		// Timeline cache load function, used to further hydrate cache where necessary. | ||||
| 		func(page *paging.Page) (statuses []*gtsmodel.Status, next *paging.Page, err error) { | ||||
| 
 | ||||
| 			// Fetch requesting account's list timeline page. | ||||
| 			statuses, err = p.state.DB.GetListTimeline(ctx, | ||||
| 				listID, | ||||
| 				page, | ||||
| 			) | ||||
| 			if err != nil && !errors.Is(err, db.ErrNoEntries) { | ||||
| 				return nil, nil, gtserror.Newf("error getting statuses: %w", err) | ||||
| 			} | ||||
| 
 | ||||
| 			if len(statuses) == 0 { | ||||
| 				// No more to load. | ||||
| 				return nil, nil, nil | ||||
| 			} | ||||
| 
 | ||||
| 			// Get the lowest and highest | ||||
| 			// ID values, used for next pg. | ||||
| 			lo := statuses[len(statuses)-1].ID | ||||
| 			hi := statuses[0].ID | ||||
| 
 | ||||
| 			// Set next paging value. | ||||
| 			page = page.Next(lo, hi) | ||||
| 
 | ||||
| 			for i := 0; i < len(statuses); { | ||||
| 				// Get status at idx. | ||||
| 				status := statuses[i] | ||||
| 
 | ||||
| 				// Check whether status should be show on home timeline. | ||||
| 				visible, err := p.visFilter.StatusHomeTimelineable(ctx, | ||||
| 					requester, | ||||
| 					status, | ||||
| 				) | ||||
| 				if err != nil { | ||||
| 					return nil, nil, gtserror.Newf("error checking visibility: %w", err) | ||||
| 				} | ||||
| 
 | ||||
| 				if !visible { | ||||
| 					// Status not visible to home timeline. | ||||
| 					statuses = slices.Delete(statuses, i, i+1) | ||||
| 					continue | ||||
| 				} | ||||
| 
 | ||||
| 				// Iter. | ||||
| 				i++ | ||||
| 			} | ||||
| 
 | ||||
| 			return | ||||
| 		// Database load function. | ||||
| 		func(pg *paging.Page) (statuses []*gtsmodel.Status, err error) { | ||||
| 			return p.state.DB.GetListTimeline(ctx, listID, pg) | ||||
| 		}, | ||||
| 
 | ||||
| 		// No furthering | ||||
| 		// filter function. | ||||
| 		// Pre-filtering function, | ||||
| 		// i.e. filter before caching. | ||||
| 		func(s *gtsmodel.Status) (bool, error) { | ||||
| 
 | ||||
| 			// Check the visibility of passed status to requesting user. | ||||
| 			ok, err := p.visFilter.StatusHomeTimelineable(ctx, requester, s) | ||||
| 			return !ok, err | ||||
| 		}, | ||||
| 
 | ||||
| 		// Post-filtering function, | ||||
| 		// i.e. filter after caching. | ||||
| 		nil, | ||||
| 	) | ||||
| } | ||||
|  |  | |||
|  | @ -19,13 +19,9 @@ package timeline | |||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"net/url" | ||||
| 	"slices" | ||||
| 	"strconv" | ||||
| 
 | ||||
| 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||
| 	statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtserror" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | ||||
|  | @ -42,9 +38,7 @@ func (p *Processor) PublicTimelineGet( | |||
| 	*apimodel.PageableResponse, | ||||
| 	gtserror.WithCode, | ||||
| ) { | ||||
| 
 | ||||
| 	// Load timeline data. | ||||
| 	return p.getTimeline(ctx, | ||||
| 	return p.getStatusTimeline(ctx, | ||||
| 
 | ||||
| 		// Auth'd | ||||
| 		// account. | ||||
|  | @ -60,68 +54,42 @@ func (p *Processor) PublicTimelineGet( | |||
| 		// Public timeline endpoint. | ||||
| 		"/api/v1/timelines/public", | ||||
| 
 | ||||
| 		// Set local-only timeline page query flag. | ||||
| 		url.Values{"local": {strconv.FormatBool(local)}}, | ||||
| 		// Set local-only timeline | ||||
| 		// page query flag, (this map | ||||
| 		// later gets copied before | ||||
| 		// any further usage). | ||||
| 		func() url.Values { | ||||
| 			if local { | ||||
| 				return localOnlyTrue | ||||
| 			} | ||||
| 			return localOnlyFalse | ||||
| 		}(), | ||||
| 
 | ||||
| 		// Status filter context. | ||||
| 		statusfilter.FilterContextPublic, | ||||
| 
 | ||||
| 		// Timeline cache load function, used to further hydrate cache where necessary. | ||||
| 		func(page *paging.Page) (statuses []*gtsmodel.Status, next *paging.Page, err error) { | ||||
| 
 | ||||
| 			// Fetch the global public status timeline page. | ||||
| 			statuses, err = p.state.DB.GetPublicTimeline(ctx, | ||||
| 				page, | ||||
| 			) | ||||
| 			if err != nil && !errors.Is(err, db.ErrNoEntries) { | ||||
| 				return nil, nil, gtserror.Newf("error getting statuses: %w", err) | ||||
| 			} | ||||
| 
 | ||||
| 			if len(statuses) == 0 { | ||||
| 				// No more to load. | ||||
| 				return nil, nil, nil | ||||
| 			} | ||||
| 
 | ||||
| 			// Get the lowest and highest | ||||
| 			// ID values, used for next pg. | ||||
| 			lo := statuses[len(statuses)-1].ID | ||||
| 			hi := statuses[0].ID | ||||
| 
 | ||||
| 			// Set next paging value. | ||||
| 			page = page.Next(lo, hi) | ||||
| 
 | ||||
| 			for i := 0; i < len(statuses); { | ||||
| 				// Get status at idx. | ||||
| 				status := statuses[i] | ||||
| 
 | ||||
| 				// Check whether status should be show on public timeline. | ||||
| 				visible, err := p.visFilter.StatusPublicTimelineable(ctx, | ||||
| 					requester, | ||||
| 					status, | ||||
| 				) | ||||
| 				if err != nil { | ||||
| 					return nil, nil, gtserror.Newf("error checking visibility: %w", err) | ||||
| 				} | ||||
| 
 | ||||
| 				if !visible { | ||||
| 					// Status not visible to home timeline. | ||||
| 					statuses = slices.Delete(statuses, i, i+1) | ||||
| 					continue | ||||
| 				} | ||||
| 
 | ||||
| 				// Iter. | ||||
| 				i++ | ||||
| 			} | ||||
| 
 | ||||
| 			return | ||||
| 		// Database load function. | ||||
| 		func(pg *paging.Page) (statuses []*gtsmodel.Status, err error) { | ||||
| 			return p.state.DB.GetPublicTimeline(ctx, pg) | ||||
| 		}, | ||||
| 
 | ||||
| 		// Per-request filtering function. | ||||
| 		func(s *gtsmodel.Status) bool { | ||||
| 			if local { | ||||
| 				return !*s.Local | ||||
| 		// Pre-filtering function, | ||||
| 		// i.e. filter before caching. | ||||
| 		nil, | ||||
| 
 | ||||
| 		// Post-filtering function, | ||||
| 		// i.e. filter after caching. | ||||
| 		func(s *gtsmodel.Status) (bool, error) { | ||||
| 
 | ||||
| 			// Remove any non-local statuses | ||||
| 			// if requester wants local-only. | ||||
| 			if local && !*s.Local { | ||||
| 				return true, nil | ||||
| 			} | ||||
| 			return false | ||||
| 
 | ||||
| 			// Check the visibility of passed status to requesting user. | ||||
| 			ok, err := p.visFilter.StatusPublicTimelineable(ctx, requester, s) | ||||
| 			return !ok, err | ||||
| 		}, | ||||
| 	) | ||||
| } | ||||
|  |  | |||
|  | @ -20,11 +20,10 @@ package timeline | |||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"net/http" | ||||
| 	"net/url" | ||||
| 	"slices" | ||||
| 
 | ||||
| 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/cache" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/cache/timeline" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||
| 	statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status" | ||||
|  | @ -32,10 +31,19 @@ import ( | |||
| 	"github.com/superseriousbusiness/gotosocial/internal/filter/visibility" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtserror" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/log" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/paging" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/state" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/typeutils" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/util/xslices" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	// pre-prepared URL values to be passed in to | ||||
| 	// paging response forms. The paging package always | ||||
| 	// copies values before any modifications so it's | ||||
| 	// safe to only use a single map variable for these. | ||||
| 	localOnlyTrue  = url.Values{"local": {"true"}} | ||||
| 	localOnlyFalse = url.Values{"local": {"false"}} | ||||
| ) | ||||
| 
 | ||||
| type Processor struct { | ||||
|  | @ -62,7 +70,7 @@ func (p *Processor) getStatusTimeline( | |||
| 	filterCtx statusfilter.FilterContext, | ||||
| 	loadPage func(*paging.Page) (statuses []*gtsmodel.Status, err error), | ||||
| 	preFilter func(*gtsmodel.Status) (bool, error), | ||||
| 	postFilter func(*timeline.StatusMeta) bool, | ||||
| 	postFilter func(*gtsmodel.Status) (bool, error), | ||||
| ) ( | ||||
| 	*apimodel.PageableResponse, | ||||
| 	gtserror.WithCode, | ||||
|  | @ -99,7 +107,8 @@ func (p *Processor) getStatusTimeline( | |||
| 	} | ||||
| 
 | ||||
| 	// ... | ||||
| 	statuses, err := timeline.Load(ctx, | ||||
| 	apiStatuses, lo, hi, err := timeline.Load(ctx, | ||||
| 
 | ||||
| 		page, | ||||
| 
 | ||||
| 		// ... | ||||
|  | @ -110,10 +119,12 @@ func (p *Processor) getStatusTimeline( | |||
| 			return p.state.DB.GetStatusesByIDs(ctx, ids) | ||||
| 		}, | ||||
| 
 | ||||
| 		// ... | ||||
| 		// Pre-filtering function, | ||||
| 		// i.e. filter before caching. | ||||
| 		preFilter, | ||||
| 
 | ||||
| 		// ... | ||||
| 		// Post-filtering function, | ||||
| 		// i.e. filter after caching. | ||||
| 		postFilter, | ||||
| 
 | ||||
| 		// ... | ||||
|  | @ -132,192 +143,16 @@ func (p *Processor) getStatusTimeline( | |||
| 		}, | ||||
| 	) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 		err := gtserror.Newf("error loading timeline: %w", err) | ||||
| 		return nil, gtserror.WrapWithCode(http.StatusInternalServerError, err) | ||||
| 	} | ||||
| 
 | ||||
| func (p *Processor) getTimeline( | ||||
| 	ctx context.Context, | ||||
| 	requester *gtsmodel.Account, | ||||
| 	timeline *timeline.StatusTimeline, | ||||
| 	page *paging.Page, | ||||
| 	pgPath string, // timeline page path | ||||
| 	pgQuery url.Values, // timeline query parameters | ||||
| 	filterCtx statusfilter.FilterContext, | ||||
| 	load func(*paging.Page) (statuses []*gtsmodel.Status, next *paging.Page, err error), // timeline cache load function | ||||
| 	filter func(*gtsmodel.Status) bool, // per-request filtering function, done AFTER timeline caching | ||||
| ) ( | ||||
| 	*apimodel.PageableResponse, | ||||
| 	gtserror.WithCode, | ||||
| ) { | ||||
| 	// Load timeline with cache / loader funcs. | ||||
| 	statuses, errWithCode := p.loadTimeline(ctx, | ||||
| 		timeline, | ||||
| 		page, | ||||
| 		load, | ||||
| 		filter, | ||||
| 	) | ||||
| 	if errWithCode != nil { | ||||
| 		return nil, errWithCode | ||||
| 	} | ||||
| 
 | ||||
| 	if len(statuses) == 0 { | ||||
| 		// Check for an empty timeline rsp. | ||||
| 		return paging.EmptyResponse(), nil | ||||
| 	} | ||||
| 
 | ||||
| 	// Get the lowest and highest | ||||
| 	// ID values, used for paging. | ||||
| 	lo := statuses[len(statuses)-1].ID | ||||
| 	hi := statuses[0].ID | ||||
| 
 | ||||
| 	var ( | ||||
| 		filters []*gtsmodel.Filter | ||||
| 		mutes   *usermute.CompiledUserMuteList | ||||
| 	) | ||||
| 
 | ||||
| 	if requester != nil { | ||||
| 		var err error | ||||
| 
 | ||||
| 		// Fetch all filters relevant for requesting account. | ||||
| 		filters, err = p.state.DB.GetFiltersForAccountID(ctx, | ||||
| 			requester.ID, | ||||
| 		) | ||||
| 		if err != nil && !errors.Is(err, db.ErrNoEntries) { | ||||
| 			err := gtserror.Newf("error getting account filters: %w", err) | ||||
| 			return nil, gtserror.NewErrorInternalError(err) | ||||
| 		} | ||||
| 
 | ||||
| 		// Get a list of all account mutes for requester. | ||||
| 		allMutes, err := p.state.DB.GetAccountMutes(ctx, | ||||
| 			requester.ID, | ||||
| 			nil, // nil page, i.e. all | ||||
| 		) | ||||
| 		if err != nil && !errors.Is(err, db.ErrNoEntries) { | ||||
| 			err := gtserror.Newf("error getting account mutes: %w", err) | ||||
| 			return nil, gtserror.NewErrorInternalError(err) | ||||
| 		} | ||||
| 
 | ||||
| 		// Compile all account mutes to useable form. | ||||
| 		mutes = usermute.NewCompiledUserMuteList(allMutes) | ||||
| 	} | ||||
| 
 | ||||
| 	// NOTE: | ||||
| 	// Right now this is not ideal, as we perform mute and | ||||
| 	// status filtering *after* the above load loop, so we | ||||
| 	// could end up with no statuses still AFTER all loading. | ||||
| 	// | ||||
| 	// In a PR coming *soon* we will move the filtering and | ||||
| 	// status muting into separate module similar to the visibility | ||||
| 	// filtering and caching which should move it to the above | ||||
| 	// load loop and provided function. | ||||
| 
 | ||||
| 	// API response requires them in interface{} form. | ||||
| 	items := make([]interface{}, 0, len(statuses)) | ||||
| 
 | ||||
| 	for _, status := range statuses { | ||||
| 		// Convert internal status model to frontend model. | ||||
| 		apiStatus, err := p.converter.StatusToAPIStatus(ctx, | ||||
| 			status, | ||||
| 			requester, | ||||
| 			filterCtx, | ||||
| 			filters, | ||||
| 			mutes, | ||||
| 		) | ||||
| 		if err != nil && !errors.Is(err, statusfilter.ErrHideStatus) { | ||||
| 			log.Errorf(ctx, "error converting status: %v", err) | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if apiStatus != nil { | ||||
| 			// Append status to return slice. | ||||
| 			items = append(items, apiStatus) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// Package converted API statuses as pageable response. | ||||
| 	// Package returned API statuses as pageable response. | ||||
| 	return paging.PackageResponse(paging.ResponseParams{ | ||||
| 		Items: items, | ||||
| 		Items: xslices.ToAny(apiStatuses), | ||||
| 		Path:  pgPath, | ||||
| 		Next:  page.Next(lo, hi), | ||||
| 		Prev:  page.Prev(lo, hi), | ||||
| 		Path:  pgPath, | ||||
| 		Query: pgQuery, | ||||
| 	}), nil | ||||
| } | ||||
| 
 | ||||
| func (p *Processor) loadTimeline( | ||||
| 	ctx context.Context, | ||||
| 	timeline *cache.TimelineCache[*gtsmodel.Status], | ||||
| 	page *paging.Page, | ||||
| 	load func(*paging.Page) (statuses []*gtsmodel.Status, next *paging.Page, err error), | ||||
| 	filter func(*gtsmodel.Status) bool, | ||||
| ) ( | ||||
| 	[]*gtsmodel.Status, | ||||
| 	gtserror.WithCode, | ||||
| ) { | ||||
| 	if load == nil { | ||||
| 		// nil check outside | ||||
| 		// below main loop. | ||||
| 		panic("nil func") | ||||
| 	} | ||||
| 
 | ||||
| 	if page == nil { | ||||
| 		const text = "timeline must be paged" | ||||
| 		return nil, gtserror.NewErrorBadRequest( | ||||
| 			errors.New(text), | ||||
| 			text, | ||||
| 		) | ||||
| 	} | ||||
| 
 | ||||
| 	// Try load statuses from cache. | ||||
| 	statuses := timeline.Select(page) | ||||
| 
 | ||||
| 	// Filter statuses using provided function. | ||||
| 	statuses = slices.DeleteFunc(statuses, filter) | ||||
| 
 | ||||
| 	// Check if more statuses need to be loaded. | ||||
| 	if limit := page.Limit; len(statuses) < limit { | ||||
| 
 | ||||
| 		// Set first page | ||||
| 		// query to load. | ||||
| 		nextPg := page | ||||
| 
 | ||||
| 		for i := 0; i < 5; i++ { | ||||
| 			var err error | ||||
| 			var next []*gtsmodel.Status | ||||
| 
 | ||||
| 			// Load next timeline statuses. | ||||
| 			next, nextPg, err = load(nextPg) | ||||
| 			if err != nil { | ||||
| 				err := gtserror.Newf("error loading timeline: %w", err) | ||||
| 				return nil, gtserror.NewErrorInternalError(err) | ||||
| 			} | ||||
| 
 | ||||
| 			// An empty next page means no more. | ||||
| 			if len(next) == 0 && nextPg == nil { | ||||
| 				break | ||||
| 			} | ||||
| 
 | ||||
| 			// Cache loaded statuses. | ||||
| 			timeline.Insert(next...) | ||||
| 
 | ||||
| 			// Filter statuses using provided function, | ||||
| 			// this must be done AFTER cache insert but | ||||
| 			// BEFORE adding to slice, as this is used | ||||
| 			// for request-specific timeline filtering, | ||||
| 			// as opposed to filtering for entire cache. | ||||
| 			next = slices.DeleteFunc(next, filter) | ||||
| 
 | ||||
| 			// Append loaded statuses to return. | ||||
| 			statuses = append(statuses, next...) | ||||
| 
 | ||||
| 			if len(statuses) >= limit { | ||||
| 				// We loaded all the statuses | ||||
| 				// that were requested of us! | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return statuses, nil | ||||
| } | ||||
|  |  | |||
|  | @ -682,13 +682,8 @@ func (p *clientAPI) CreateBlock(ctx context.Context, cMsg *messages.FromClientAP | |||
| 		return gtserror.Newf("%T not parseable as *gtsmodel.Block", cMsg.GTSModel) | ||||
| 	} | ||||
| 
 | ||||
| 	// Remove blocker's statuses from blocker's timeline. | ||||
| 	p.state.Caches.Timelines.Home.InvalidateFrom(block.AccountID, "AccountID", block.TargetAccountID) | ||||
| 	p.state.Caches.Timelines.Home.InvalidateFrom(block.AccountID, "BoostOfAccountID", block.TargetAccountID) | ||||
| 
 | ||||
| 	// Remove blockee's statuses from blockee's timeline. | ||||
| 	p.state.Caches.Timelines.Home.InvalidateFrom(block.TargetAccountID, "AccountID", block.AccountID) | ||||
| 	p.state.Caches.Timelines.Home.InvalidateFrom(block.TargetAccountID, "BoostOfAccountID", block.AccountID) | ||||
| 	// Perform any necessary timeline invalidation. | ||||
| 	p.surface.invalidateTimelinesForBlock(ctx, block) | ||||
| 
 | ||||
| 	// TODO: same with notifications? | ||||
| 	// TODO: same with bookmarks? | ||||
|  |  | |||
|  | @ -701,53 +701,19 @@ func (p *fediAPI) CreateBlock(ctx context.Context, fMsg *messages.FromFediAPI) e | |||
| 		return gtserror.Newf("%T not parseable as *gtsmodel.Block", fMsg.GTSModel) | ||||
| 	} | ||||
| 
 | ||||
| 	// Remove each account's posts from the other's timelines. | ||||
| 	// | ||||
| 	// First home timelines. | ||||
| 	if err := p.state.Timelines.Home.WipeItemsFromAccountID( | ||||
| 		ctx, | ||||
| 		block.AccountID, | ||||
| 		block.TargetAccountID, | ||||
| 	); err != nil { | ||||
| 		log.Errorf(ctx, "error wiping items from block -> target's home timeline: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := p.state.Timelines.Home.WipeItemsFromAccountID( | ||||
| 		ctx, | ||||
| 		block.TargetAccountID, | ||||
| 		block.AccountID, | ||||
| 	); err != nil { | ||||
| 		log.Errorf(ctx, "error wiping items from target -> block's home timeline: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Now list timelines. | ||||
| 	if err := p.state.Timelines.List.WipeItemsFromAccountID( | ||||
| 		ctx, | ||||
| 		block.AccountID, | ||||
| 		block.TargetAccountID, | ||||
| 	); err != nil { | ||||
| 		log.Errorf(ctx, "error wiping items from block -> target's list timeline(s): %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := p.state.Timelines.List.WipeItemsFromAccountID( | ||||
| 		ctx, | ||||
| 		block.TargetAccountID, | ||||
| 		block.AccountID, | ||||
| 	); err != nil { | ||||
| 		log.Errorf(ctx, "error wiping items from target -> block's list timeline(s): %v", err) | ||||
| 	} | ||||
| 	// Perform any necessary timeline invalidation. | ||||
| 	p.surface.invalidateTimelinesForBlock(ctx, block) | ||||
| 
 | ||||
| 	// Remove any follows that existed between blocker + blockee. | ||||
| 	if err := p.state.DB.DeleteFollow( | ||||
| 		ctx, | ||||
| 	// (note this handles removing any necessary list entries). | ||||
| 	if err := p.state.DB.DeleteFollow(ctx, | ||||
| 		block.AccountID, | ||||
| 		block.TargetAccountID, | ||||
| 	); err != nil { | ||||
| 		log.Errorf(ctx, "error deleting follow from block -> target: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := p.state.DB.DeleteFollow( | ||||
| 		ctx, | ||||
| 	if err := p.state.DB.DeleteFollow(ctx, | ||||
| 		block.TargetAccountID, | ||||
| 		block.AccountID, | ||||
| 	); err != nil { | ||||
|  | @ -755,16 +721,14 @@ func (p *fediAPI) CreateBlock(ctx context.Context, fMsg *messages.FromFediAPI) e | |||
| 	} | ||||
| 
 | ||||
| 	// Remove any follow requests that existed between blocker + blockee. | ||||
| 	if err := p.state.DB.DeleteFollowRequest( | ||||
| 		ctx, | ||||
| 	if err := p.state.DB.DeleteFollowRequest(ctx, | ||||
| 		block.AccountID, | ||||
| 		block.TargetAccountID, | ||||
| 	); err != nil { | ||||
| 		log.Errorf(ctx, "error deleting follow request from block -> target: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := p.state.DB.DeleteFollowRequest( | ||||
| 		ctx, | ||||
| 	if err := p.state.DB.DeleteFollowRequest(ctx, | ||||
| 		block.TargetAccountID, | ||||
| 		block.AccountID, | ||||
| 	); err != nil { | ||||
|  |  | |||
|  | @ -553,13 +553,8 @@ func (s *Surface) tagFollowersForStatus( | |||
| // deleteStatusFromTimelines completely removes the given status from all timelines. | ||||
| // It will also stream deletion of the status to all open streams. | ||||
| func (s *Surface) deleteStatusFromTimelines(ctx context.Context, statusID string) error { | ||||
| 	if err := s.State.Timelines.Home.WipeItemFromAllTimelines(ctx, statusID); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	if err := s.State.Timelines.List.WipeItemFromAllTimelines(ctx, statusID); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	s.State.Caches.Timelines.Home.RemoveByStatusIDs(statusID) | ||||
| 	s.State.Caches.Timelines.List.RemoveByStatusIDs(statusID) | ||||
| 	s.Stream.Delete(ctx, statusID) | ||||
| 	return nil | ||||
| } | ||||
|  | @ -569,19 +564,8 @@ func (s *Surface) deleteStatusFromTimelines(ctx context.Context, statusID string | |||
| // stats, boost counts, etc) next time it's fetched by the timeline owner. This goes | ||||
| // both for the status itself, and for any boosts of the status. | ||||
| func (s *Surface) invalidateStatusFromTimelines(ctx context.Context, statusID string) { | ||||
| 	if err := s.State.Timelines.Home.UnprepareItemFromAllTimelines(ctx, statusID); err != nil { | ||||
| 		log. | ||||
| 			WithContext(ctx). | ||||
| 			WithField("statusID", statusID). | ||||
| 			Errorf("error unpreparing status from home timelines: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := s.State.Timelines.List.UnprepareItemFromAllTimelines(ctx, statusID); err != nil { | ||||
| 		log. | ||||
| 			WithContext(ctx). | ||||
| 			WithField("statusID", statusID). | ||||
| 			Errorf("error unpreparing status from list timelines: %v", err) | ||||
| 	} | ||||
| 	s.State.Caches.Timelines.Home.UnprepareByStatusIDs(statusID) | ||||
| 	s.State.Caches.Timelines.List.UnprepareByStatusIDs(statusID) | ||||
| } | ||||
| 
 | ||||
| // timelineStatusUpdate looks up HOME and LIST timelines of accounts | ||||
|  | @ -860,3 +844,57 @@ func (s *Surface) timelineStatusUpdateForTagFollowers( | |||
| 	} | ||||
| 	return errs.Combine() | ||||
| } | ||||
| 
 | ||||
| // invalidateTimelinesForBlock ... | ||||
| func (s *Surface) invalidateTimelinesForBlock(ctx context.Context, block *gtsmodel.Block) { | ||||
| 
 | ||||
| 	// Check if origin is local account, | ||||
| 	// i.e. has status timeline caches. | ||||
| 	if block.Account.IsLocal() { | ||||
| 
 | ||||
| 		// Remove target's statuses | ||||
| 		// from origin's home timeline. | ||||
| 		s.State.Caches.Timelines.Home. | ||||
| 			MustGet(block.AccountID). | ||||
| 			RemoveByAccountIDs(block.TargetAccountID) | ||||
| 
 | ||||
| 		// Get the IDs of any lists created by origin account. | ||||
| 		listIDs, err := s.State.DB.GetListIDsByAccountID(ctx, block.AccountID) | ||||
| 		if err != nil { | ||||
| 			log.Errorf(ctx, "error getting account's list IDs for %s: %v", block.URI, err) | ||||
| 		} | ||||
| 
 | ||||
| 		// Remove target's statuses from | ||||
| 		// any of origin's list timelines. | ||||
| 		for _, listID := range listIDs { | ||||
| 			s.State.Caches.Timelines.List. | ||||
| 				MustGet(listID). | ||||
| 				RemoveByAccountIDs(block.TargetAccountID) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// Check if target is local account, | ||||
| 	// i.e. has status timeline caches. | ||||
| 	if block.TargetAccount.IsLocal() { | ||||
| 
 | ||||
| 		// Remove origin's statuses | ||||
| 		// from target's home timeline. | ||||
| 		s.State.Caches.Timelines.Home. | ||||
| 			MustGet(block.TargetAccountID). | ||||
| 			RemoveByAccountIDs(block.AccountID) | ||||
| 
 | ||||
| 		// Get the IDs of any lists created by target account. | ||||
| 		listIDs, err := s.State.DB.GetListIDsByAccountID(ctx, block.TargetAccountID) | ||||
| 		if err != nil { | ||||
| 			log.Errorf(ctx, "error getting target account's list IDs for %s: %v", block.URI, err) | ||||
| 		} | ||||
| 
 | ||||
| 		// Remove origin's statuses from | ||||
| 		// any of target's list timelines. | ||||
| 		for _, listID := range listIDs { | ||||
| 			s.State.Caches.Timelines.List. | ||||
| 				MustGet(listID). | ||||
| 				RemoveByAccountIDs(block.AccountID) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  |  | |||
|  | @ -21,6 +21,16 @@ import ( | |||
| 	"slices" | ||||
| ) | ||||
| 
 | ||||
| // ToAny converts a slice of any input type | ||||
| // to the abstract empty interface slice type. | ||||
| func ToAny[T any](in []T) []any { | ||||
| 	out := make([]any, len(in)) | ||||
| 	for i, v := range in { | ||||
| 		out[i] = v | ||||
| 	} | ||||
| 	return out | ||||
| } | ||||
| 
 | ||||
| // GrowJust increases slice capacity to guarantee | ||||
| // extra room 'size', where in the case that it does | ||||
| // need to allocate more it ONLY allocates 'size' extra. | ||||
|  |  | |||
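The new xslices.ToAny helper exists so the prepared API statuses can be handed straight to paging.PackageResponse, whose Items field takes []any (previously built by hand with a make([]interface{}, ...) loop). A trivial usage sketch, assuming an apiStatuses slice like the one in the processor code above:

	// Convert the typed status slice into []any
	// for the pageable response's Items field.
	items := xslices.ToAny(apiStatuses)
	_ = items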