Mirror of https://github.com/superseriousbusiness/gotosocial.git
Commit 771fbe2d5e: more work integrating new timeline code
Parent commit: 49d9a008d9
14 changed files with 419 additions and 606 deletions
							
								
								
									
internal/cache/db.go (vendored): 23 changed lines
							|  | @ -1311,29 +1311,6 @@ func (c *Caches) initStatus() { | ||||||
| 
 | 
 | ||||||
| 	log.Infof(nil, "cache size = %d", cap) | 	log.Infof(nil, "cache size = %d", cap) | ||||||
| 
 | 
 | ||||||
| 	copyF := func(s1 *gtsmodel.Status) *gtsmodel.Status { |  | ||||||
| 		s2 := new(gtsmodel.Status) |  | ||||||
| 		*s2 = *s1 |  | ||||||
| 
 |  | ||||||
| 		// Don't include ptr fields that |  | ||||||
| 		// will be populated separately. |  | ||||||
| 		// See internal/db/bundb/status.go. |  | ||||||
| 		s2.Account = nil |  | ||||||
| 		s2.InReplyTo = nil |  | ||||||
| 		s2.InReplyToAccount = nil |  | ||||||
| 		s2.BoostOf = nil |  | ||||||
| 		s2.BoostOfAccount = nil |  | ||||||
| 		s2.Poll = nil |  | ||||||
| 		s2.Attachments = nil |  | ||||||
| 		s2.Tags = nil |  | ||||||
| 		s2.Mentions = nil |  | ||||||
| 		s2.Emojis = nil |  | ||||||
| 		s2.CreatedWithApplication = nil |  | ||||||
| 		s2.Edits = nil |  | ||||||
| 
 |  | ||||||
| 		return s2 |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	c.DB.Status.Init(structr.CacheConfig[*gtsmodel.Status]{ | 	c.DB.Status.Init(structr.CacheConfig[*gtsmodel.Status]{ | ||||||
| 		Indices: []structr.IndexConfig{ | 		Indices: []structr.IndexConfig{ | ||||||
| 			{Fields: "ID"}, | 			{Fields: "ID"}, | ||||||
|  |  | ||||||
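The copy function removed above implements a shallow-copy pattern: status cache entries are stored as shallow copies with their pointer sub-fields nilled out, so related models never live inside the status cache and are re-populated separately on load. A minimal standalone sketch of the same pattern, using hypothetical types rather than the project's own:

    package main

    import "fmt"

    type account struct{ Username string }

    type status struct {
        ID      string
        Content string
        Account *account // populated separately on load
    }

    // copyForCache returns a shallow copy suitable for caching,
    // dropping pointer sub-fields the database layer re-populates.
    func copyForCache(s1 *status) *status {
        s2 := new(status)
        *s2 = *s1
        s2.Account = nil
        return s2
    }

    func main() {
        orig := &status{ID: "01ABC", Content: "hi", Account: &account{Username: "zork"}}
        cached := copyForCache(orig)
        fmt.Println(cached.Account == nil) // true: the sub-model is not cached
    }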
							
								
								
									
internal/cache/timeline/status.go (vendored): 301 changed lines
							|  | @ -30,6 +30,7 @@ import ( | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/log" | 	"github.com/superseriousbusiness/gotosocial/internal/log" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/paging" | 	"github.com/superseriousbusiness/gotosocial/internal/paging" | ||||||
|  | 	"github.com/superseriousbusiness/gotosocial/internal/util/xslices" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // StatusMeta contains minimum viable metadata | // StatusMeta contains minimum viable metadata | ||||||
|  | @ -59,6 +60,12 @@ type StatusMeta struct { | ||||||
| 	loaded *gtsmodel.Status | 	loaded *gtsmodel.Status | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | // notLoaded is a small utility func reporting whether this | ||||||
|  | // entry's status model is still unloaded; it fills the | ||||||
|  | // slices.DeleteFunc() signature requirements. | ||||||
|  | func (m *StatusMeta) notLoaded() bool { | ||||||
|  | } | ||||||
|  | 
 | ||||||
| // StatusTimelines ... | // StatusTimelines ... | ||||||
| type StatusTimelines struct { | type StatusTimelines struct { | ||||||
| 	ptr atomic.Pointer[map[string]*StatusTimeline] // ronly except by CAS | 	ptr atomic.Pointer[map[string]*StatusTimeline] // ronly except by CAS | ||||||
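The helper added above exists so that a method expression can be passed directly to slices.DeleteFunc. A small self-contained illustration of the same trick, using a hypothetical item type:

    package main

    import (
        "fmt"
        "slices"
    )

    type item struct{ loaded *string }

    // notLoaded reports whether the item still has no loaded value. The
    // method expression (*item).notLoaded has type func(*item) bool,
    // which is exactly what slices.DeleteFunc expects.
    func (it *item) notLoaded() bool { return it.loaded == nil }

    func main() {
        v := "hello"
        items := []*item{{loaded: &v}, {loaded: nil}, {loaded: &v}}

        // Drop every entry whose value was never loaded.
        items = slices.DeleteFunc(items, (*item).notLoaded)

        fmt.Println(len(items)) // 2
    }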
|  | @ -266,7 +273,7 @@ func (t *StatusTimeline) Init(cap int) { | ||||||
| 				AccountID:        s.AccountID, | 				AccountID:        s.AccountID, | ||||||
| 				BoostOfID:        s.BoostOfID, | 				BoostOfID:        s.BoostOfID, | ||||||
| 				BoostOfAccountID: s.BoostOfAccountID, | 				BoostOfAccountID: s.BoostOfAccountID, | ||||||
| 				loaded:           nil, // NEVER copied | 				loaded:           nil, // NEVER stored | ||||||
| 				prepared:         prepared, | 				prepared:         prepared, | ||||||
| 			} | 			} | ||||||
| 		}, | 		}, | ||||||
|  | @ -285,25 +292,28 @@ func (t *StatusTimeline) Load( | ||||||
| 	page *paging.Page, | 	page *paging.Page, | ||||||
| 
 | 
 | ||||||
| 	// loadPage should load the timeline of given page for cache hydration. | 	// loadPage should load the timeline of given page for cache hydration. | ||||||
| 	loadPage func(page *paging.Page) ([]*gtsmodel.Status, error), | 	loadPage func(page *paging.Page) (statuses []*gtsmodel.Status, err error), | ||||||
| 
 | 
 | ||||||
| 	// loadIDs should load status models with given IDs. | 	// loadIDs should load status models with given IDs, this is used | ||||||
| 	loadIDs func([]string) ([]*gtsmodel.Status, error), | 	// to load status models of already cached entries in the timeline. | ||||||
|  | 	loadIDs func(ids []string) (statuses []*gtsmodel.Status, err error), | ||||||
| 
 | 
 | ||||||
| 	// preFilter can be used to perform filtering of returned | 	// preFilter can be used to perform filtering of returned | ||||||
| 	// statuses BEFORE insert into cache. i.e. this will effect | 	// statuses BEFORE insert into cache. i.e. this will effect | ||||||
| 	// what actually gets stored in the timeline cache. | 	// what actually gets stored in the timeline cache. | ||||||
| 	preFilter func(*gtsmodel.Status) (bool, error), | 	preFilter func(each *gtsmodel.Status) (delete bool, err error), | ||||||
| 
 | 
 | ||||||
| 	// postFilterFn can be used to perform filtering of returned | 	// postFilterFn can be used to perform filtering of returned | ||||||
| 	// statuses AFTER insert into cache. i.e. this will not effect | 	// statuses AFTER insert into cache. i.e. this will not effect | ||||||
| 	// what actually gets stored in the timeline cache. | 	// what actually gets stored in the timeline cache. | ||||||
| 	postFilter func(*StatusMeta) bool, | 	postFilter func(each *gtsmodel.Status) (delete bool, err error), | ||||||
| 
 | 
 | ||||||
| 	// prepareAPI should prepare internal status model to frontend API model. | 	// prepareAPI should prepare internal status model to frontend API model. | ||||||
| 	prepareAPI func(*gtsmodel.Status) (*apimodel.Status, error), | 	prepareAPI func(status *gtsmodel.Status) (apiStatus *apimodel.Status, err error), | ||||||
| ) ( | ) ( | ||||||
| 	[]*apimodel.Status, | 	[]*apimodel.Status, | ||||||
|  | 	string, // lo | ||||||
|  | 	string, // hi | ||||||
| 	error, | 	error, | ||||||
| ) { | ) { | ||||||
| 	switch { | 	switch { | ||||||
|  | @ -320,18 +330,54 @@ func (t *StatusTimeline) Load( | ||||||
| 	ord := page.Order() | 	ord := page.Order() | ||||||
| 	dir := toDirection(ord) | 	dir := toDirection(ord) | ||||||
| 
 | 
 | ||||||
| 	// Load cached timeline entries for page. | 	// First we attempt to load status metadata | ||||||
| 	meta := t.cache.Select(min, max, lim, dir) | 	// entries from the timeline cache, up to lim. | ||||||
|  | 	metas := t.cache.Select(min, max, lim, dir) | ||||||
| 
 | 
 | ||||||
| 	// Perform any timeline post-filtering. | 	// Set the starting lo / hi ID paging | ||||||
| 	meta = doPostFilter(meta, postFilter) | 	// values. We continually update these | ||||||
|  | 	// for further timeline selections and | ||||||
|  | 	// for returning final next / prev pgs. | ||||||
|  | 	lo, hi := min, max | ||||||
| 
 | 
 | ||||||
| 	// ... | 	if len(metas) > 0 { | ||||||
| 	if need := len(meta) - lim; need > 0 { | 		// Update paging values | ||||||
|  | 		// based on returned data. | ||||||
|  | 		lo, hi = nextPageParams( | ||||||
|  | 			lo, hi, | ||||||
|  | 			metas[len(metas)-1].ID, | ||||||
|  | 			metas[0].ID, | ||||||
|  | 			ord, | ||||||
|  | 		) | ||||||
| 
 | 
 | ||||||
| 		// Set first page | 		// Before we can do any filtering, we need | ||||||
| 		// query to load. | 		// to load status models for cached entries. | ||||||
| 		nextPg := page | 		err := loadStatuses(ctx, metas, loadIDs) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return nil, "", "", gtserror.Newf("error loading statuses: %w", err) | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Drop all entries we failed to load statuses for. | ||||||
|  | 		metas = slices.DeleteFunc(metas, (*StatusMeta).notLoaded) | ||||||
|  | 
 | ||||||
|  | 		// Perform any post-filtering on cached status entries. | ||||||
|  | 		metas, _, err = doStatusPostFilter(metas, postFilter) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return nil, "", "", gtserror.Newf("error post-filtering statuses: %w", err) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	var filtered []*StatusMeta | ||||||
|  | 
 | ||||||
|  | 	// Check whether loaded enough from cache. | ||||||
|  | 	if need := lim - len(metas); need > 0 { | ||||||
|  | 
 | ||||||
|  | 		// Use a copy of current page so | ||||||
|  | 		// we can repeatedly update it. | ||||||
|  | 		nextPg := new(paging.Page) | ||||||
|  | 		*nextPg = *page | ||||||
|  | 		nextPg.Min.Value = lo | ||||||
|  | 		nextPg.Max.Value = hi | ||||||
| 
 | 
 | ||||||
| 		// Perform a maximum of 5 | 		// Perform a maximum of 5 | ||||||
| 		// load attempts fetching | 		// load attempts fetching | ||||||
|  | @ -341,7 +387,7 @@ func (t *StatusTimeline) Load( | ||||||
| 			// Load next timeline statuses. | 			// Load next timeline statuses. | ||||||
| 			statuses, err := loadPage(nextPg) | 			statuses, err := loadPage(nextPg) | ||||||
| 			if err != nil { | 			if err != nil { | ||||||
| 				return nil, gtserror.Newf("error loading timeline: %w", err) | 				return nil, "", "", gtserror.Newf("error loading timeline: %w", err) | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			// No more statuses from | 			// No more statuses from | ||||||
|  | @ -350,59 +396,65 @@ func (t *StatusTimeline) Load( | ||||||
| 				break | 				break | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			// Get the lowest and highest | 			// Update paging values | ||||||
| 			// ID values, used for next pg. | 			// based on returned data. | ||||||
| 			// Done BEFORE status filtering. | 			lo, hi = nextPageParams( | ||||||
| 			lo := statuses[len(statuses)-1].ID | 				lo, hi, | ||||||
| 			hi := statuses[0].ID | 				statuses[len(statuses)-1].ID, | ||||||
|  | 				statuses[0].ID, | ||||||
|  | 				ord, | ||||||
|  | 			) | ||||||
| 
 | 
 | ||||||
| 			// Perform any status timeline pre-filtering. | 			// Update paging params. | ||||||
| 			statuses, err = doPreFilter(statuses, preFilter) | 			nextPg.Min.Value = lo | ||||||
|  | 			nextPg.Max.Value = hi | ||||||
|  | 
 | ||||||
|  | 			// Perform any pre-filtering on newly loaded statuses. | ||||||
|  | 			statuses, err = doStatusPreFilter(statuses, preFilter) | ||||||
| 			if err != nil { | 			if err != nil { | ||||||
| 				return nil, gtserror.Newf("error pre-filtering timeline: %w", err) | 				return nil, "", "", gtserror.Newf("error pre-filtering statuses: %w", err) | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			// After filtering no more | ||||||
|  | 			// statuses remain, retry. | ||||||
|  | 			if len(statuses) == 0 { | ||||||
|  | 				continue | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			// Convert to our cache type, | 			// Convert to our cache type, | ||||||
| 			// these will get inserted into | 			// these will get inserted into | ||||||
| 			// the cache in prepare() below. | 			// the cache in prepare() below. | ||||||
| 			m := toStatusMeta(statuses) | 			uncached := toStatusMeta(statuses) | ||||||
| 
 | 
 | ||||||
| 			// Perform any post-filtering. | 			// Perform any post-filtering on recently loaded timeline entries. | ||||||
| 			// and append to main meta slice. | 			newMetas, newFiltered, err := doStatusPostFilter(uncached, postFilter) | ||||||
| 			m = slices.DeleteFunc(m, postFilter) | 			if err != nil { | ||||||
| 			meta = append(meta, m...) | 				return nil, "", "", gtserror.Newf("error post-filtering statuses: %w", err) | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			// Append the meta to their relevant slices. | ||||||
|  | 			filtered = append(filtered, newFiltered...) | ||||||
|  | 			metas = append(metas, newMetas...) | ||||||
| 
 | 
 | ||||||
| 			// Check if we reached | 			// Check if we reached | ||||||
| 			// requested page limit. | 			// requested page limit. | ||||||
| 			if len(meta) >= lim { | 			if len(metas) >= lim { | ||||||
| 				break | 				break | ||||||
| 			} | 			} | ||||||
| 
 |  | ||||||
| 			// Set next paging value. |  | ||||||
| 			nextPg = nextPg.Next(lo, hi) |  | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Using meta and given funcs, prepare frontend API models. | 	// Using meta and funcs, prepare frontend API models. | ||||||
| 	apiStatuses, err := t.prepare(ctx, meta, loadIDs, prepareAPI) | 	apiStatuses, err := t.prepare(ctx, metas, prepareAPI) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, gtserror.Newf("error preparing api statuses: %w", err) | 		return nil, "", "", gtserror.Newf("error preparing api statuses: %w", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Ensure the returned statuses are ALWAYS in descending order. | 	// Even if we don't return them, insert | ||||||
| 	slices.SortFunc(apiStatuses, func(s1, s2 *apimodel.Status) int { | 	// the excess (post-filtered) into cache. | ||||||
| 		const k = +1 | 	t.cache.Insert(filtered...) | ||||||
| 		switch { |  | ||||||
| 		case s1.ID > s2.ID: |  | ||||||
| 			return +k |  | ||||||
| 		case s1.ID < s2.ID: |  | ||||||
| 			return -k |  | ||||||
| 		default: |  | ||||||
| 			return 0 |  | ||||||
| 		} |  | ||||||
| 	}) |  | ||||||
| 
 | 
 | ||||||
| 	return apiStatuses, nil | 	return apiStatuses, lo, hi, nil | ||||||
| } | } | ||||||
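Stripped of detail, Load's control flow above is: select whatever the cache already holds, then repeatedly page the database, pre-filtering and caching each batch, until the requested limit is met or a bounded number of attempts is exhausted. A reduced sketch of that shape (illustrative only, with plain strings standing in for statuses):

    // fillPage is an illustrative reduction of the cache-then-database
    // loop in StatusTimeline.Load, not the method itself.
    func fillPage(
        cached []string, // entries already selected from cache
        limit int, // requested page size
        loadNext func() ([]string, error), // loads the next database page
        keep func(string) bool, // pre-filter applied before caching
    ) ([]string, error) {
        out := cached

        // Bound the number of database round trips,
        // mirroring the "maximum of 5 load attempts" above.
        for attempts := 0; len(out) < limit && attempts < 5; attempts++ {
            batch, err := loadNext()
            if err != nil {
                return nil, err
            }
            if len(batch) == 0 {
                break // nothing left to load
            }
            for _, v := range batch {
                if keep(v) {
                    // The real code would also insert
                    // the kept entry into the cache here.
                    out = append(out, v)
                }
            }
        }

        return out, nil
    }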
| 
 | 
 | ||||||
| // Insert ... | // Insert ... | ||||||
|  | @ -543,15 +595,12 @@ func (t *StatusTimeline) Clear() { t.cache.Clear() } | ||||||
| func (t *StatusTimeline) prepare( | func (t *StatusTimeline) prepare( | ||||||
| 	ctx context.Context, | 	ctx context.Context, | ||||||
| 	meta []*StatusMeta, | 	meta []*StatusMeta, | ||||||
| 	loadIDs func([]string) ([]*gtsmodel.Status, error), |  | ||||||
| 	prepareAPI func(*gtsmodel.Status) (*apimodel.Status, error), | 	prepareAPI func(*gtsmodel.Status) (*apimodel.Status, error), | ||||||
| ) ( | ) ( | ||||||
| 	[]*apimodel.Status, | 	[]*apimodel.Status, | ||||||
| 	error, | 	error, | ||||||
| ) { | ) { | ||||||
| 	switch { | 	switch { | ||||||
| 	case loadIDs == nil: |  | ||||||
| 		panic("nil load fn") |  | ||||||
| 	case prepareAPI == nil: | 	case prepareAPI == nil: | ||||||
| 		panic("nil prepare fn") | 		panic("nil prepare fn") | ||||||
| 	} | 	} | ||||||
|  | @ -569,39 +618,16 @@ func (t *StatusTimeline) prepare( | ||||||
| 
 | 
 | ||||||
| 	// If there were no unprepared | 	// If there were no unprepared | ||||||
| 	// StatusMeta objects, then we | 	// StatusMeta objects, then we | ||||||
| 	// gathered everything we need! | 	// gathered everything we can! | ||||||
| 	if len(unprepared) == 0 { | 	if len(unprepared) == 0 { | ||||||
| 		return apiStatuses, nil | 		return apiStatuses, nil | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Of the StatusMeta objects missing a prepared | 	// By this point all status objects should | ||||||
| 	// frontend model, find those without a recently | 	// be fully populated with loaded models, | ||||||
| 	// fetched database model and store their IDs, | 	// since they are required for filtering. | ||||||
| 	// as well mapping them for faster update below. |  | ||||||
| 	toLoadIDs := make([]string, len(unprepared)) |  | ||||||
| 	loadedMap := make(map[string]*StatusMeta, len(unprepared)) |  | ||||||
| 	for i, meta := range unprepared { |  | ||||||
| 		if meta.loaded == nil { |  | ||||||
| 			toLoadIDs[i] = meta.ID |  | ||||||
| 			loadedMap[meta.ID] = meta |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// Load statuses with given IDs. |  | ||||||
| 	loaded, err := loadIDs(toLoadIDs) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return nil, gtserror.Newf("error loading statuses: %w", err) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// Update returned StatusMeta objects |  | ||||||
| 	// with newly loaded statuses by IDs. |  | ||||||
| 	for i := range loaded { |  | ||||||
| 		status := loaded[i] |  | ||||||
| 		meta := loadedMap[status.ID] |  | ||||||
| 		meta.loaded = status |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	for i := 0; i < len(unprepared); { | 	for i := 0; i < len(unprepared); { | ||||||
|  | 
 | ||||||
| 		// Get meta at index. | 		// Get meta at index. | ||||||
| 		meta := unprepared[i] | 		meta := unprepared[i] | ||||||
| 
 | 
 | ||||||
|  | @ -632,28 +658,61 @@ func (t *StatusTimeline) prepare( | ||||||
| 	return apiStatuses, nil | 	return apiStatuses, nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | // loadStatuses loads the statuses for any of the given StatusMeta | ||||||
|  | // entries still missing them, using the provided loadIDs function. | ||||||
|  | func loadStatuses( | ||||||
|  | 	ctx context.Context, | ||||||
|  | 	metas []*StatusMeta, | ||||||
|  | 	loadIDs func([]string) ([]*gtsmodel.Status, error), | ||||||
|  | ) error { | ||||||
|  | 	// Gather IDs of entries still missing their status | ||||||
|  | 	// model, mapping each by ID for the update loop below. | ||||||
|  | 	toLoadIDs := make([]string, 0, len(metas)) | ||||||
|  | 	loadedMap := make(map[string]*StatusMeta, len(metas)) | ||||||
|  | 	for _, meta := range metas { | ||||||
|  | 		if meta.loaded == nil { | ||||||
|  | 			toLoadIDs = append(toLoadIDs, meta.ID) | ||||||
|  | 			loadedMap[meta.ID] = meta | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Load statuses with given IDs. | ||||||
|  | 	loaded, err := loadIDs(toLoadIDs) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return gtserror.Newf("error loading statuses: %w", err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Update returned StatusMeta objects | ||||||
|  | 	// with newly loaded statuses by IDs. | ||||||
|  | 	for i := range loaded { | ||||||
|  | 		status := loaded[i] | ||||||
|  | 		meta := loadedMap[status.ID] | ||||||
|  | 		meta.loaded = status | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
| // toStatusMeta converts a slice of database model statuses | // toStatusMeta converts a slice of database model statuses | ||||||
| // into our cache wrapper type, a slice of []StatusMeta{}. | // into our cache wrapper type, a slice of []StatusMeta{}. | ||||||
| func toStatusMeta(statuses []*gtsmodel.Status) []*StatusMeta { | func toStatusMeta(statuses []*gtsmodel.Status) []*StatusMeta { | ||||||
| 	meta := make([]*StatusMeta, len(statuses)) | 	return xslices.Gather(nil, statuses, func(s *gtsmodel.Status) *StatusMeta { | ||||||
| 	for i := range statuses { | 		return &StatusMeta{ | ||||||
| 		status := statuses[i] | 			ID:               s.ID, | ||||||
| 		meta[i] = &StatusMeta{ | 			AccountID:        s.AccountID, | ||||||
| 			ID:               status.ID, | 			BoostOfID:        s.BoostOfID, | ||||||
| 			AccountID:        status.AccountID, | 			BoostOfAccountID: s.BoostOfAccountID, | ||||||
| 			BoostOfID:        status.BoostOfID, | 			Local:            *s.Local, | ||||||
| 			BoostOfAccountID: status.BoostOfAccountID, | 			loaded:           s, | ||||||
| 			Local:            *status.Local, |  | ||||||
| 			loaded:           status, |  | ||||||
| 			prepared:         nil, | 			prepared:         nil, | ||||||
| 		} | 		} | ||||||
| 	} | 	}) | ||||||
| 	return meta |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // doPreFilter acts similarly to slices.DeleteFunc but it accepts function with error return, or nil, returning early if so. | // doStatusPreFilter acts like slices.DeleteFunc on statuses, but accepts a nil-able filter function that may return an error, returning early if it does. | ||||||
| func doPreFilter(statuses []*gtsmodel.Status, preFilter func(*gtsmodel.Status) (bool, error)) ([]*gtsmodel.Status, error) { | func doStatusPreFilter(statuses []*gtsmodel.Status, filter func(*gtsmodel.Status) (bool, error)) ([]*gtsmodel.Status, error) { | ||||||
| 	if preFilter == nil { | 
 | ||||||
|  | 	// Check for provided | ||||||
|  | 	// filter function. | ||||||
|  | 	if filter == nil { | ||||||
| 		return statuses, nil | 		return statuses, nil | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -662,7 +721,7 @@ func doPreFilter(statuses []*gtsmodel.Status, preFilter func(*gtsmodel.Status) ( | ||||||
| 		status := statuses[i] | 		status := statuses[i] | ||||||
| 
 | 
 | ||||||
| 		// Pass through filter func. | 		// Pass through filter func. | ||||||
| 		ok, err := preFilter(status) | 		ok, err := filter(status) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return nil, err | 			return nil, err | ||||||
| 		} | 		} | ||||||
|  | @ -680,22 +739,38 @@ func doPreFilter(statuses []*gtsmodel.Status, preFilter func(*gtsmodel.Status) ( | ||||||
| 	return statuses, nil | 	return statuses, nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // doPostFilter acts similarly to slices.DeleteFunc but it handles case of a nil function. | // doStatusPostFilter splits metas into kept and filtered-out slices using the given nil-able, error-returning filter function. | ||||||
| func doPostFilter(statuses []*StatusMeta, postFilter func(*StatusMeta) bool) []*StatusMeta { | func doStatusPostFilter(metas []*StatusMeta, filter func(*gtsmodel.Status) (bool, error)) ([]*StatusMeta, []*StatusMeta, error) { | ||||||
| 	if postFilter == nil { |  | ||||||
| 		return statuses |  | ||||||
| 	} |  | ||||||
| 	return slices.DeleteFunc(statuses, postFilter) |  | ||||||
| } |  | ||||||
| 
 | 
 | ||||||
| // toDirection converts page order to timeline direction. | 	// Check for provided | ||||||
| func toDirection(o paging.Order) structr.Direction { | 	// filter function. | ||||||
| 	switch o { | 	if filter == nil { | ||||||
| 	case paging.OrderAscending: | 		return metas, nil, nil | ||||||
| 		return structr.Asc |  | ||||||
| 	case paging.OrderDescending: |  | ||||||
| 		return structr.Desc |  | ||||||
| 	default: |  | ||||||
| 		return false |  | ||||||
| 	} | 	} | ||||||
|  | 
 | ||||||
|  | 	// Prepare a slice to store filtered statuses. | ||||||
|  | 	filtered := make([]*StatusMeta, 0, len(metas)) | ||||||
|  | 
 | ||||||
|  | 	// Iterate through input metas. | ||||||
|  | 	for i := 0; i < len(metas); { | ||||||
|  | 		meta := metas[i] | ||||||
|  | 
 | ||||||
|  | 		// Pass through filter func. | ||||||
|  | 		ok, err := filter(meta.loaded) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return nil, nil, err | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if ok { | ||||||
|  | 			// Delete meta and add to filtered. | ||||||
|  | 			metas = slices.Delete(metas, i, i+1) | ||||||
|  | 			filtered = append(filtered, meta) | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Iter. | ||||||
|  | 		i++ | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return metas, filtered, nil | ||||||
| } | } | ||||||
|  |  | ||||||
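doStatusPostFilter above relies on the delete-in-place idiom: the index only advances when nothing was removed at position i, because slices.Delete shifts the remaining elements left. The same idiom in isolation, as a generic partition helper (illustrative, not part of the codebase):

    package main

    import (
        "fmt"
        "slices"
    )

    // partition moves every element matching pred out of s into a second
    // slice, preserving order in both. Note that i is only incremented
    // when no deletion happened at position i.
    func partition[T any](s []T, pred func(T) bool) (kept, removed []T) {
        for i := 0; i < len(s); {
            if pred(s[i]) {
                removed = append(removed, s[i])
                s = slices.Delete(s, i, i+1)
                continue
            }
            i++
        }
        return s, removed
    }

    func main() {
        kept, removed := partition([]int{1, 2, 3, 4, 5}, func(n int) bool { return n%2 == 0 })
        fmt.Println(kept, removed) // [1 3 5] [2 4]
    }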
							
								
								
									
internal/cache/timeline/timeline.go (vendored, new file): 30 added lines
							|  | @ -0,0 +1,30 @@ | ||||||
|  | package timeline | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"codeberg.org/gruf/go-structr" | ||||||
|  | 	"github.com/superseriousbusiness/gotosocial/internal/paging" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func nextPageParams( | ||||||
|  | 	curLo, curHi string, // current page params | ||||||
|  | 	nextLo, nextHi string, // next lo / hi values | ||||||
|  | 	order paging.Order, | ||||||
|  | ) (lo string, hi string) { | ||||||
|  | 	if order.Ascending() { | ||||||
|  | 
 | ||||||
|  | 	} else /* i.e. descending */ { | ||||||
|  | 
 | ||||||
|  | 	} | ||||||
|  | } | ||||||
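nextPageParams is left unimplemented at this commit: both branches are empty and the function has no return statement. One plausible completion, offered purely as an assumption about the intended cursor behaviour rather than the author's eventual code: an ascending walk advances the lower cursor to the last ID seen, a descending walk advances the upper cursor to the last ID seen, and the opposite bound stays put.

    // nextPageParamsSketch is a hypothetical completion, for illustration only.
    func nextPageParamsSketch(
        curLo, curHi string, // current page params
        lastID string, // last ID returned, in iteration order
        order paging.Order,
    ) (lo string, hi string) {
        if order.Ascending() {
            // Walking upwards: the lower cursor advances,
            // the upper bound is left untouched.
            return lastID, curHi
        }
        // Walking downwards: the upper cursor advances,
        // the lower bound is left untouched.
        return curLo, lastID
    }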
|  | 
 | ||||||
|  | // toDirection converts page order to timeline direction. | ||||||
|  | func toDirection(o paging.Order) structr.Direction { | ||||||
|  | 	switch o { | ||||||
|  | 	case paging.OrderAscending: | ||||||
|  | 		return structr.Asc | ||||||
|  | 	case paging.OrderDescending: | ||||||
|  | 		return structr.Desc | ||||||
|  | 	default: | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | @ -87,7 +87,7 @@ func (l *listDB) getList(ctx context.Context, lookup string, dbQuery func(*gtsmo | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (l *listDB) GetListsByAccountID(ctx context.Context, accountID string) ([]*gtsmodel.List, error) { | func (l *listDB) GetListsByAccountID(ctx context.Context, accountID string) ([]*gtsmodel.List, error) { | ||||||
| 	listIDs, err := l.getListIDsByAccountID(ctx, accountID) | 	listIDs, err := l.GetListIDsByAccountID(ctx, accountID) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, err | 		return nil, err | ||||||
| 	} | 	} | ||||||
|  | @ -95,7 +95,7 @@ func (l *listDB) GetListsByAccountID(ctx context.Context, accountID string) ([]* | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (l *listDB) CountListsByAccountID(ctx context.Context, accountID string) (int, error) { | func (l *listDB) CountListsByAccountID(ctx context.Context, accountID string) (int, error) { | ||||||
| 	listIDs, err := l.getListIDsByAccountID(ctx, accountID) | 	listIDs, err := l.GetListIDsByAccountID(ctx, accountID) | ||||||
| 	return len(listIDs), err | 	return len(listIDs), err | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -176,9 +176,8 @@ func (l *listDB) UpdateList(ctx context.Context, list *gtsmodel.List, columns .. | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if err := l.state.Timelines.List.RemoveTimeline(ctx, list.ID); err != nil { | 	// Clear cached timeline associated with list ID. | ||||||
| 		log.Errorf(ctx, "error invalidating list timeline: %q", err) | 	l.state.Caches.Timelines.List.Clear(list.ID) | ||||||
| 	} |  | ||||||
| 
 | 
 | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
|  | @ -220,10 +219,13 @@ func (l *listDB) DeleteListByID(ctx context.Context, id string) error { | ||||||
| 	// Invalidate all related entry caches for this list. | 	// Invalidate all related entry caches for this list. | ||||||
| 	l.invalidateEntryCaches(ctx, []string{id}, followIDs) | 	l.invalidateEntryCaches(ctx, []string{id}, followIDs) | ||||||
| 
 | 
 | ||||||
|  | 	// Delete the cached timeline of list. | ||||||
|  | 	l.state.Caches.Timelines.List.Delete(id) | ||||||
|  | 
 | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (l *listDB) getListIDsByAccountID(ctx context.Context, accountID string) ([]string, error) { | func (l *listDB) GetListIDsByAccountID(ctx context.Context, accountID string) ([]string, error) { | ||||||
| 	return l.state.Caches.DB.ListIDs.Load("a"+accountID, func() ([]string, error) { | 	return l.state.Caches.DB.ListIDs.Load("a"+accountID, func() ([]string, error) { | ||||||
| 		var listIDs []string | 		var listIDs []string | ||||||
| 
 | 
 | ||||||
|  | @ -460,10 +462,8 @@ func (l *listDB) invalidateEntryCaches(ctx context.Context, listIDs, followIDs [ | ||||||
| 			"f"+listID, | 			"f"+listID, | ||||||
| 		) | 		) | ||||||
| 
 | 
 | ||||||
| 		// Invalidate the timeline for the list this entry belongs to. | 		// Invalidate list timeline cache by ID. | ||||||
| 		if err := l.state.Timelines.List.RemoveTimeline(ctx, listID); err != nil { | 		l.state.Caches.Timelines.List.Clear(listID) | ||||||
| 			log.Errorf(ctx, "error invalidating list timeline: %q", err) |  | ||||||
| 		} |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Invalidate ListedID slice cache entries. | 	// Invalidate ListedID slice cache entries. | ||||||
|  |  | ||||||
|  | @ -34,6 +34,9 @@ type List interface { | ||||||
| 	// GetListsByAccountID gets all lists owned by the given accountID. | 	// GetListsByAccountID gets all lists owned by the given accountID. | ||||||
| 	GetListsByAccountID(ctx context.Context, accountID string) ([]*gtsmodel.List, error) | 	GetListsByAccountID(ctx context.Context, accountID string) ([]*gtsmodel.List, error) | ||||||
| 
 | 
 | ||||||
|  | 	// GetListIDsByAccountID gets the IDs of all lists owned by the given accountID. | ||||||
|  | 	GetListIDsByAccountID(ctx context.Context, accountID string) ([]string, error) | ||||||
|  | 
 | ||||||
| 	// CountListsByAccountID counts the number of lists owned by the given accountID. | 	// CountListsByAccountID counts the number of lists owned by the given accountID. | ||||||
| 	CountListsByAccountID(ctx context.Context, accountID string) (int, error) | 	CountListsByAccountID(ctx context.Context, accountID string) (int, error) | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -306,25 +306,10 @@ func (p *Processor) InvalidateTimelinedStatus(ctx context.Context, accountID str | ||||||
| 		return gtserror.Newf("db error getting lists for account %s: %w", accountID, err) | 		return gtserror.Newf("db error getting lists for account %s: %w", accountID, err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Start new log entry with | 	// Unprepare item from home + list timelines. | ||||||
| 	// the above calling func's name. | 	p.state.Caches.Timelines.Home.MustGet(accountID).UnprepareByStatusIDs(statusID) | ||||||
| 	l := log. |  | ||||||
| 		WithContext(ctx). |  | ||||||
| 		WithField("caller", log.Caller(3)). |  | ||||||
| 		WithField("accountID", accountID). |  | ||||||
| 		WithField("statusID", statusID) |  | ||||||
| 
 |  | ||||||
| 	// Unprepare item from home + list timelines, just log |  | ||||||
| 	// if something goes wrong since this is not a showstopper. |  | ||||||
| 
 |  | ||||||
| 	if err := p.state.Timelines.Home.UnprepareItem(ctx, accountID, statusID); err != nil { |  | ||||||
| 		l.Errorf("error unpreparing item from home timeline: %v", err) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	for _, list := range lists { | 	for _, list := range lists { | ||||||
| 		if err := p.state.Timelines.List.UnprepareItem(ctx, list.ID, statusID); err != nil { | 		p.state.Caches.Timelines.List.MustGet(list.ID).UnprepareByStatusIDs(statusID) | ||||||
| 			l.Errorf("error unpreparing item from list timeline %s: %v", list.ID, err) |  | ||||||
| 		} |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return nil | 	return nil | ||||||
|  |  | ||||||
|  | @ -19,11 +19,9 @@ package timeline | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"context" | 	"context" | ||||||
| 	"errors" | 	"net/url" | ||||||
| 	"slices" |  | ||||||
| 
 | 
 | ||||||
| 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" |  | ||||||
| 	statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status" | 	statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtserror" | 	"github.com/superseriousbusiness/gotosocial/internal/gtserror" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | ||||||
|  | @ -40,16 +38,14 @@ func (p *Processor) HomeTimelineGet( | ||||||
| 	*apimodel.PageableResponse, | 	*apimodel.PageableResponse, | ||||||
| 	gtserror.WithCode, | 	gtserror.WithCode, | ||||||
| ) { | ) { | ||||||
| 
 | 	return p.getStatusTimeline(ctx, | ||||||
| 	// Load timeline data. |  | ||||||
| 	return p.getTimeline(ctx, |  | ||||||
| 
 | 
 | ||||||
| 		// Auth'd | 		// Auth'd | ||||||
| 		// account. | 		// account. | ||||||
| 		requester, | 		requester, | ||||||
| 
 | 
 | ||||||
| 		// Home timeline cache for authorized account. | 		// Per-account home timeline cache. | ||||||
| 		p.state.Caches.Timelines.Home.Get(requester.ID), | 		p.state.Caches.Timelines.Home.MustGet(requester.ID), | ||||||
| 
 | 
 | ||||||
| 		// Current | 		// Current | ||||||
| 		// page. | 		// page. | ||||||
|  | @ -58,70 +54,45 @@ func (p *Processor) HomeTimelineGet( | ||||||
| 		// Home timeline endpoint. | 		// Home timeline endpoint. | ||||||
| 		"/api/v1/timelines/home", | 		"/api/v1/timelines/home", | ||||||
| 
 | 
 | ||||||
| 		// No page | 		// Set local-only timeline | ||||||
| 		// query. | 		// page query flag, (this map | ||||||
| 		nil, | 		// later gets copied before | ||||||
|  | 		// any further usage). | ||||||
|  | 		func() url.Values { | ||||||
|  | 			if local { | ||||||
|  | 				return localOnlyTrue | ||||||
|  | 			} | ||||||
|  | 			return localOnlyFalse | ||||||
|  | 		}(), | ||||||
| 
 | 
 | ||||||
| 		// Status filter context. | 		// Status filter context. | ||||||
| 		statusfilter.FilterContextHome, | 		statusfilter.FilterContextHome, | ||||||
| 
 | 
 | ||||||
| 		// Timeline cache load function, used to further hydrate cache where necessary. | 		// Database load function. | ||||||
| 		func(page *paging.Page) (statuses []*gtsmodel.Status, next *paging.Page, err error) { | 		func(pg *paging.Page) (statuses []*gtsmodel.Status, err error) { | ||||||
| 
 | 			return p.state.DB.GetHomeTimeline(ctx, requester.ID, pg) | ||||||
| 			// Fetch requesting account's home timeline page. |  | ||||||
| 			statuses, err = p.state.DB.GetHomeTimeline(ctx, |  | ||||||
| 				requester.ID, |  | ||||||
| 				page, |  | ||||||
| 			) |  | ||||||
| 			if err != nil && !errors.Is(err, db.ErrNoEntries) { |  | ||||||
| 				return nil, nil, gtserror.Newf("error getting statuses: %w", err) |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			if len(statuses) == 0 { |  | ||||||
| 				// No more to load. |  | ||||||
| 				return nil, nil, nil |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			// Get the lowest and highest |  | ||||||
| 			// ID values, used for next pg. |  | ||||||
| 			lo := statuses[len(statuses)-1].ID |  | ||||||
| 			hi := statuses[0].ID |  | ||||||
| 
 |  | ||||||
| 			// Set next paging value. |  | ||||||
| 			page = page.Next(lo, hi) |  | ||||||
| 
 |  | ||||||
| 			for i := 0; i < len(statuses); { |  | ||||||
| 				// Get status at idx. |  | ||||||
| 				status := statuses[i] |  | ||||||
| 
 |  | ||||||
| 				// Check whether status should be show on home timeline. |  | ||||||
| 				visible, err := p.visFilter.StatusHomeTimelineable(ctx, |  | ||||||
| 					requester, |  | ||||||
| 					status, |  | ||||||
| 				) |  | ||||||
| 				if err != nil { |  | ||||||
| 					return nil, nil, gtserror.Newf("error checking visibility: %w", err) |  | ||||||
| 				} |  | ||||||
| 
 |  | ||||||
| 				if !visible { |  | ||||||
| 					// Status not visible to home timeline. |  | ||||||
| 					statuses = slices.Delete(statuses, i, i+1) |  | ||||||
| 					continue |  | ||||||
| 				} |  | ||||||
| 
 |  | ||||||
| 				// Iter. |  | ||||||
| 				i++ |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			return |  | ||||||
| 		}, | 		}, | ||||||
| 
 | 
 | ||||||
| 		// Per-request filtering function. | 		// Pre-filtering function, | ||||||
| 		func(s *gtsmodel.Status) bool { | 		// i.e. filter before caching. | ||||||
| 			if local { | 		func(s *gtsmodel.Status) (bool, error) { | ||||||
| 				return !*s.Local | 
 | ||||||
|  | 			// Check the visibility of passed status to requesting user. | ||||||
|  | 			ok, err := p.visFilter.StatusHomeTimelineable(ctx, requester, s) | ||||||
|  | 			return !ok, err | ||||||
|  | 		}, | ||||||
|  | 
 | ||||||
|  | 		// Post-filtering function, | ||||||
|  | 		// i.e. filter after caching. | ||||||
|  | 		func(s *gtsmodel.Status) (bool, error) { | ||||||
|  | 
 | ||||||
|  | 			// Remove any non-local statuses | ||||||
|  | 			// if requester wants local-only. | ||||||
|  | 			if local && !*s.Local { | ||||||
|  | 				return true, nil | ||||||
| 			} | 			} | ||||||
| 			return false | 
 | ||||||
|  | 			return false, nil | ||||||
| 		}, | 		}, | ||||||
| 	) | 	) | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -20,7 +20,6 @@ package timeline | ||||||
| import ( | import ( | ||||||
| 	"context" | 	"context" | ||||||
| 	"errors" | 	"errors" | ||||||
| 	"slices" |  | ||||||
| 
 | 
 | ||||||
| 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
|  | @ -65,21 +64,21 @@ func (p *Processor) ListTimelineGet( | ||||||
| 		return nil, gtserror.NewErrorNotFound(err) | 		return nil, gtserror.NewErrorNotFound(err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Load timeline data. | 	// Fetch status timeline for list. | ||||||
| 	return p.getTimeline(ctx, | 	return p.getStatusTimeline(ctx, | ||||||
| 
 | 
 | ||||||
| 		// Auth'd | 		// Auth'd | ||||||
| 		// account. | 		// account. | ||||||
| 		requester, | 		requester, | ||||||
| 
 | 
 | ||||||
| 		// List timeline cache for list with ID. | 		// Per-list status timeline cache. | ||||||
| 		p.state.Caches.Timelines.List.Get(listID), | 		p.state.Caches.Timelines.List.MustGet(listID), | ||||||
| 
 | 
 | ||||||
| 		// Current | 		// Current | ||||||
| 		// page. | 		// page. | ||||||
| 		page, | 		page, | ||||||
| 
 | 
 | ||||||
| 		// List timeline endpoint. | 		// Timeline endpoint for this list ID. | ||||||
| 		"/api/v1/timelines/list/"+listID, | 		"/api/v1/timelines/list/"+listID, | ||||||
| 
 | 
 | ||||||
| 		// No page | 		// No page | ||||||
|  | @ -89,59 +88,22 @@ func (p *Processor) ListTimelineGet( | ||||||
| 		// Status filter context. | 		// Status filter context. | ||||||
| 		statusfilter.FilterContextHome, | 		statusfilter.FilterContextHome, | ||||||
| 
 | 
 | ||||||
| 		// Timeline cache load function, used to further hydrate cache where necessary. | 		// Database load function. | ||||||
| 		func(page *paging.Page) (statuses []*gtsmodel.Status, next *paging.Page, err error) { | 		func(pg *paging.Page) (statuses []*gtsmodel.Status, err error) { | ||||||
| 
| 			// Fetch requesting account's list timeline page. | 			return p.state.DB.GetListTimeline(ctx, listID, pg) | ||||||
| 			// Fetch requesting account's list timeline page. |  | ||||||
| 			statuses, err = p.state.DB.GetListTimeline(ctx, |  | ||||||
| 				listID, |  | ||||||
| 				page, |  | ||||||
| 			) |  | ||||||
| 			if err != nil && !errors.Is(err, db.ErrNoEntries) { |  | ||||||
| 				return nil, nil, gtserror.Newf("error getting statuses: %w", err) |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			if len(statuses) == 0 { |  | ||||||
| 				// No more to load. |  | ||||||
| 				return nil, nil, nil |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			// Get the lowest and highest |  | ||||||
| 			// ID values, used for next pg. |  | ||||||
| 			lo := statuses[len(statuses)-1].ID |  | ||||||
| 			hi := statuses[0].ID |  | ||||||
| 
 |  | ||||||
| 			// Set next paging value. |  | ||||||
| 			page = page.Next(lo, hi) |  | ||||||
| 
 |  | ||||||
| 			for i := 0; i < len(statuses); { |  | ||||||
| 				// Get status at idx. |  | ||||||
| 				status := statuses[i] |  | ||||||
| 
 |  | ||||||
| 				// Check whether status should be show on home timeline. |  | ||||||
| 				visible, err := p.visFilter.StatusHomeTimelineable(ctx, |  | ||||||
| 					requester, |  | ||||||
| 					status, |  | ||||||
| 				) |  | ||||||
| 				if err != nil { |  | ||||||
| 					return nil, nil, gtserror.Newf("error checking visibility: %w", err) |  | ||||||
| 				} |  | ||||||
| 
 |  | ||||||
| 				if !visible { |  | ||||||
| 					// Status not visible to home timeline. |  | ||||||
| 					statuses = slices.Delete(statuses, i, i+1) |  | ||||||
| 					continue |  | ||||||
| 				} |  | ||||||
| 
 |  | ||||||
| 				// Iter. |  | ||||||
| 				i++ |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			return |  | ||||||
| 		}, | 		}, | ||||||
| 
 | 
 | ||||||
| 		// No furthering | 		// Pre-filtering function, | ||||||
| 		// filter function. | 		// i.e. filter before caching. | ||||||
|  | 		func(s *gtsmodel.Status) (bool, error) { | ||||||
|  | 
 | ||||||
|  | 			// Check the visibility of passed status to requesting user. | ||||||
|  | 			ok, err := p.visFilter.StatusHomeTimelineable(ctx, requester, s) | ||||||
|  | 			return !ok, err | ||||||
|  | 		}, | ||||||
|  | 
 | ||||||
|  | 		// Post-filtering function, | ||||||
|  | 		// i.e. filter after caching. | ||||||
| 		nil, | 		nil, | ||||||
| 	) | 	) | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -19,13 +19,9 @@ package timeline | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"context" | 	"context" | ||||||
| 	"errors" |  | ||||||
| 	"net/url" | 	"net/url" | ||||||
| 	"slices" |  | ||||||
| 	"strconv" |  | ||||||
| 
 | 
 | ||||||
| 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" |  | ||||||
| 	statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status" | 	statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtserror" | 	"github.com/superseriousbusiness/gotosocial/internal/gtserror" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | ||||||
|  | @ -42,9 +38,7 @@ func (p *Processor) PublicTimelineGet( | ||||||
| 	*apimodel.PageableResponse, | 	*apimodel.PageableResponse, | ||||||
| 	gtserror.WithCode, | 	gtserror.WithCode, | ||||||
| ) { | ) { | ||||||
| 
 | 	return p.getStatusTimeline(ctx, | ||||||
| 	// Load timeline data. |  | ||||||
| 	return p.getTimeline(ctx, |  | ||||||
| 
 | 
 | ||||||
| 		// Auth'd | 		// Auth'd | ||||||
| 		// account. | 		// account. | ||||||
|  | @ -60,68 +54,42 @@ func (p *Processor) PublicTimelineGet( | ||||||
| 		// Public timeline endpoint. | 		// Public timeline endpoint. | ||||||
| 		"/api/v1/timelines/public", | 		"/api/v1/timelines/public", | ||||||
| 
 | 
 | ||||||
| 		// Set local-only timeline page query flag. | 		// Set local-only timeline | ||||||
| 		url.Values{"local": {strconv.FormatBool(local)}}, | 		// page query flag, (this map | ||||||
|  | 		// later gets copied before | ||||||
|  | 		// any further usage). | ||||||
|  | 		func() url.Values { | ||||||
|  | 			if local { | ||||||
|  | 				return localOnlyTrue | ||||||
|  | 			} | ||||||
|  | 			return localOnlyFalse | ||||||
|  | 		}(), | ||||||
| 
 | 
 | ||||||
| 		// Status filter context. | 		// Status filter context. | ||||||
| 		statusfilter.FilterContextPublic, | 		statusfilter.FilterContextPublic, | ||||||
| 
 | 
 | ||||||
| 		// Timeline cache load function, used to further hydrate cache where necessary. | 		// Database load function. | ||||||
| 		func(page *paging.Page) (statuses []*gtsmodel.Status, next *paging.Page, err error) { | 		func(pg *paging.Page) (statuses []*gtsmodel.Status, err error) { | ||||||
| 
 | 			return p.state.DB.GetPublicTimeline(ctx, pg) | ||||||
| 			// Fetch the global public status timeline page. |  | ||||||
| 			statuses, err = p.state.DB.GetPublicTimeline(ctx, |  | ||||||
| 				page, |  | ||||||
| 			) |  | ||||||
| 			if err != nil && !errors.Is(err, db.ErrNoEntries) { |  | ||||||
| 				return nil, nil, gtserror.Newf("error getting statuses: %w", err) |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			if len(statuses) == 0 { |  | ||||||
| 				// No more to load. |  | ||||||
| 				return nil, nil, nil |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			// Get the lowest and highest |  | ||||||
| 			// ID values, used for next pg. |  | ||||||
| 			lo := statuses[len(statuses)-1].ID |  | ||||||
| 			hi := statuses[0].ID |  | ||||||
| 
 |  | ||||||
| 			// Set next paging value. |  | ||||||
| 			page = page.Next(lo, hi) |  | ||||||
| 
 |  | ||||||
| 			for i := 0; i < len(statuses); { |  | ||||||
| 				// Get status at idx. |  | ||||||
| 				status := statuses[i] |  | ||||||
| 
 |  | ||||||
| 				// Check whether status should be show on public timeline. |  | ||||||
| 				visible, err := p.visFilter.StatusPublicTimelineable(ctx, |  | ||||||
| 					requester, |  | ||||||
| 					status, |  | ||||||
| 				) |  | ||||||
| 				if err != nil { |  | ||||||
| 					return nil, nil, gtserror.Newf("error checking visibility: %w", err) |  | ||||||
| 				} |  | ||||||
| 
 |  | ||||||
| 				if !visible { |  | ||||||
| 					// Status not visible to home timeline. |  | ||||||
| 					statuses = slices.Delete(statuses, i, i+1) |  | ||||||
| 					continue |  | ||||||
| 				} |  | ||||||
| 
 |  | ||||||
| 				// Iter. |  | ||||||
| 				i++ |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			return |  | ||||||
| 		}, | 		}, | ||||||
| 
 | 
 | ||||||
| 		// Per-request filtering function. | 		// Pre-filtering function, | ||||||
| 		func(s *gtsmodel.Status) bool { | 		// i.e. filter before caching. | ||||||
| 			if local { | 		nil, | ||||||
| 				return !*s.Local | 
 | ||||||
|  | 		// Post-filtering function, | ||||||
|  | 		// i.e. filter after caching. | ||||||
|  | 		func(s *gtsmodel.Status) (bool, error) { | ||||||
|  | 
 | ||||||
|  | 			// Remove any non-local statuses | ||||||
|  | 			// if requester wants local-only. | ||||||
|  | 			if local && !*s.Local { | ||||||
|  | 				return true, nil | ||||||
| 			} | 			} | ||||||
| 			return false | 
 | ||||||
|  | 			// Check the visibility of passed status to requesting user. | ||||||
|  | 			ok, err := p.visFilter.StatusPublicTimelineable(ctx, requester, s) | ||||||
|  | 			return !ok, err | ||||||
| 		}, | 		}, | ||||||
| 	) | 	) | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -20,11 +20,10 @@ package timeline | ||||||
| import ( | import ( | ||||||
| 	"context" | 	"context" | ||||||
| 	"errors" | 	"errors" | ||||||
|  | 	"net/http" | ||||||
| 	"net/url" | 	"net/url" | ||||||
| 	"slices" |  | ||||||
| 
 | 
 | ||||||
| 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/cache" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/cache/timeline" | 	"github.com/superseriousbusiness/gotosocial/internal/cache/timeline" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status" | 	statusfilter "github.com/superseriousbusiness/gotosocial/internal/filter/status" | ||||||
|  | @ -32,10 +31,19 @@ import ( | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/filter/visibility" | 	"github.com/superseriousbusiness/gotosocial/internal/filter/visibility" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtserror" | 	"github.com/superseriousbusiness/gotosocial/internal/gtserror" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/log" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/paging" | 	"github.com/superseriousbusiness/gotosocial/internal/paging" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/state" | 	"github.com/superseriousbusiness/gotosocial/internal/state" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/typeutils" | 	"github.com/superseriousbusiness/gotosocial/internal/typeutils" | ||||||
|  | 	"github.com/superseriousbusiness/gotosocial/internal/util/xslices" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | var ( | ||||||
|  | 	// pre-prepared URL values to be passed in to | ||||||
|  | 	// paging response forms. The paging package always | ||||||
|  | 	// copies values before any modifications so it's | ||||||
|  | 	// safe to only use a single map variable for these. | ||||||
|  | 	localOnlyTrue  = url.Values{"local": {"true"}} | ||||||
|  | 	localOnlyFalse = url.Values{"local": {"false"}} | ||||||
| ) | ) | ||||||
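The comment above relies on the invariant that these shared localOnlyTrue / localOnlyFalse maps are always copied before modification. Since url.Values is just a map[string][]string, the copy has to clone each value slice as well; a helper of roughly this shape (an illustration of the invariant, not the paging package's actual code) is what makes sharing a single package-level value safe:

    // copyValues deep-copies a url.Values so that a shared package-level
    // instance can be handed out without callers mutating the original.
    func copyValues(v url.Values) url.Values {
        out := make(url.Values, len(v))
        for k, vals := range v {
            out[k] = append([]string(nil), vals...)
        }
        return out
    }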
| 
 | 
 | ||||||
| type Processor struct { | type Processor struct { | ||||||
|  | @ -62,7 +70,7 @@ func (p *Processor) getStatusTimeline( | ||||||
| 	filterCtx statusfilter.FilterContext, | 	filterCtx statusfilter.FilterContext, | ||||||
| 	loadPage func(*paging.Page) (statuses []*gtsmodel.Status, err error), | 	loadPage func(*paging.Page) (statuses []*gtsmodel.Status, err error), | ||||||
| 	preFilter func(*gtsmodel.Status) (bool, error), | 	preFilter func(*gtsmodel.Status) (bool, error), | ||||||
| 	postFilter func(*timeline.StatusMeta) bool, | 	postFilter func(*gtsmodel.Status) (bool, error), | ||||||
| ) ( | ) ( | ||||||
| 	*apimodel.PageableResponse, | 	*apimodel.PageableResponse, | ||||||
| 	gtserror.WithCode, | 	gtserror.WithCode, | ||||||
|  | @ -99,7 +107,8 @@ func (p *Processor) getStatusTimeline( | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// ... | 	// ... | ||||||
| 	statuses, err := timeline.Load(ctx, | 	apiStatuses, lo, hi, err := timeline.Load(ctx, | ||||||
|  | 
 | ||||||
| 		page, | 		page, | ||||||
| 
 | 
 | ||||||
| 		// ... | 		// ... | ||||||
|  | @ -110,10 +119,12 @@ func (p *Processor) getStatusTimeline( | ||||||
| 			return p.state.DB.GetStatusesByIDs(ctx, ids) | 			return p.state.DB.GetStatusesByIDs(ctx, ids) | ||||||
| 		}, | 		}, | ||||||
| 
 | 
 | ||||||
| 		// ... | 		// Pre-filtering function, | ||||||
|  | 		// i.e. filter before caching. | ||||||
| 		preFilter, | 		preFilter, | ||||||
| 
 | 
 | ||||||
| 		// ... | 		// Post-filtering function, | ||||||
|  | 		// i.e. filter after caching. | ||||||
| 		postFilter, | 		postFilter, | ||||||
| 
 | 
 | ||||||
| 		// ... | 		// ... | ||||||
|  | @ -132,192 +143,16 @@ func (p *Processor) getStatusTimeline( | ||||||
| 		}, | 		}, | ||||||
| 	) | 	) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		panic(err) | 		err := gtserror.Newf("error loading timeline: %w", err) | ||||||
| 	} | 		return nil, gtserror.WrapWithCode(http.StatusInternalServerError, err) | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (p *Processor) getTimeline( |  | ||||||
| 	ctx context.Context, |  | ||||||
| 	requester *gtsmodel.Account, |  | ||||||
| 	timeline *timeline.StatusTimeline, |  | ||||||
| 	page *paging.Page, |  | ||||||
| 	pgPath string, // timeline page path |  | ||||||
| 	pgQuery url.Values, // timeline query parameters |  | ||||||
| 	filterCtx statusfilter.FilterContext, |  | ||||||
| 	load func(*paging.Page) (statuses []*gtsmodel.Status, next *paging.Page, err error), // timeline cache load function |  | ||||||
| 	filter func(*gtsmodel.Status) bool, // per-request filtering function, done AFTER timeline caching |  | ||||||
| ) ( |  | ||||||
| 	*apimodel.PageableResponse, |  | ||||||
| 	gtserror.WithCode, |  | ||||||
| ) { |  | ||||||
| 	// Load timeline with cache / loader funcs. |  | ||||||
| 	statuses, errWithCode := p.loadTimeline(ctx, |  | ||||||
| 		timeline, |  | ||||||
| 		page, |  | ||||||
| 		load, |  | ||||||
| 		filter, |  | ||||||
| 	) |  | ||||||
| 	if errWithCode != nil { |  | ||||||
| 		return nil, errWithCode |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if len(statuses) == 0 { | 	// Package returned API statuses as pageable response. | ||||||
| 		// Check for an empty timeline rsp. |  | ||||||
| 		return paging.EmptyResponse(), nil |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// Get the lowest and highest |  | ||||||
| 	// ID values, used for paging. |  | ||||||
| 	lo := statuses[len(statuses)-1].ID |  | ||||||
| 	hi := statuses[0].ID |  | ||||||
| 
 |  | ||||||
| 	var ( |  | ||||||
| 		filters []*gtsmodel.Filter |  | ||||||
| 		mutes   *usermute.CompiledUserMuteList |  | ||||||
| 	) |  | ||||||
| 
 |  | ||||||
| 	if requester != nil { |  | ||||||
| 		var err error |  | ||||||
| 
 |  | ||||||
| 		// Fetch all filters relevant for requesting account. |  | ||||||
| 		filters, err = p.state.DB.GetFiltersForAccountID(ctx, |  | ||||||
| 			requester.ID, |  | ||||||
| 		) |  | ||||||
| 		if err != nil && !errors.Is(err, db.ErrNoEntries) { |  | ||||||
| 			err := gtserror.Newf("error getting account filters: %w", err) |  | ||||||
| 			return nil, gtserror.NewErrorInternalError(err) |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		// Get a list of all account mutes for requester. |  | ||||||
| 		allMutes, err := p.state.DB.GetAccountMutes(ctx, |  | ||||||
| 			requester.ID, |  | ||||||
| 			nil, // nil page, i.e. all |  | ||||||
| 		) |  | ||||||
| 		if err != nil && !errors.Is(err, db.ErrNoEntries) { |  | ||||||
| 			err := gtserror.Newf("error getting account mutes: %w", err) |  | ||||||
| 			return nil, gtserror.NewErrorInternalError(err) |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		// Compile all account mutes to useable form. |  | ||||||
| 		mutes = usermute.NewCompiledUserMuteList(allMutes) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// NOTE: |  | ||||||
| 	// Right now this is not ideal, as we perform mute and |  | ||||||
| 	// status filtering *after* the above load loop, so we |  | ||||||
| 	// could end up with no statuses still AFTER all loading. |  | ||||||
| 	// |  | ||||||
| 	// In a PR coming *soon* we will move the filtering and |  | ||||||
| 	// status muting into separate module similar to the visibility |  | ||||||
| 	// filtering and caching which should move it to the above |  | ||||||
| 	// load loop and provided function. |  | ||||||
| 
 |  | ||||||
| 	// API response requires them in interface{} form. |  | ||||||
| 	items := make([]interface{}, 0, len(statuses)) |  | ||||||
| 
 |  | ||||||
| 	for _, status := range statuses { |  | ||||||
| 		// Convert internal status model to frontend model. |  | ||||||
| 		apiStatus, err := p.converter.StatusToAPIStatus(ctx, |  | ||||||
| 			status, |  | ||||||
| 			requester, |  | ||||||
| 			filterCtx, |  | ||||||
| 			filters, |  | ||||||
| 			mutes, |  | ||||||
| 		) |  | ||||||
| 		if err != nil && !errors.Is(err, statusfilter.ErrHideStatus) { |  | ||||||
| 			log.Errorf(ctx, "error converting status: %v", err) |  | ||||||
| 			continue |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		if apiStatus != nil { |  | ||||||
| 			// Append status to return slice. |  | ||||||
| 			items = append(items, apiStatus) |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// Package converted API statuses as pageable response. |  | ||||||
| 	return paging.PackageResponse(paging.ResponseParams{ | 	return paging.PackageResponse(paging.ResponseParams{ | ||||||
| 		Items: items, | 		Items: xslices.ToAny(apiStatuses), | ||||||
|  | 		Path:  pgPath, | ||||||
| 		Next:  page.Next(lo, hi), | 		Next:  page.Next(lo, hi), | ||||||
| 		Prev:  page.Prev(lo, hi), | 		Prev:  page.Prev(lo, hi), | ||||||
| 		Path:  pgPath, |  | ||||||
| 		Query: pgQuery, | 		Query: pgQuery, | ||||||
| 	}), nil | 	}), nil | ||||||
| } | } | ||||||
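paging.ResponseParams.Items takes a []interface{}, so the typed API statuses must be widened element by element; that is all xslices.ToAny is assumed to do in the call above. A generic stand-in with that presumed behaviour:

    // toAny widens a typed slice to []any, element by element
    // (presumed equivalent of xslices.ToAny as used above).
    func toAny[T any](in []T) []any {
        out := make([]any, len(in))
        for i, v := range in {
            out[i] = v
        }
        return out
    }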
| 
 |  | ||||||
| func (p *Processor) loadTimeline( |  | ||||||
| 	ctx context.Context, |  | ||||||
| 	timeline *cache.TimelineCache[*gtsmodel.Status], |  | ||||||
| 	page *paging.Page, |  | ||||||
| 	load func(*paging.Page) (statuses []*gtsmodel.Status, next *paging.Page, err error), |  | ||||||
| 	filter func(*gtsmodel.Status) bool, |  | ||||||
| ) ( |  | ||||||
| 	[]*gtsmodel.Status, |  | ||||||
| 	gtserror.WithCode, |  | ||||||
| ) { |  | ||||||
| 	if load == nil { |  | ||||||
| 		// nil check outside |  | ||||||
| 		// below main loop. |  | ||||||
| 		panic("nil func") |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if page == nil { |  | ||||||
| 		const text = "timeline must be paged" |  | ||||||
| 		return nil, gtserror.NewErrorBadRequest( |  | ||||||
| 			errors.New(text), |  | ||||||
| 			text, |  | ||||||
| 		) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// Try load statuses from cache. |  | ||||||
| 	statuses := timeline.Select(page) |  | ||||||
| 
 |  | ||||||
| 	// Filter statuses using provided function. |  | ||||||
| 	statuses = slices.DeleteFunc(statuses, filter) |  | ||||||
| 
 |  | ||||||
| 	// Check if more statuses need to be loaded. |  | ||||||
| 	if limit := page.Limit; len(statuses) < limit { |  | ||||||
| 
 |  | ||||||
| 		// Set first page |  | ||||||
| 		// query to load. |  | ||||||
| 		nextPg := page |  | ||||||
| 
 |  | ||||||
| 		for i := 0; i < 5; i++ { |  | ||||||
| 			var err error |  | ||||||
| 			var next []*gtsmodel.Status |  | ||||||
| 
 |  | ||||||
| 			// Load next timeline statuses. |  | ||||||
| 			next, nextPg, err = load(nextPg) |  | ||||||
| 			if err != nil { |  | ||||||
| 				err := gtserror.Newf("error loading timeline: %w", err) |  | ||||||
| 				return nil, gtserror.NewErrorInternalError(err) |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			// An empty next page means no more. |  | ||||||
| 			if len(next) == 0 && nextPg == nil { |  | ||||||
| 				break |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			// Cache loaded statuses. |  | ||||||
| 			timeline.Insert(next...) |  | ||||||
| 
 |  | ||||||
| 			// Filter statuses using provided function, |  | ||||||
| 			// this must be done AFTER cache insert but |  | ||||||
| 			// BEFORE adding to slice, as this is used |  | ||||||
| 			// for request-specific timeline filtering, |  | ||||||
| 			// as opposed to filtering for entire cache. |  | ||||||
| 			next = slices.DeleteFunc(next, filter) |  | ||||||
| 
 |  | ||||||
| 			// Append loaded statuses to return. |  | ||||||
| 			statuses = append(statuses, next...) |  | ||||||
| 
 |  | ||||||
| 			if len(statuses) >= limit { |  | ||||||
| 				// We loaded all the statuses |  | ||||||
| 				// that were requested of us! |  | ||||||
| 				break |  | ||||||
| 			} |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return statuses, nil |  | ||||||
| } |  | ||||||
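The loadTimeline helper in this hunk captures the core cache-backed paging pattern the timeline rework is built around: serve whatever the timeline cache already holds for the requested page, drop entries rejected by the caller's filter, and only then fall back to the load function for further pages, bounded to a handful of attempts, until the requested limit is met or the source is exhausted. The following is a minimal self-contained sketch of that pattern, using simplified hypothetical Cache, Page, and Status types rather than GoToSocial's internal ones.

package main

import (
	"fmt"
	"slices"
)

// Status stands in for the internal status model.
type Status struct{ ID, AccountID string }

// Page stands in for a paging cursor with a limit.
type Page struct{ Limit int }

// Cache stands in for a timeline cache: Select returns whatever is
// already cached for the page, Insert stores newly loaded statuses.
type Cache struct{ items []*Status }

func (c *Cache) Select(_ *Page) []*Status { return slices.Clone(c.items) }
func (c *Cache) Insert(in ...*Status)     { c.items = append(c.items, in...) }

// loadTimeline mirrors the pattern above: cache first, then at most
// a few loader calls, filtering each batch before returning it.
func loadTimeline(
	cache *Cache,
	page *Page,
	load func(*Page) ([]*Status, *Page, error),
	filter func(*Status) bool,
) ([]*Status, error) {
	// Serve what the cache already holds,
	// minus request-filtered entries.
	statuses := slices.DeleteFunc(cache.Select(page), filter)

	// Page to hand to
	// the loader next.
	nextPg := page

	// Load more only while the limit isn't met,
	// bounded to a few attempts to avoid spinning.
	for i := 0; len(statuses) < page.Limit && i < 5; i++ {
		next, newPg, err := load(nextPg)
		if err != nil {
			return nil, fmt.Errorf("error loading timeline: %w", err)
		}

		// No results and no further
		// page: the source is exhausted.
		if len(next) == 0 && newPg == nil {
			break
		}

		// Cache everything loaded, but only return
		// entries passing the request-specific filter.
		cache.Insert(next...)
		statuses = append(statuses, slices.DeleteFunc(next, filter)...)
		nextPg = newPg
	}

	return statuses, nil
}

func main() {
	calls := 0
	load := func(_ *Page) ([]*Status, *Page, error) {
		calls++
		if calls > 1 {
			return nil, nil, nil // nothing further to load
		}
		return []*Status{
			{ID: "01B", AccountID: "alice"},
			{ID: "01A", AccountID: "bob"},
		}, nil, nil
	}
	hideBob := func(s *Status) bool { return s.AccountID == "bob" }

	out, _ := loadTimeline(&Cache{}, &Page{Limit: 10}, load, hideBob)
	fmt.Println(len(out)) // 1: only alice's status passes the filter
}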
|  |  | ||||||
|  | @ -682,13 +682,8 @@ func (p *clientAPI) CreateBlock(ctx context.Context, cMsg *messages.FromClientAP | ||||||
| 		return gtserror.Newf("%T not parseable as *gtsmodel.Block", cMsg.GTSModel) | 		return gtserror.Newf("%T not parseable as *gtsmodel.Block", cMsg.GTSModel) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Remove blocker's statuses from blocker's timeline. | 	// Perform any necessary timeline invalidation. | ||||||
| 	p.state.Caches.Timelines.Home.InvalidateFrom(block.AccountID, "AccountID", block.TargetAccountID) | 	p.surface.invalidateTimelinesForBlock(ctx, block) | ||||||
| 	p.state.Caches.Timelines.Home.InvalidateFrom(block.AccountID, "BoostOfAccountID", block.TargetAccountID) |  | ||||||
| 
 |  | ||||||
| 	// Remove blockee's statuses from blockee's timeline. |  | ||||||
| 	p.state.Caches.Timelines.Home.InvalidateFrom(block.TargetAccountID, "AccountID", block.AccountID) |  | ||||||
| 	p.state.Caches.Timelines.Home.InvalidateFrom(block.TargetAccountID, "BoostOfAccountID", block.AccountID) |  | ||||||
| 
 | 
 | ||||||
| 	// TODO: same with notifications? | 	// TODO: same with notifications? | ||||||
| 	// TODO: same with bookmarks? | 	// TODO: same with bookmarks? | ||||||
|  |  | ||||||
|  | @ -701,53 +701,19 @@ func (p *fediAPI) CreateBlock(ctx context.Context, fMsg *messages.FromFediAPI) e | ||||||
| 		return gtserror.Newf("%T not parseable as *gtsmodel.Block", fMsg.GTSModel) | 		return gtserror.Newf("%T not parseable as *gtsmodel.Block", fMsg.GTSModel) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Remove each account's posts from the other's timelines. | 	// Perform any necessary timeline invalidation. | ||||||
| 	// | 	p.surface.invalidateTimelinesForBlock(ctx, block) | ||||||
| 	// First home timelines. |  | ||||||
| 	if err := p.state.Timelines.Home.WipeItemsFromAccountID( |  | ||||||
| 		ctx, |  | ||||||
| 		block.AccountID, |  | ||||||
| 		block.TargetAccountID, |  | ||||||
| 	); err != nil { |  | ||||||
| 		log.Errorf(ctx, "error wiping items from block -> target's home timeline: %v", err) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if err := p.state.Timelines.Home.WipeItemsFromAccountID( |  | ||||||
| 		ctx, |  | ||||||
| 		block.TargetAccountID, |  | ||||||
| 		block.AccountID, |  | ||||||
| 	); err != nil { |  | ||||||
| 		log.Errorf(ctx, "error wiping items from target -> block's home timeline: %v", err) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// Now list timelines. |  | ||||||
| 	if err := p.state.Timelines.List.WipeItemsFromAccountID( |  | ||||||
| 		ctx, |  | ||||||
| 		block.AccountID, |  | ||||||
| 		block.TargetAccountID, |  | ||||||
| 	); err != nil { |  | ||||||
| 		log.Errorf(ctx, "error wiping items from block -> target's list timeline(s): %v", err) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if err := p.state.Timelines.List.WipeItemsFromAccountID( |  | ||||||
| 		ctx, |  | ||||||
| 		block.TargetAccountID, |  | ||||||
| 		block.AccountID, |  | ||||||
| 	); err != nil { |  | ||||||
| 		log.Errorf(ctx, "error wiping items from target -> block's list timeline(s): %v", err) |  | ||||||
| 	} |  | ||||||
| 
 | 
 | ||||||
| 	// Remove any follows that existed between blocker + blockee. | 	// Remove any follows that existed between blocker + blockee. | ||||||
| 	if err := p.state.DB.DeleteFollow( | 	// (note this handles removing any necessary list entries). | ||||||
| 		ctx, | 	if err := p.state.DB.DeleteFollow(ctx, | ||||||
| 		block.AccountID, | 		block.AccountID, | ||||||
| 		block.TargetAccountID, | 		block.TargetAccountID, | ||||||
| 	); err != nil { | 	); err != nil { | ||||||
| 		log.Errorf(ctx, "error deleting follow from block -> target: %v", err) | 		log.Errorf(ctx, "error deleting follow from block -> target: %v", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if err := p.state.DB.DeleteFollow( | 	if err := p.state.DB.DeleteFollow(ctx, | ||||||
| 		ctx, |  | ||||||
| 		block.TargetAccountID, | 		block.TargetAccountID, | ||||||
| 		block.AccountID, | 		block.AccountID, | ||||||
| 	); err != nil { | 	); err != nil { | ||||||
|  | @ -755,16 +721,14 @@ func (p *fediAPI) CreateBlock(ctx context.Context, fMsg *messages.FromFediAPI) e | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Remove any follow requests that existed between blocker + blockee. | 	// Remove any follow requests that existed between blocker + blockee. | ||||||
| 	if err := p.state.DB.DeleteFollowRequest( | 	if err := p.state.DB.DeleteFollowRequest(ctx, | ||||||
| 		ctx, |  | ||||||
| 		block.AccountID, | 		block.AccountID, | ||||||
| 		block.TargetAccountID, | 		block.TargetAccountID, | ||||||
| 	); err != nil { | 	); err != nil { | ||||||
| 		log.Errorf(ctx, "error deleting follow request from block -> target: %v", err) | 		log.Errorf(ctx, "error deleting follow request from block -> target: %v", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if err := p.state.DB.DeleteFollowRequest( | 	if err := p.state.DB.DeleteFollowRequest(ctx, | ||||||
| 		ctx, |  | ||||||
| 		block.TargetAccountID, | 		block.TargetAccountID, | ||||||
| 		block.AccountID, | 		block.AccountID, | ||||||
| 	); err != nil { | 	); err != nil { | ||||||
|  |  | ||||||
|  | @ -553,13 +553,8 @@ func (s *Surface) tagFollowersForStatus( | ||||||
| // deleteStatusFromTimelines completely removes the given status from all timelines. | // deleteStatusFromTimelines completely removes the given status from all timelines. | ||||||
| // It will also stream deletion of the status to all open streams. | // It will also stream deletion of the status to all open streams. | ||||||
| func (s *Surface) deleteStatusFromTimelines(ctx context.Context, statusID string) error { | func (s *Surface) deleteStatusFromTimelines(ctx context.Context, statusID string) error { | ||||||
| 	if err := s.State.Timelines.Home.WipeItemFromAllTimelines(ctx, statusID); err != nil { | 	s.State.Caches.Timelines.Home.RemoveByStatusIDs(statusID) | ||||||
| 		return err | 	s.State.Caches.Timelines.List.RemoveByStatusIDs(statusID) | ||||||
| 	} |  | ||||||
| 	if err := s.State.Timelines.List.WipeItemFromAllTimelines(ctx, statusID); err != nil { |  | ||||||
| 		return err |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	s.Stream.Delete(ctx, statusID) | 	s.Stream.Delete(ctx, statusID) | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
|  | @ -569,19 +564,8 @@ func (s *Surface) deleteStatusFromTimelines(ctx context.Context, statusID string | ||||||
| // stats, boost counts, etc) next time it's fetched by the timeline owner. This goes | // stats, boost counts, etc) next time it's fetched by the timeline owner. This goes | ||||||
| // both for the status itself, and for any boosts of the status. | // both for the status itself, and for any boosts of the status. | ||||||
| func (s *Surface) invalidateStatusFromTimelines(ctx context.Context, statusID string) { | func (s *Surface) invalidateStatusFromTimelines(ctx context.Context, statusID string) { | ||||||
| 	if err := s.State.Timelines.Home.UnprepareItemFromAllTimelines(ctx, statusID); err != nil { | 	s.State.Caches.Timelines.Home.UnprepareByStatusIDs(statusID) | ||||||
| 		log. | 	s.State.Caches.Timelines.List.UnprepareByStatusIDs(statusID) | ||||||
| 			WithContext(ctx). |  | ||||||
| 			WithField("statusID", statusID). |  | ||||||
| 			Errorf("error unpreparing status from home timelines: %v", err) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if err := s.State.Timelines.List.UnprepareItemFromAllTimelines(ctx, statusID); err != nil { |  | ||||||
| 		log. |  | ||||||
| 			WithContext(ctx). |  | ||||||
| 			WithField("statusID", statusID). |  | ||||||
| 			Errorf("error unpreparing status from list timelines: %v", err) |  | ||||||
| 	} |  | ||||||
| } | } | ||||||
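The distinction between the two operations above matters: deletion drops the status from the cached timelines entirely, while invalidation (per the doc comment) keeps the entry in place but forces it to be prepared again, with fresh stats and boost counts, the next time the timeline owner fetches it. Here is a small sketch of that difference, assuming a hypothetical cache entry type that pairs status metadata with an optional prepared API form; the names remove/unprepare and the entry layout are illustrative, not GoToSocial's actual types.

package main

import (
	"fmt"
	"slices"
)

// APIStatus stands in for the prepared, frontend form of a status.
type APIStatus struct{ Content string }

// entry is an illustrative timeline cache entry pairing bare status
// metadata with an optionally prepared API form.
type entry struct {
	statusID string
	prepared *APIStatus // nil until prepared for an API response
}

// remove drops matching entries from the timeline entirely,
// i.e. the deletion case.
func remove(timeline []*entry, statusID string) []*entry {
	return slices.DeleteFunc(timeline, func(e *entry) bool {
		return e.statusID == statusID
	})
}

// unprepare keeps matching entries but clears their prepared form,
// so they get re-rendered with fresh stats on the next fetch,
// i.e. the invalidation case.
func unprepare(timeline []*entry, statusID string) {
	for _, e := range timeline {
		if e.statusID == statusID {
			e.prepared = nil
		}
	}
}

func main() {
	tl := []*entry{{statusID: "01A", prepared: &APIStatus{Content: "hello"}}}

	unprepare(tl, "01A")
	fmt.Println(len(tl), tl[0].prepared == nil) // 1 true: kept, needs re-preparing

	tl = remove(tl, "01A")
	fmt.Println(len(tl)) // 0: gone entirely
}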
| 
 | 
 | ||||||
| // timelineStatusUpdate looks up HOME and LIST timelines of accounts | // timelineStatusUpdate looks up HOME and LIST timelines of accounts | ||||||
|  | @ -860,3 +844,57 @@ func (s *Surface) timelineStatusUpdateForTagFollowers( | ||||||
| 	} | 	} | ||||||
| 	return errs.Combine() | 	return errs.Combine() | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | // invalidateTimelinesForBlock ... | ||||||
|  | func (s *Surface) invalidateTimelinesForBlock(ctx context.Context, block *gtsmodel.Block) { | ||||||
|  | 
 | ||||||
|  | 	// Check if origin is local account, | ||||||
|  | 	// i.e. has status timeline caches. | ||||||
|  | 	if block.Account.IsLocal() { | ||||||
|  | 
 | ||||||
|  | 		// Remove target's statuses | ||||||
|  | 		// from origin's home timeline. | ||||||
|  | 		s.State.Caches.Timelines.Home. | ||||||
|  | 			MustGet(block.AccountID). | ||||||
|  | 			RemoveByAccountIDs(block.TargetAccountID) | ||||||
|  | 
 | ||||||
|  | 		// Get the IDs of any lists created by origin account. | ||||||
|  | 		listIDs, err := s.State.DB.GetListIDsByAccountID(ctx, block.AccountID) | ||||||
|  | 		if err != nil { | ||||||
|  | 			log.Errorf(ctx, "error getting account's list IDs for %s: %v", block.URI, err) | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Remove target's statuses from | ||||||
|  | 		// any of origin's list timelines. | ||||||
|  | 		for _, listID := range listIDs { | ||||||
|  | 			s.State.Caches.Timelines.List. | ||||||
|  | 				MustGet(listID). | ||||||
|  | 				RemoveByAccountIDs(block.TargetAccountID) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Check if target is local account, | ||||||
|  | 	// i.e. has status timeline caches. | ||||||
|  | 	if block.TargetAccount.IsLocal() { | ||||||
|  | 
 | ||||||
|  | 		// Remove origin's statuses | ||||||
|  | 		// from target's home timeline. | ||||||
|  | 		s.State.Caches.Timelines.Home. | ||||||
|  | 			MustGet(block.TargetAccountID). | ||||||
|  | 			RemoveByAccountIDs(block.AccountID) | ||||||
|  | 
 | ||||||
|  | 		// Get the IDs of any lists created by target account. | ||||||
|  | 		listIDs, err := s.State.DB.GetListIDsByAccountID(ctx, block.TargetAccountID) | ||||||
|  | 		if err != nil { | ||||||
|  | 			log.Errorf(ctx, "error getting target account's list IDs for %s: %v", block.URI, err) | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Remove origin's statuses from | ||||||
|  | 		// any of target's list timelines. | ||||||
|  | 		for _, listID := range listIDs { | ||||||
|  | 			s.State.Caches.Timelines.List. | ||||||
|  | 				MustGet(listID). | ||||||
|  | 				RemoveByAccountIDs(block.AccountID) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  |  | ||||||
|  | @ -21,6 +21,16 @@ import ( | ||||||
| 	"slices" | 	"slices" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
|  | // ToAny converts a slice of any input type | ||||||
|  | // to the abstract empty interface slice type. | ||||||
|  | func ToAny[T any](in []T) []any { | ||||||
|  | 	out := make([]any, len(in)) | ||||||
|  | 	for i, v := range in { | ||||||
|  | 		out[i] = v | ||||||
|  | 	} | ||||||
|  | 	return out | ||||||
|  | } | ||||||
|  | 
 | ||||||
| // GrowJust increases slice capacity to guarantee | // GrowJust increases slice capacity to guarantee | ||||||
| // extra room 'size', where in the case that it does | // extra room 'size', where in the case that it does | ||||||
| // need to allocate more it ONLY allocates 'size' extra. | // need to allocate more it ONLY allocates 'size' extra. | ||||||
|  |  | ||||||
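The new xslices.ToAny helper exists because Go has no direct conversion from a typed slice []T to []any: interface values are laid out differently in memory, so each element must be boxed and copied individually, which is exactly what the loop does. The earlier Items: xslices.ToAny(apiStatuses) call relies on this. Below is a small self-contained usage sketch, re-declaring the helper locally so it runs standalone.

package main

import "fmt"

// ToAny copies a typed slice into a []any element by element;
// Go provides no direct conversion between the two.
func ToAny[T any](in []T) []any {
	out := make([]any, len(in))
	for i, v := range in {
		out[i] = v
	}
	return out
}

func main() {
	type Status struct{ ID string }
	statuses := []*Status{{ID: "01A"}, {ID: "01B"}}

	// var items []any = statuses // compile error: cannot use []*Status as []any
	items := ToAny(statuses)
	fmt.Println(len(items)) // 2
}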