Mirror of https://github.com/superseriousbusiness/gotosocial.git
Commit 5ef24340f7 (parent 28499335ec)

    reslice the output if it's beyond length of 'lim'

2 changed files with 84 additions and 109 deletions
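In short, Load now keeps combining cached and freshly fetched timeline entries, and only reslices the combined result down to the requested page limit right before preparing API models: extra entries are trimmed from the end for ascending order, and from the start for descending order. A minimal sketch of that reslicing step, pulled out into a standalone helper purely for illustration (the helper name is hypothetical; in the diff below the logic lives inline in a switch inside Load):

// trimToLimit reslices metas so that at most lim entries remain,
// dropping overflow from the end (ascending) or the start (descending).
func trimToLimit(metas []*StatusMeta, lim int, ascending bool) []*StatusMeta {
	if len(metas) <= lim {
		// Nothing to trim.
		return metas
	}
	if ascending {
		// Trim extra entries from the end.
		return metas[:lim]
	}
	// Trim extra entries from the start.
	return metas[len(metas)-lim:]
}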
				
			
		
							
								
								
									
internal/cache/timeline/status.go (vendored): 185 changes
@@ -346,6 +346,13 @@ func (t *StatusTimeline) Load(
 	ord := page.Order()
 	dir := toDirection(ord)
 
+	// Use a copy of current page so
+	// we can repeatedly update it.
+	nextPg := new(paging.Page)
+	*nextPg = *page
+	nextPg.Min.Value = lo
+	nextPg.Max.Value = hi
+
 	// First we attempt to load status
 	// metadata entries from the timeline
 	// cache, up to given limit.
@@ -365,15 +372,6 @@ func (t *StatusTimeline) Load(
 			slices.Reverse(metas)
 		}
 
-		// Update paging values
-		// based on returned data.
-		lo, hi = nextPageParams(
-			lo, hi,
-			metas[len(metas)-1].ID,
-			metas[0].ID,
-			ord,
-		)
-
 		// Before we can do any filtering, we need
 		// to load status models for cached entries.
 		err := loadStatuses(metas, loadIDs)
@@ -381,28 +379,38 @@ func (t *StatusTimeline) Load(
 			return nil, "", "", gtserror.Newf("error loading statuses: %w", err)
 		}
 
+		// Update paging values
+		// based on returned data.
+		nextPageParams(nextPg,
+			metas[len(metas)-1].ID,
+			metas[0].ID,
+			ord,
+		)
+
+		// Before any further loading,
+		// store current lo,hi values,
+		// used for possible return.
+		lo = metas[len(metas)-1].ID
+		hi = metas[0].ID
+
 		// Drop all entries we failed to load statuses for.
 		metas = slices.DeleteFunc(metas, (*StatusMeta).isLoaded)
 
-		// Perform any post-filtering on cached status entries.
-		metas, _, err = doStatusPostFilter(metas, postFilter)
+		// Perform post-filtering on cached status entries.
+		metas, err = doStatusPostFilter(metas, postFilter)
 		if err != nil {
 			return nil, "", "", gtserror.Newf("error post-filtering statuses: %w", err)
 		}
 	}
 
-	var filtered []*StatusMeta
+	// Track all newly loaded status entries
+	// AFTER 'preFilter', but before 'postFilter',
+	// to later insert into timeline cache.
+	var justLoaded []*StatusMeta
 
 	// Check whether loaded enough from cache.
 	if need := lim - len(metas); need > 0 {
 
-		// Use a copy of current page so
-		// we can repeatedly update it.
-		nextPg := new(paging.Page)
-		*nextPg = *page
-		nextPg.Min.Value = lo
-		nextPg.Max.Value = hi
-
 		// Perform a maximum of 5
 		// load attempts fetching
 		// statuses to reach limit.
@@ -422,17 +430,12 @@ func (t *StatusTimeline) Load(
 
 			// Update paging values
 			// based on returned data.
-			lo, hi = nextPageParams(
-				lo, hi,
+			nextPageParams(nextPg,
 				statuses[len(statuses)-1].ID,
 				statuses[0].ID,
 				ord,
 			)
 
-			// Update paging params.
-			nextPg.Min.Value = lo
-			nextPg.Max.Value = hi
-
 			// Perform any pre-filtering on newly loaded statuses.
 			statuses, err = doStatusPreFilter(statuses, preFilter)
 			if err != nil {
@@ -450,15 +453,17 @@ func (t *StatusTimeline) Load(
 			// the cache in prepare() below.
 			uncached := toStatusMeta(statuses)
 
-			// Perform any post-filtering on recently loaded timeline entries.
-			newMetas, newFiltered, err := doStatusPostFilter(uncached, postFilter)
+			// Before any filtering append to newly loaded.
+			justLoaded = append(justLoaded, uncached...)
+
+			// Perform any post-filtering on loaded timeline entries.
+			filtered, err := doStatusPostFilter(uncached, postFilter)
 			if err != nil {
 				return nil, "", "", gtserror.Newf("error post-filtering statuses: %w", err)
 			}
 
-			// Append the meta to their relevant slices.
-			filtered = append(filtered, newFiltered...)
-			metas = append(metas, newMetas...)
+			// Append newly filtered meta entries.
+			metas = append(metas, filtered...)
 
 			// Check if we reached
 			// requested page limit.
@@ -468,65 +473,50 @@ func (t *StatusTimeline) Load(
 		}
 	}
 
-	// Reset the lo, hi paging parameters,
-	// so we can set the final return vals.
-	lo, hi = "", ""
-
 	// Returned frontend API models.
 	var apiStatuses []*apimodel.Status
 	if len(metas) > 0 {
-		var err error
+		switch {
+		case len(metas) <= lim:
+			// nothing to do
 
-		// Using meta and funcs, prepare frontend API models.
-		apiStatuses, err = t.prepare(ctx, metas, prepareAPI)
-		if err != nil {
-			return nil, "", "", gtserror.Newf("error preparing api statuses: %w", err)
+		case ord.Ascending():
+			// Ascending order was requested
+			// and we have more than limit, so
+			// trim extra metadata from end.
+			metas = metas[:lim]
+
+		// descending
+		default:
+			// Descending order was requested
+			// and we have more than limit, so
+			// trim extra metadata from start.
+			metas = metas[len(metas)-lim:]
 		}
 
-		// Get lo / hi from meta.
+		// Using meta and funcs, prepare frontend API models.
+		apiStatuses = prepareStatuses(ctx, metas, prepareAPI)
+
+		if hi == "" {
+			// Only set hi value if not
+			// already set, i.e. we never
+			// fetched any cached values.
+			hi = metas[0].ID
+		}
+
+		// Set lo value from fetched.
 		lo = metas[len(metas)-1].ID
-		hi = metas[0].ID
 	}
 
-	if len(filtered) > 0 {
+	if len(justLoaded) > 0 {
 		// Even if we don't return them, insert
 		// the excess (post-filtered) into cache.
-		t.cache.Insert(filtered...)
-
-		// Check filtered values for lo / hi values.
-		lo = minIf(lo, filtered[len(filtered)-1].ID)
-		hi = maxIf(hi, filtered[0].ID)
+		t.cache.Insert(justLoaded...)
 	}
 
 	return apiStatuses, lo, hi, nil
 }
 
-func minIf(id1, id2 string) string {
-	switch {
-	case id1 == "":
-		return id2
-	case id2 == "":
-		return id1
-	case id1 < id2:
-		return id1
-	default:
-		return id2
-	}
-}
-
-func maxIf(id1, id2 string) string {
-	switch {
-	case id1 == "":
-		return id2
-	case id2 == "":
-		return id1
-	case id1 > id2:
-		return id1
-	default:
-		return id2
-	}
-}
-
 // InsertOne allows you to insert a single status into the timeline, with optional prepared API model.
 func (t *StatusTimeline) InsertOne(status *gtsmodel.Status, prepared *apimodel.Status) {
 	t.cache.Insert(&StatusMeta{
@@ -676,34 +666,34 @@ func (t *StatusTimeline) UnprepareAll() {
 // Trim ...
 func (t *StatusTimeline) Trim(threshold float64) {
 
-	// ...
+	// Default trim dir.
 	dir := structr.Asc
 
-	// ...
+	// Calculate maximum allowed no.
+	// items as a percentage of max.
 	max := threshold * float64(t.max)
 
-	// ...
+	// Try load the last fetched
+	// timeline ordering, getting
+	// the inverse value for trimming.
 	if p := t.last.Load(); p != nil {
 		dir = !(*p)
 	}
 
-	// ...
+	// Trim timeline to 'max'.
 	t.cache.Trim(int(max), dir)
 }
 
 // Clear will remove all cached entries from underlying timeline.
 func (t *StatusTimeline) Clear() { t.cache.Trim(0, structr.Desc) }
 
-// prepare will take a slice of cached (or, freshly loaded!) StatusMeta{}
-// models, and use given functions to return prepared frontend API models.
-func (t *StatusTimeline) prepare(
+// prepareStatuses takes a slice of cached (or, freshly loaded!) StatusMeta{}
+// models, and use given function to return prepared frontend API models.
+func prepareStatuses(
 	ctx context.Context,
 	meta []*StatusMeta,
 	prepareAPI func(*gtsmodel.Status) (*apimodel.Status, error),
-) (
-	[]*apimodel.Status,
-	error,
-) {
+) []*apimodel.Status {
 	switch { //nolint:gocritic
 	case prepareAPI == nil:
 		panic("nil prepare fn")
@@ -712,7 +702,6 @@ func (t *StatusTimeline) prepare(
 	// Iterate the given StatusMeta objects for pre-prepared
 	// frontend models, otherwise attempting to prepare them.
 	apiStatuses := make([]*apimodel.Status, 0, len(meta))
-	unprepared := make([]*StatusMeta, 0, len(meta))
 	for _, meta := range meta {
 
 		if meta.loaded == nil {
@@ -730,10 +719,6 @@ func (t *StatusTimeline) prepare(
 				log.Errorf(ctx, "error preparing status %s: %v", meta.loaded.URI, err)
 				continue
 			}
-
-			// Add this meta to list of unprepared,
-			// for later re-caching in the timeline.
-			unprepared = append(unprepared, meta)
 		}
 
 		if meta.prepared != nil {
@@ -745,13 +730,7 @@ func (t *StatusTimeline) prepare(
 		}
 	}
 
-	if len(unprepared) != 0 {
-		// Re-insert all (previously) unprepared
-		// status meta types into timeline cache.
-		t.cache.Insert(unprepared...)
-	}
-
-	return apiStatuses, nil
+	return apiStatuses
 }
 
 // loadStatuses loads statuses using provided callback
@@ -844,17 +823,14 @@ func doStatusPreFilter(statuses []*gtsmodel.Status, filter func(*gtsmodel.Status
 // doStatusPostFilter performs given filter function on provided status meta,
 // expecting that embedded status is already loaded, returning filtered status
 // meta, as well as those *filtered out*. returns early if error is returned.
-func doStatusPostFilter(metas []*StatusMeta, filter func(*gtsmodel.Status) (bool, error)) ([]*StatusMeta, []*StatusMeta, error) {
+func doStatusPostFilter(metas []*StatusMeta, filter func(*gtsmodel.Status) (bool, error)) ([]*StatusMeta, error) {
 
 	// Check for provided
 	// filter function.
 	if filter == nil {
-		return metas, nil, nil
+		return metas, nil
 	}
 
-	// Prepare a slice to store filtered statuses.
-	filtered := make([]*StatusMeta, 0, len(metas))
-
 	// Iterate through input metas.
 	for i := 0; i < len(metas); {
 		meta := metas[i]
@@ -862,13 +838,12 @@ func doStatusPostFilter(metas []*StatusMeta, filter func(*gtsmodel.Status) (bool
 		// Pass through filter func.
 		ok, err := filter(meta.loaded)
 		if err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 
 		if ok {
-			// Delete meta and add to filtered.
+			// Delete meta entry from input slice.
 			metas = slices.Delete(metas, i, i+1)
-			filtered = append(filtered, meta)
 			continue
 		}
 
@@ -876,5 +851,5 @@ func doStatusPostFilter(metas []*StatusMeta, filter func(*gtsmodel.Status) (bool
 		i++
 	}
 
-	return metas, filtered, nil
+	return metas, nil
 }
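A note on the narrowed doStatusPostFilter signature above: the second return slice of filtered-out entries is gone, because everything freshly loaded is now tracked in justLoaded and inserted into the cache regardless of filtering, while matches are simply deleted from the input slice in place. A rough sketch of the new call shape (the filter closure and its boost check are hypothetical, shown only to illustrate that returning true drops the entry):

// Hypothetical post-filter: drop boosts from the returned page.
metas, err := doStatusPostFilter(metas, func(s *gtsmodel.Status) (bool, error) {
	// true = filter this status out of the visible page.
	return s.BoostOfID != "", nil
})
if err != nil {
	return nil, "", "", gtserror.Newf("error post-filtering statuses: %w", err)
}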
							
								
								
									
internal/cache/timeline/timeline.go (vendored): 8 changes
@@ -29,14 +29,14 @@ import (
 // on the paging order, the cursor value gets
 // updated while maintaining the boundary value.
 func nextPageParams(
-	curLo, curHi string,
+	page *paging.Page,
 	nextLo, nextHi string,
 	order paging.Order,
-) (lo string, hi string) {
+) {
 	if order.Ascending() {
-		return nextLo, curHi
+		page.Min.Value = nextLo
 	} else /* i.e. descending */ { //nolint:revive
-		return curLo, nextHi
+		page.Max.Value = nextHi
 	}
 }
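Taken together with the Load changes above, nextPageParams now mutates a caller-owned paging.Page copy in place rather than threading lo/hi strings through every call. A minimal sketch of the call pattern, reusing the names from the diffs (nothing new is introduced here):

// Work on a copy of the caller's page so repeated
// load attempts can advance it without side effects.
nextPg := new(paging.Page)
*nextPg = *page

// After each batch, advance the moving cursor using the batch's ID bounds;
// the boundary on the other side of the page is left untouched.
nextPageParams(nextPg, statuses[len(statuses)-1].ID, statuses[0].ID, ord)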