- codeberg.org/gruf/go-bytesize v1.0.3 -> v1.0.4
- codeberg.org/gruf/go-kv/v2 v2.0.6 -> v2.0.7
- codeberg.org/gruf/go-mutexes v1.5.2 -> v1.5.3
- codeberg.org/gruf/go-structr v0.9.7 -> v0.9.8
- codeberg.org/gruf/go-ffmpreg v0.6.8 -> v0.6.9
- github.com/tomnomnom/linkheader HEAD@2018 -> HEAD@2025

All of the above codeberg.org/gruf updates are in preparation for Go 1.25, except for bytesize, and also ffmpreg, which is a rebuild with the latest version of ffmpeg (v5.1.7).

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4386
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
package mangler

import (
	"sync/atomic"
	"unsafe"

	"codeberg.org/gruf/go-xunsafe"
)

var manglers cache

// cache is a concurrency-safe map[xunsafe.TypeInfo]Mangler
// cache, designed for heavy reads but with unfortunately expensive
// writes. it is designed such that after some initial load period
// in which functions are cached by types, all future ops are reads.
type cache struct{ p unsafe.Pointer }

// Get will check cache for mangler func under key.
func (c *cache) Get(t xunsafe.TypeInfo) Mangler {
	if p := c.load(); p != nil {
		return (*p)[t]
	}
	return nil
}

// Put will place given mangler func in cache under key, if not already exists.
func (c *cache) Put(t xunsafe.TypeInfo, fn Mangler) {
	for {
		p := c.load()

		var cache map[xunsafe.TypeInfo]Mangler

		if p != nil {
			if _, ok := (*p)[t]; ok {
				return
			}

			cache = make(map[xunsafe.TypeInfo]Mangler, len(*p)+1)
			for key, value := range *p {
				cache[key] = value
			}
		} else {
			cache = make(map[xunsafe.TypeInfo]Mangler, 1)
		}

		cache[t] = fn

		if c.cas(p, &cache) {
			return
		}
	}
}

// load is a typed wrapper around atomic.LoadPointer().
func (c *cache) load() *map[xunsafe.TypeInfo]Mangler {
	return (*map[xunsafe.TypeInfo]Mangler)(atomic.LoadPointer(&c.p))
}

// cas is a typed wrapper around atomic.CompareAndSwapPointer().
func (c *cache) cas(old, new *map[xunsafe.TypeInfo]Mangler) bool {
	return atomic.CompareAndSwapPointer(&c.p, unsafe.Pointer(old), unsafe.Pointer(new))
}
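
The cache above is a copy-on-write map behind a single atomic pointer: Get is one atomic load plus a plain map lookup, while Put copies the current map, adds the new entry, and publishes the copy with compare-and-swap, retrying if another writer won the race. As a rough sketch of how the rest of the package presumably drives it, here is a minimal get-or-create wrapper; loadOrCreate and newMangler are assumed names for illustration only, not part of this file.

func loadOrCreate(t xunsafe.TypeInfo) Mangler {
	// Fast path: after the initial warm-up period described in the
	// cache doc comment, this is the only branch ever taken.
	if fn := manglers.Get(t); fn != nil {
		return fn
	}

	// Slow path: build a mangler for this type and publish it.
	// Put is a no-op if another goroutine cached this type first.
	fn := newMangler(t) // assumed constructor, for illustration only
	manglers.Put(t, fn)
	return fn
}

Losing the CAS race in Put only costs a redundant map copy and a retry; readers always observe a pointer to a fully built map, which is why Get needs no lock.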