[chore] update dependencies (#4386)

- codeberg.org/gruf/go-bytesize v1.0.3 -> v1.0.4
- codeberg.org/gruf/go-kv/v2 v2.0.6 -> v2.0.7
- codeberg.org/gruf/go-mutexes v1.5.2 -> v1.5.3
- codeberg.org/gruf/go-structr v0.9.7 -> v0.9.8
- codeberg.org/gruf/go-ffmpreg v0.6.8 -> v0.6.9
- github.com/tomnomnom/linkheader HEAD@2018 -> HEAD@2025

All of the above codeberg.org/gruf updates are in preparation for Go 1.25, except for bytesize. The ffmpreg update is additionally a rebuild against the latest version of ffmpeg (v5.1.7).

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4386
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
This commit is contained in:
kim 2025-08-21 16:41:50 +02:00 committed by kim
commit a79f83cbde
38 changed files with 1246 additions and 964 deletions

View file

@ -1,230 +0,0 @@
//go:build go1.19 && !go1.25
package mangler
import (
"reflect"
"unsafe"
)
// append_uint16 appends the 2 bytes of 'u' to 'b' in little-endian order.
func append_uint16(b []byte, u uint16) []byte {
	return append(b, byte(u), byte(u>>8)) // LE
}
// append_uint32 appends the 4 bytes of 'u' to 'b' in little-endian order.
func append_uint32(b []byte, u uint32) []byte {
	return append(b, byte(u), byte(u>>8), byte(u>>16), byte(u>>24)) // LE
}
// append_uint64 appends the 8 bytes of 'u' to 'b' in little-endian order.
func append_uint64(b []byte, u uint64) []byte {
	return append(b,
		byte(u), byte(u>>8), byte(u>>16), byte(u>>24), // LE low word
		byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56), // LE high word
	)
}
// typecontext carries contextual information about the reflected
// type currently being processed while loading a Mangler function.
type typecontext struct {
	isptr  bool         // type was reached via pointer (set by loadReflectPtr)
	direct bool         // value still directly addressable, i.e. not behind nesting/ptr (see set_nested)
	ntype  reflect.Type // enclosing (parent) type after descending via set_nested
	rtype  reflect.Type // the current reflected type being examined
}
// set_nested records a descent into a nested type: the current rtype
// becomes the parent ntype (the caller then assigns the new rtype), and
// 'direct' survives only if it was already set, this nesting is direct,
// and the value was not reached through a pointer.
func (ctx *typecontext) set_nested(direct bool) {
	ctx.direct = ctx.direct && direct && !ctx.isptr
	ctx.ntype = ctx.rtype
	ctx.rtype = nil
	ctx.isptr = false
}
// deref_ptr_mangler returns a Mangler that dereferences a pointer up to
// n times before passing the final pointed-to data to 'mangle'. Each
// successful dereference emits a '1' marker; a nil pointer at any level
// emits a terminating '0' marker instead.
func deref_ptr_mangler(ctx typecontext, mangle Mangler, n uint) Mangler {
	if mangle == nil || n == 0 {
		panic("bad input")
	}

	// If this is a direct value type, i.e. non-nested primitive,
	// or part of a single-field struct / single element array
	// then it can be treated as a direct ptr with 1 less deref.
	if ctx.direct {
		n--
	}

	return func(buf []byte, ptr unsafe.Pointer) []byte {
		// Deref n number times.
		for i := n; i > 0; i-- {

			if ptr == nil {
				// Check for nil values
				buf = append(buf, '0')
				return buf
			}

			// Further deref ptr
			buf = append(buf, '1')
			ptr = *(*unsafe.Pointer)(ptr)
		}

		if ptr == nil {
			// Final nil val check
			buf = append(buf, '0')
			return buf
		}

		// Mangle fully deref'd
		buf = append(buf, '1')
		buf = mangle(buf, ptr)
		return buf
	}
}
// iter_slice_mangler returns a Mangler that iterates a slice value
// (received as a pointer to its slice header), mangling each element
// with 'mangle' and separating elements with commas. ctx.rtype must
// be the slice's element type (see loadReflectSlice).
func iter_slice_mangler(ctx typecontext, mangle Mangler) Mangler {
	if ctx.rtype == nil || mangle == nil {
		panic("bad input")
	}

	// memory size of elem.
	esz := ctx.rtype.Size()

	return func(buf []byte, ptr unsafe.Pointer) []byte {
		// Get data as slice hdr.
		hdr := (*slice_header)(ptr)

		for i := 0; i < hdr.len; i++ {
			// Mangle data at slice index.
			eptr := array_at(hdr.data, esz, i)
			buf = mangle(buf, eptr)
			buf = append(buf, ',')
		}

		if hdr.len > 0 {
			// Drop final comma.
			buf = buf[:len(buf)-1]
		}

		return buf
	}
}
// iter_array_mangler returns a Mangler that mangles each element of an
// array, comma-separated. Expects ctx.ntype to be the array type and
// ctx.rtype its element type (as set up by loadReflectArray).
func iter_array_mangler(ctx typecontext, mangle Mangler) Mangler {
	if ctx.rtype == nil || mangle == nil {
		panic("bad input")
	}

	// no. array elements.
	n := ctx.ntype.Len()

	// Optimize
	// easy cases.
	switch n {
	case 0:
		// Empty array: nothing to write.
		return empty_mangler
	case 1:
		// Single element: no iteration or separators needed.
		return mangle
	}

	// memory size of elem.
	esz := ctx.rtype.Size()

	return func(buf []byte, ptr unsafe.Pointer) []byte {
		for i := 0; i < n; i++ {
			// Mangle data at array index.
			offset := esz * uintptr(i)
			eptr := add(ptr, offset)
			buf = mangle(buf, eptr)
			buf = append(buf, ',')
		}

		if n > 0 {
			// Drop final comma.
			buf = buf[:len(buf)-1]
		}

		return buf
	}
}
// iter_struct_mangler returns a Mangler that mangles every field of a
// struct in declaration order, comma-separated. 'manglers' must contain
// exactly one non-nil Mangler per field of ctx.ntype.
func iter_struct_mangler(ctx typecontext, manglers []Mangler) Mangler {
	if ctx.rtype == nil || len(manglers) != ctx.ntype.NumField() {
		panic("bad input")
	}

	// Optimized easy cases.
	switch len(manglers) {
	case 0:
		return empty_mangler
	case 1:
		return manglers[0]
	}

	// field pairs a field's mangler
	// with its byte offset in the struct.
	type field struct {
		mangle Mangler
		offset uintptr
	}

	// Bundle together the fields and manglers.
	fields := make([]field, ctx.ntype.NumField())
	for i := range fields {
		rfield := ctx.ntype.Field(i)
		fields[i].offset = rfield.Offset
		fields[i].mangle = manglers[i]
		if fields[i].mangle == nil {
			panic("bad input")
		}
	}

	return func(buf []byte, ptr unsafe.Pointer) []byte {
		for i := range fields {
			// Get struct field ptr via offset.
			fptr := add(ptr, fields[i].offset)

			// Mangle the struct field data.
			buf = fields[i].mangle(buf, fptr)
			buf = append(buf, ',')
		}

		if len(fields) > 0 {
			// Drop final comma.
			buf = buf[:len(buf)-1]
		}

		return buf
	}
}
// empty_mangler is a no-op Mangler that
// returns the buffer entirely unchanged.
func empty_mangler(buf []byte, _ unsafe.Pointer) []byte {
	return buf
}
// array_at returns ptr to index in array at ptr, given element size.
// Uses unsafe.Add (Go 1.17+), the idiomatic form of pointer arithmetic,
// instead of the raw unsafe.Pointer(uintptr(...)) round-trip.
func array_at(ptr unsafe.Pointer, esz uintptr, i int) unsafe.Pointer {
	return unsafe.Add(ptr, esz*uintptr(i))
}
// add returns the ptr addition of starting ptr and a delta.
// Uses unsafe.Add (Go 1.17+), the idiomatic form of pointer arithmetic,
// instead of the raw unsafe.Pointer(uintptr(...)) round-trip.
func add(ptr unsafe.Pointer, delta uintptr) unsafe.Pointer {
	return unsafe.Add(ptr, delta)
}
// slice_header mirrors the runtime memory
// layout of a Go slice header (data, len, cap).
type slice_header struct {
	data unsafe.Pointer // pointer to backing array
	len  int            // number of elements
	cap  int            // backing array capacity
}
// eface_data returns the data pointer of the empty interface value 'a',
// relying on the runtime representation of an eface being a pair of
// (type pointer, data pointer).
func eface_data(a any) unsafe.Pointer {
	type eface struct{ _, data unsafe.Pointer }
	return (*eface)(unsafe.Pointer(&a)).data
}

View file

@ -1,230 +0,0 @@
package mangler
import (
"reflect"
)
// loadMangler is the top-most Mangler load function. It guarantees that a Mangler
// function will be returned for given value interface{} and reflected type. Else panics.
func loadMangler(t reflect.Type) Mangler {
	// Fresh context: top-level types start as direct values.
	ctx := typecontext{rtype: t, direct: true}

	if mng := load(ctx); mng != nil {
		return mng
	}

	// No mangler function could be determined
	panic("cannot mangle type: " + t.String())
}
// load will load a Mangler or reflect Mangler for given type and iface 'a'.
// Note: allocates new interface value if nil provided, i.e. if coming via reflection.
func load(ctx typecontext) Mangler {
	if ctx.rtype == nil {
		// There is no reflect type to search by
		panic("cannot mangle nil interface{} type")
	}

	// Search by reflection; nil when unsupported.
	return loadReflect(ctx)
}
// loadReflect will load a Mangler (or rMangler) function for the given reflected type info.
// NOTE: this is used as the top level load function for nested reflective searches.
func loadReflect(ctx typecontext) Mangler {
	switch ctx.rtype.Kind() {
	case reflect.Pointer:
		return loadReflectPtr(ctx)
	case reflect.String:
		return mangle_string
	case reflect.Struct:
		return loadReflectStruct(ctx)
	case reflect.Array:
		return loadReflectArray(ctx)
	case reflect.Slice:
		return loadReflectSlice(ctx)
	case reflect.Bool:
		return mangle_bool
	case reflect.Int, reflect.Uint, reflect.Uintptr:
		// Word-sized integer kinds.
		return mangle_int
	case reflect.Int8, reflect.Uint8:
		return mangle_8bit
	case reflect.Int16, reflect.Uint16:
		return mangle_16bit
	case reflect.Int32, reflect.Uint32, reflect.Float32:
		// All 4-byte kinds share one mangler.
		return mangle_32bit
	case reflect.Int64, reflect.Uint64, reflect.Float64, reflect.Complex64:
		// All 8-byte kinds share one mangler.
		return mangle_64bit
	case reflect.Complex128:
		return mangle_128bit
	default:
		// Unsupported kind.
		return nil
	}
}
// loadReflectPtr loads a Mangler (or rMangler) function for a ptr's element type.
// This also handles further dereferencing of any further ptr indirections (e.g. ***int).
func loadReflectPtr(ctx typecontext) Mangler {
	var n uint

	// Iteratively dereference ptrs,
	// counting levels of indirection.
	for ctx.rtype.Kind() == reflect.Pointer {
		ctx.rtype = ctx.rtype.Elem()
		n++
	}

	// Set ptr type.
	ctx.isptr = true

	// Search for elem type mangler.
	if mng := load(ctx); mng != nil {
		return deref_ptr_mangler(ctx, mng, n)
	}

	return nil
}
// loadReflectKnownSlice loads a Mangler function for a
// known slice-of-element type (in this case, primitives).
func loadReflectKnownSlice(ctx typecontext) Mangler {
	switch ctx.rtype.Kind() {
	case reflect.String:
		return mangle_string_slice
	case reflect.Bool:
		return mangle_bool_slice
	case reflect.Int, reflect.Uint, reflect.Uintptr:
		// Word-sized integer kinds.
		return mangle_int_slice
	case reflect.Int8, reflect.Uint8:
		return mangle_8bit_slice
	case reflect.Int16, reflect.Uint16:
		return mangle_16bit_slice
	case reflect.Int32, reflect.Uint32, reflect.Float32:
		// All 4-byte element kinds share one mangler.
		return mangle_32bit_slice
	case reflect.Int64, reflect.Uint64, reflect.Float64, reflect.Complex64:
		// All 8-byte element kinds share one mangler.
		return mangle_64bit_slice
	case reflect.Complex128:
		return mangle_128bit_slice
	default:
		// Not a known primitive element.
		return nil
	}
}
// loadReflectSlice loads a Mangler function for the given reflected
// slice type, preferring a known primitive-slice mangler before
// falling back to per-element iteration.
func loadReflectSlice(ctx typecontext) Mangler {

	// Get nested element type.
	elem := ctx.rtype.Elem()

	// Set this as nested type
	// (slices are never 'direct').
	ctx.set_nested(false)
	ctx.rtype = elem

	// Preferably look for known slice mangler func
	if mng := loadReflectKnownSlice(ctx); mng != nil {
		return mng
	}

	// Use nested mangler iteration.
	if mng := load(ctx); mng != nil {
		return iter_slice_mangler(ctx, mng)
	}

	return nil
}
// loadReflectArray loads a Mangler function for the given reflected
// array type by loading a mangler for its element type and wrapping
// it with per-element iteration.
func loadReflectArray(ctx typecontext) Mangler {

	// Get nested element type.
	elem := ctx.rtype.Elem()

	// Set this as a nested value type; arrays of
	// length <= 1 remain 'direct' (no real nesting).
	direct := ctx.rtype.Len() <= 1
	ctx.set_nested(direct)
	ctx.rtype = elem

	// Use manglers for nested iteration.
	if mng := load(ctx); mng != nil {
		return iter_array_mangler(ctx, mng)
	}

	return nil
}
// loadReflectStruct loads a Mangler function for the given reflected
// struct type by gathering a mangler for every field, returning nil
// if any field type is unsupported.
func loadReflectStruct(ctx typecontext) Mangler {
	var mngs []Mangler

	// Set this as a nested value type; structs with
	// <= 1 field remain 'direct' (no real nesting).
	direct := ctx.rtype.NumField() <= 1
	ctx.set_nested(direct)

	// Gather manglers for all fields.
	for i := 0; i < ctx.ntype.NumField(); i++ {

		// Update context with field at index.
		ctx.rtype = ctx.ntype.Field(i).Type

		// Load mangler.
		mng := load(ctx)
		if mng == nil {
			return nil
		}

		// Append next to list.
		mngs = append(mngs, mng)
	}

	// Use manglers for nested iteration.
	return iter_struct_mangler(ctx, mngs)
}

View file

@ -1,130 +0,0 @@
package mangler
import (
"reflect"
"sync"
"unsafe"
)
// manglers is a map of runtime
// type ptrs => Mangler functions,
// keyed by uintptr of the type's
// eface data pointer (see eface_data).
var manglers sync.Map

// Mangler is a function that will take an input interface value of known
// type, and append it in mangled serialized form to the given byte buffer.
// While the value type is an interface, the Mangler functions are accessed
// by the value's runtime type pointer, allowing the input value type to be known.
type Mangler func(buf []byte, ptr unsafe.Pointer) []byte
// Get will fetch the Mangler function for given runtime type.
// Note that the returned mangler will be a no-op in the case
// that an incorrect type is passed as the value argument.
func Get(t reflect.Type) Mangler {
	var mng Mangler

	// Get raw runtime type ptr
	uptr := uintptr(eface_data(t))

	// Look for a cached mangler
	v, ok := manglers.Load(uptr)

	if !ok {
		// Load mangler function.
		// NOTE(review): result is not stored back into the
		// cache here, unlike Append() — confirm intentional.
		mng = loadMangler(t)
	} else {
		// cast cached value
		mng = v.(Mangler)
	}

	return func(buf []byte, ptr unsafe.Pointer) []byte {
		// First write the type ptr (this adds
		// a unique prefix for each runtime type).
		buf = append_uint64(buf, uint64(uptr))

		// Finally, mangle value
		return mng(buf, ptr)
	}
}
// Register will register the given Mangler function for use with vars of given runtime type. This allows
// registering performant manglers for existing types not implementing Mangled (e.g. std library types).
// NOTE: panics if there already exists a Mangler function for given type. Register on init().
func Register(t reflect.Type, m Mangler) {
	if t == nil {
		// Nil interface{} types cannot be searched by, do not accept
		panic("cannot register mangler for nil interface{} type")
	}

	// Get raw runtime type ptr
	uptr := uintptr(eface_data(t))

	// Atomically check-and-store: the previous separate
	// Load + Store pair raced with a concurrent Register
	// for the same type, allowing a silent overwrite.
	if _, loaded := manglers.LoadOrStore(uptr, m); loaded {
		panic("already registered mangler for type: " + t.String())
	}
}
// Append will append the mangled form of input value 'a' to buffer 'b'.
// See mangler.String() for more information on mangled output.
func Append(b []byte, a any) []byte {
	var mng Mangler

	// Get reflect type of 'a'
	t := reflect.TypeOf(a)

	// Get raw runtime type ptr
	uptr := uintptr(eface_data(t))

	// Look for a cached mangler
	v, ok := manglers.Load(uptr)

	if !ok {
		// Not yet cached: load the mangler and
		// store it for future calls with this type.
		mng = loadMangler(t)
		manglers.Store(uptr, mng)
	} else {
		// cast cached value
		mng = v.(Mangler)
	}

	// First write the type ptr (this adds
	// a unique prefix for each runtime type).
	b = append_uint64(b, uint64(uptr))

	// Finally, mangle value
	ptr := eface_data(a)
	return mng(b, ptr)
}
// String will return the mangled format of input value 'a'. This
// mangled output will be unique for all default supported input types
// during a single runtime instance. Uniqueness cannot be guaranteed
// between separate runtime instances (whether running concurrently, or
// the same application running at different times).
//
// The exact formatting of the output data should not be relied upon,
// only that it is unique given the above constraints. Generally though,
// the mangled output is the binary formatted text of given input data.
//
// Uniqueness is guaranteed for similar input data of differing types
// (e.g. string("hello world") vs. []byte("hello world")) by prefixing
// mangled output with the input data's runtime type pointer.
//
// Default supported types include:
//   - string
//   - bool
//   - int,int8,int16,int32,int64
//   - uint,uint8,uint16,uint32,uint64,uintptr
//   - float32,float64
//   - complex64,complex128
//   - arbitrary structs
//   - all type aliases of above
//   - all pointers to the above
//   - all slices / arrays of the above
func String(a any) string {
	b := Append(make([]byte, 0, 32), a)
	// Zero-copy conversion of the buffer to a string; safe here
	// because 'b' never escapes and is not mutated afterwards.
	return *(*string)(unsafe.Pointer(&b))
}

55
vendor/codeberg.org/gruf/go-mangler/v2/array.go generated vendored Normal file
View file

@ -0,0 +1,55 @@
package mangler
import (
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// iterArrayType returns a Mangler capable of iterating
// and mangling the given array type currently in TypeIter{}.
// note this will fetch sub-Mangler for array element type,
// returning nil if the element type is unsupported.
func iterArrayType(t xunsafe.TypeIter) Mangler {
	// Array element type.
	elem := t.Type.Elem()

	// Get nested elem TypeIter with appropriate flags.
	flags := xunsafe.ReflectArrayElemFlags(t.Flag, elem)
	et := t.Child(elem, flags)

	// Get elem mangler.
	fn := loadOrGet(et)
	if fn == nil {
		return nil
	}

	// Array element in-memory size.
	esz := t.Type.Elem().Size()

	// No of elements.
	n := t.Type.Len()

	switch n {
	case 0:
		// Empty array: nothing to write.
		return empty_mangler
	case 1:
		// Single element: no iteration or separators needed.
		return fn
	default:
		return func(buf []byte, ptr unsafe.Pointer) []byte {
			for i := 0; i < n; i++ {
				// Mangle data at array index.
				offset := esz * uintptr(i)
				eptr := add(ptr, offset)
				buf = fn(buf, eptr)
				buf = append(buf, ',')
			}

			if n > 0 {
				// Drop final comma.
				buf = buf[:len(buf)-1]
			}

			return buf
		}
	}
}

62
vendor/codeberg.org/gruf/go-mangler/v2/cache.go generated vendored Normal file
View file

@ -0,0 +1,62 @@
package mangler
import (
"sync/atomic"
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
var manglers cache
// cache is a concurrency-safe map[xunsafe.TypeInfo]Mangler
// cache, designed for heavy reads but with unfortunately expensive
// writes. it is designed such that after some initial load period
// in which functions are cached by types, all future ops are reads.
// The pointer 'p' always refers to an immutable copy-on-write map
// (see Put), swapped atomically.
type cache struct{ p unsafe.Pointer }
// Get will check cache for mangler func under key.
func (c *cache) Get(t xunsafe.TypeInfo) Mangler {
	p := c.load()
	if p == nil {
		// No map published yet.
		return nil
	}
	return (*p)[t]
}
// Put will place given mangler func in cache under key, if not already exists.
// It copies the current map, inserts the new entry, then publishes the copy via
// compare-and-swap, retrying if another goroutine published in the meantime.
func (c *cache) Put(t xunsafe.TypeInfo, fn Mangler) {
	for {
		// Load current published map (may be nil).
		p := c.load()

		var cache map[xunsafe.TypeInfo]Mangler

		if p != nil {
			if _, ok := (*p)[t]; ok {
				// Key already cached; nothing to do.
				return
			}

			// Copy every existing entry into a fresh map.
			cache = make(map[xunsafe.TypeInfo]Mangler, len(*p)+1)
			for key, value := range *p {
				cache[key] = value
			}
		} else {
			// First ever entry.
			cache = make(map[xunsafe.TypeInfo]Mangler, 1)
		}

		// Insert the new entry.
		cache[t] = fn

		// Publish; on CAS failure another goroutine
		// swapped the map first, so retry from scratch.
		if c.cas(p, &cache) {
			return
		}
	}
}
// load is a typed wrapper around atomic.LoadPointer().
// It returns nil until the first successful Put().
func (c *cache) load() *map[xunsafe.TypeInfo]Mangler {
	return (*map[xunsafe.TypeInfo]Mangler)(atomic.LoadPointer(&c.p))
}
// cas is a typed wrapper around atomic.CompareAndSwapPointer(),
// publishing 'new' only if the current map pointer is still 'old'.
func (c *cache) cas(old, new *map[xunsafe.TypeInfo]Mangler) bool {
	return atomic.CompareAndSwapPointer(&c.p, unsafe.Pointer(old), unsafe.Pointer(new))
}

43
vendor/codeberg.org/gruf/go-mangler/v2/helpers.go generated vendored Normal file
View file

@ -0,0 +1,43 @@
package mangler
import (
"unsafe"
)
// append_uint16 appends the 2 bytes of 'u' to 'b' in little-endian order.
func append_uint16(b []byte, u uint16) []byte {
	var le [2]byte
	le[0] = byte(u)
	le[1] = byte(u >> 8)
	return append(b, le[:]...)
}
// append_uint32 appends the 4 bytes of 'u' to 'b' in little-endian order.
func append_uint32(b []byte, u uint32) []byte {
	var le [4]byte
	for i := range le {
		le[i] = byte(u >> (8 * i))
	}
	return append(b, le[:]...)
}
// append_uint64 appends the 8 bytes of 'u' to 'b' in little-endian order.
func append_uint64(b []byte, u uint64) []byte {
	var le [8]byte
	for i := range le {
		le[i] = byte(u >> (8 * i))
	}
	return append(b, le[:]...)
}
// empty_mangler is a no-op Mangler that
// returns the buffer entirely unchanged.
func empty_mangler(buf []byte, _ unsafe.Pointer) []byte {
	return buf
}
// add returns the ptr addition of starting ptr and a delta.
// Uses unsafe.Add (Go 1.17+), the idiomatic form of pointer arithmetic,
// instead of the raw unsafe.Pointer(uintptr(...)) round-trip.
func add(ptr unsafe.Pointer, delta uintptr) unsafe.Pointer {
	return unsafe.Add(ptr, delta)
}

150
vendor/codeberg.org/gruf/go-mangler/v2/load.go generated vendored Normal file
View file

@ -0,0 +1,150 @@
package mangler
import (
"reflect"
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// loadOrStore first checks the cache for a Mangler
// function, else generates one by calling get().
// note: this does store generated funcs in cache.
func loadOrStore(t xunsafe.TypeIter) Mangler {
	// Cache key for this type.
	key := t.TypeInfo

	if fn := manglers.Get(key); fn != nil {
		// Cache hit.
		return fn
	}

	// Generate a new mangler
	// func for this type.
	fn := get(t)
	if fn != nil {
		// Store func in cache.
		manglers.Put(key, fn)
	}
	return fn
}
// loadOrGet first checks the cache for a Mangler
// function, else generates one by calling get().
// note: it does not store the function in cache.
func loadOrGet(t xunsafe.TypeIter) Mangler {
	if fn := manglers.Get(t.TypeInfo); fn != nil {
		// Cache hit.
		return fn
	}

	// Generate anew, without caching.
	return get(t)
}
var (
	// reflectTypeType is the reflected type of the reflect type,
	// used in get() to prevent iteration of internal ABI structs.
	reflectTypeType = reflect.TypeOf(reflect.TypeOf(0))
)
// get attempts to generate a new Mangler function
// capable of mangling a ptr of given type information.
func get(t xunsafe.TypeIter) (fn Mangler) {
	// The deferred func wraps whichever mangler is generated
	// below so that TOP-LEVEL manglers prefix their output with
	// the value's runtime type pointer, guaranteeing uniqueness
	// of output between differing types.
	defer func() {
		if fn == nil {
			// nothing more
			// we can do.
			return
		}

		if t.Parent != nil {
			// We're only interested
			// in wrapping top-level.
			return
		}

		// Get reflected type ptr for prefix.
		ptr := xunsafe.ReflectTypeData(t.Type)
		uptr := uintptr(ptr)

		// Outer fn.
		mng := fn

		// Wrap the mangler func to prepend type pointer.
		fn = func(buf []byte, ptr unsafe.Pointer) []byte {
			buf = append_uint64(buf, uint64(uptr))
			return mng(buf, ptr)
		}
	}()

	if t.Type == nil {
		// nil type.
		return nil
	}

	if t.Type == reflectTypeType {
		// DO NOT iterate down internal ABI
		// types, some are in non-GC memory.
		return nil
	}

	// Check supports known method receiver.
	if fn := getMethodType(t); fn != nil {
		return fn
	}

	if !visit(t) {
		// On type recursion simply
		// mangle as raw pointer.
		return mangle_int
	}

	// Get func for type kind.
	switch t.Type.Kind() {
	case reflect.Pointer:
		return derefPointerType(t)
	case reflect.Struct:
		return iterStructType(t)
	case reflect.Array:
		return iterArrayType(t)
	case reflect.Slice:
		return iterSliceType(t)
	case reflect.Map:
		return iterMapType(t)
	case reflect.String:
		return mangle_string
	case reflect.Bool:
		return mangle_bool
	case reflect.Int,
		reflect.Uint,
		reflect.Uintptr:
		return mangle_int
	case reflect.Int8, reflect.Uint8:
		return mangle_8bit
	case reflect.Int16, reflect.Uint16:
		return mangle_16bit
	case reflect.Int32, reflect.Uint32:
		return mangle_32bit
	case reflect.Int64, reflect.Uint64:
		return mangle_64bit
	case reflect.Float32:
		return mangle_32bit
	case reflect.Float64:
		return mangle_64bit
	case reflect.Complex64:
		return mangle_64bit
	case reflect.Complex128:
		return mangle_128bit
	default:
		return nil
	}
}

93
vendor/codeberg.org/gruf/go-mangler/v2/mangle.go generated vendored Normal file
View file

@ -0,0 +1,93 @@
package mangler
import (
"fmt"
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// Mangler is a function that will take an input value of known type,
// and append it in mangled serialized form to the given byte buffer.
// The ptr argument is an unsafe pointer to the value's data.
type Mangler func(buf []byte, ptr unsafe.Pointer) []byte
// Get will fetch the Mangler function for given runtime type information.
// The required argument is of type xunsafe.TypeIter{} as unsafe pointer
// access requires further contextual information like type nesting.
func Get(t xunsafe.TypeIter) Mangler {
	t.Parent = nil // enforce type prefix
	if fn := loadOrStore(t); fn != nil {
		return fn
	}
	panic(fmt.Sprintf("cannot mangle type: %s", t.Type))
}
// GetNoLoad is functionally similar to Get(),
// without caching the resulting Mangler.
func GetNoLoad(t xunsafe.TypeIter) Mangler {
	t.Parent = nil // enforce type prefix
	if fn := loadOrGet(t); fn != nil {
		return fn
	}
	panic(fmt.Sprintf("cannot mangle type: %s", t.Type))
}
// Append will append the mangled form of input value 'a' to buffer 'b'.
//
// See mangler.String() for more information on mangled output.
func Append(b []byte, a any) []byte {
	iter := xunsafe.TypeIterFrom(a)
	data := xunsafe.UnpackEface(a)
	mangle := Get(iter)
	return mangle(b, data)
}
// AppendMulti appends all mangled forms of input value(s) 'a' to buffer 'b',
// each followed by a '.' separator character. When all type manglers are
// currently cached for all types in 'a', this will be faster than multiple
// calls to Append().
//
// See mangler.String() for more information on mangled output.
func AppendMulti(b []byte, a ...any) []byte {
	// Fast path: a cache map exists, try to
	// serve every value from cached manglers.
	if p := manglers.load(); p != nil {
		// Remember start offset so we can roll
		// back cleanly on any cache miss below.
		b4 := len(b)
		for _, a := range a {
			t := xunsafe.TypeIterFrom(a)
			m := (*p)[t.TypeInfo]
			if m == nil {
				b = b[:b4]
				goto slow
			}
			b = m(b, xunsafe.UnpackEface(a))
			b = append(b, '.')
		}
		return b
	}
slow:
	// Slow path: Append() loads (and caches)
	// the mangler for each value as needed.
	for _, a := range a {
		b = Append(b, a)
		b = append(b, '.')
	}
	return b
}
// String will return the mangled format of input value 'a'. This
// mangled output will be unique for all default supported input types
// during a single runtime instance. Uniqueness cannot be guaranteed
// between separate runtime instances (whether running concurrently, or
// the same application running at different times).
//
// The exact formatting of the output data should not be relied upon,
// only that it is unique given the above constraints. Generally though,
// the mangled output is the binary formatted text of given input data.
//
// Uniqueness is guaranteed for similar input data of differing types
// (e.g. string("hello world") vs. []byte("hello world")) by prefixing
// mangled output with the input data's runtime type pointer.
//
// Default supported types include all concrete (i.e. non-interface{})
// data types, and interfaces implementing Mangleable{}.
func String(a any) string {
	b := Append(make([]byte, 0, 32), a)
	// Zero-copy conversion of the buffer to a string; safe here
	// because 'b' never escapes and is not mutated afterwards.
	return *(*string)(unsafe.Pointer(&b))
}

80
vendor/codeberg.org/gruf/go-mangler/v2/map.go generated vendored Normal file
View file

@ -0,0 +1,80 @@
package mangler
import (
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// iterMapType returns a Mangler capable of iterating
// and mangling the given map type currently in TypeIter{}.
// note this will fetch sub-Manglers for key / value types,
// returning nil if either type is unsupported.
func iterMapType(t xunsafe.TypeIter) Mangler {
	// Key / value types.
	key := t.Type.Key()
	elem := t.Type.Elem()

	// Get nested k / v TypeIters with appropriate flags.
	flagsKey := xunsafe.ReflectMapKeyFlags(key)
	flagsVal := xunsafe.ReflectMapElemFlags(elem)
	kt := t.Child(key, flagsKey)
	vt := t.Child(elem, flagsVal)

	// Get key mangler.
	kfn := loadOrGet(kt)
	if kfn == nil {
		return nil
	}

	// Get value mangler.
	vfn := loadOrGet(vt)
	if vfn == nil {
		return nil
	}

	// Final map type.
	rtype := t.Type
	flags := t.Flag

	return func(buf []byte, ptr unsafe.Pointer) []byte {
		if ptr == nil || *(*unsafe.Pointer)(ptr) == nil {
			// Append nil indicator.
			buf = append(buf, '0')
			return buf
		}

		// Build reflect value, and then a map iterator.
		v := xunsafe.BuildReflectValue(rtype, ptr, flags)
		i := xunsafe.GetMapIter(v)

		// Buffer length before the
		// flag / entries are written.
		l := len(buf)

		// Append not-nil flag.
		buf = append(buf, '1')

		for i.Next() {
			// Pass to map key func.
			ptr = xunsafe.Map_Key(i)
			buf = kfn(buf, ptr)

			// Add key separator.
			buf = append(buf, ':')

			// Pass to map elem func.
			ptr = xunsafe.Map_Elem(i)
			buf = vfn(buf, ptr)

			// Add comma separator.
			buf = append(buf, ',')
		}

		// NOTE(review): 'l' was captured BEFORE the '1' flag was
		// appended, so this condition is always true and, for an
		// empty (non-nil) map, strips the '1' flag itself rather
		// than a trailing comma — confirm this is intentional.
		if len(buf) != l {
			// Drop final comma.
			buf = buf[:len(buf)-1]
		}

		return buf
	}
}

105
vendor/codeberg.org/gruf/go-mangler/v2/method.go generated vendored Normal file
View file

@ -0,0 +1,105 @@
package mangler
import (
"reflect"
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
var (
	// mangleableType is the reflected Mangleable interface
	// type, used for Implements() checks in getMethodType().
	mangleableType = reflect.TypeFor[Mangleable]()
)
// Mangleable is implemented by types that can append
// a mangled form of themselves to 'dst', returning the
// extended buffer.
type Mangleable interface {
	Mangle(dst []byte) []byte
}
// getMethodType returns a *possible* Mangler to handle case
// of a type that implements any known interface{} types, else nil.
func getMethodType(t xunsafe.TypeIter) Mangler {
	if !t.Type.Implements(mangleableType) {
		// No known method receiver.
		return nil
	}
	if t.Type.Kind() == reflect.Interface {
		// Interface type implementing Mangleable{}.
		return getInterfaceMangleableType(t)
	}
	// Concrete type with a Mangleable{} receiver.
	return getConcreteMangleableType(t)
}
// getInterfaceMangleableType returns a Mangler to handle case of an interface{}
// type that implements Mangleable{}, i.e. Mangleable{} itself and any superset of.
func getInterfaceMangleableType(t xunsafe.TypeIter) Mangler {
	switch t.Indirect() && !t.IfaceIndir() {
	case true:
		// Indirect case: dereference the outer pointer
		// before reading the interface value itself.
		return func(buf []byte, ptr unsafe.Pointer) []byte {
			ptr = *(*unsafe.Pointer)(ptr)
			if ptr == nil || (*xunsafe.Abi_NonEmptyInterface)(ptr).Data == nil {
				// Nil interface, or interface holding nil data.
				buf = append(buf, '0')
				return buf
			}
			v := *(*Mangleable)(ptr)
			buf = append(buf, '1')
			buf = v.Mangle(buf)
			return buf
		}
	case false:
		// Direct case: ptr already addresses the interface value.
		return func(buf []byte, ptr unsafe.Pointer) []byte {
			if ptr == nil || (*xunsafe.Abi_NonEmptyInterface)(ptr).Data == nil {
				// Nil interface, or interface holding nil data.
				buf = append(buf, '0')
				return buf
			}
			v := *(*Mangleable)(ptr)
			buf = append(buf, '1')
			buf = v.Mangle(buf)
			return buf
		}
	default:
		panic("unreachable")
	}
}
// getConcreteMangleableType returns a Mangler to handle case of concrete
// (i.e. non-interface{}) type that has a Mangleable{} method receiver.
func getConcreteMangleableType(t xunsafe.TypeIter) Mangler {
	// Pre-computed itab for packing values into Mangleable{}.
	itab := xunsafe.GetIfaceITab[Mangleable](t.Type)
	switch {
	case t.Indirect() && !t.IfaceIndir():
		// Indirect memory: dereference once, nil-checking first.
		return func(buf []byte, ptr unsafe.Pointer) []byte {
			ptr = *(*unsafe.Pointer)(ptr)
			if ptr == nil {
				buf = append(buf, '0')
				return buf
			}
			v := *(*Mangleable)(xunsafe.PackIface(itab, ptr))
			buf = append(buf, '1')
			buf = v.Mangle(buf)
			return buf
		}
	case t.Type.Kind() == reflect.Pointer && t.Type.Implements(mangleableType):
		// if the interface implementation is received by
		// value type, the pointer type will also support
		// it but it requires an extra dereference check.
		return func(buf []byte, ptr unsafe.Pointer) []byte {
			if ptr == nil {
				buf = append(buf, '0')
				return buf
			}
			v := *(*Mangleable)(xunsafe.PackIface(itab, ptr))
			buf = append(buf, '1')
			buf = v.Mangle(buf)
			return buf
		}
	default:
		// Plain value receiver: no nil indirection possible.
		return func(buf []byte, ptr unsafe.Pointer) []byte {
			v := *(*Mangleable)(xunsafe.PackIface(itab, ptr))
			buf = v.Mangle(buf)
			return buf
		}
	}
}

81
vendor/codeberg.org/gruf/go-mangler/v2/pointer.go generated vendored Normal file
View file

@ -0,0 +1,81 @@
package mangler
import (
"reflect"
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// derefPointerType returns a Mangler capable of dereferencing
// and formatting the given pointer type currently in TypeIter{}.
// note this will fetch a sub-Mangler for resulting value type.
func derefPointerType(t xunsafe.TypeIter) Mangler {
	var derefs int      // number of pointer levels walked
	var indirects int64 // bitmask: which levels need an actual memory load
	rtype := t.Type
	flags := t.Flag

	// Iteratively dereference pointer types.
	for rtype.Kind() == reflect.Pointer {

		// Only if this is actual indirect memory do we
		// perform a dereference, otherwise we just skip over
		// and increase the dereference indicator, i.e. '1'.
		if flags&xunsafe.Reflect_flagIndir != 0 {
			indirects |= 1 << derefs
		}
		derefs++

		// Get next elem type.
		rtype = rtype.Elem()

		// Get next set of dereferenced element type flags.
		flags = xunsafe.ReflectPointerElemFlags(flags, rtype)
	}

	// Ensure this is a reasonable number of derefs,
	// i.e. one bit per level fits in 'indirects'.
	if derefs > 4*int(unsafe.Sizeof(indirects)) {
		return nil
	}

	// Wrap value as TypeIter.
	vt := t.Child(rtype, flags)

	// Get value mangler.
	fn := loadOrGet(vt)
	if fn == nil {
		return nil
	}

	return func(buf []byte, ptr unsafe.Pointer) []byte {
		for i := 0; i < derefs; i++ {
			switch {
			case indirects&1<<i == 0:
				// No dereference needed.
				buf = append(buf, '1')
			case ptr == nil:
				// Nil value, return here.
				buf = append(buf, '0')
				return buf
			default:
				// Further deref ptr.
				buf = append(buf, '1')
				ptr = *(*unsafe.Pointer)(ptr)
			}
		}

		if ptr == nil {
			// Final nil val check.
			buf = append(buf, '0')
			return buf
		}

		// Mangle fully deref'd.
		buf = append(buf, '1')
		buf = fn(buf, ptr)
		return buf
	}
}

95
vendor/codeberg.org/gruf/go-mangler/v2/slice.go generated vendored Normal file
View file

@ -0,0 +1,95 @@
package mangler
import (
"reflect"
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// iterSliceType returns a Mangler capable of iterating
// and mangling the given slice type currently in TypeIter{}.
// note this will fetch sub-Mangler for slice element type,
// returning nil if the element type is unsupported.
func iterSliceType(t xunsafe.TypeIter) Mangler {
	// Get nested element type.
	elem := t.Type.Elem()
	esz := elem.Size()

	// Get nested elem TypeIter{} with flags.
	flags := xunsafe.ReflectSliceElemFlags(elem)
	et := t.Child(elem, flags)

	// Prefer to use a known slice mangler func.
	if fn := mangleKnownSlice(et); fn != nil {
		return fn
	}

	// Get elem mangler.
	fn := loadOrGet(et)
	if fn == nil {
		return nil
	}

	return func(buf []byte, ptr unsafe.Pointer) []byte {
		// Get data as unsafe slice header.
		hdr := (*xunsafe.Unsafeheader_Slice)(ptr)
		if hdr == nil || hdr.Data == nil {
			// Append nil indicator.
			buf = append(buf, '0')
			return buf
		}

		// Append not-nil flag.
		buf = append(buf, '1')

		for i := 0; i < hdr.Len; i++ {
			// Mangle at array index.
			offset := esz * uintptr(i)
			ptr = add(hdr.Data, offset)
			buf = fn(buf, ptr)
			buf = append(buf, ',')
		}

		if hdr.Len > 0 {
			// Drop final comma.
			buf = buf[:len(buf)-1]
		}

		return buf
	}
}
// mangleKnownSlice loads a Mangler function for a
// known slice-of-element type (in this case, primitives).
func mangleKnownSlice(t xunsafe.TypeIter) Mangler {
	switch t.Type.Kind() {
	case reflect.String:
		return mangle_string_slice
	case reflect.Bool:
		return mangle_bool_slice
	case reflect.Int, reflect.Uint, reflect.Uintptr:
		// Word-sized integer kinds.
		return mangle_int_slice
	case reflect.Int8, reflect.Uint8:
		return mangle_8bit_slice
	case reflect.Int16, reflect.Uint16:
		return mangle_16bit_slice
	case reflect.Int32, reflect.Uint32, reflect.Float32:
		// All 4-byte element kinds share one mangler.
		return mangle_32bit_slice
	case reflect.Int64, reflect.Uint64, reflect.Float64, reflect.Complex64:
		// All 8-byte element kinds share one mangler.
		return mangle_64bit_slice
	case reflect.Complex128:
		return mangle_128bit_slice
	default:
		// Not a known primitive element.
		return nil
	}
}

75
vendor/codeberg.org/gruf/go-mangler/v2/struct.go generated vendored Normal file
View file

@ -0,0 +1,75 @@
package mangler
import (
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// field stores the minimum necessary
// data for iterating and mangling
// each field in a given struct.
type field struct {
	mangle Mangler // mangler for the field's type
	offset uintptr // byte offset of the field within the struct
}
// iterStructType returns a Mangler capable of iterating
// and mangling the given struct type currently in TypeIter{}.
// note this will fetch sub-Manglers for each struct field,
// returning nil if any field type is unsupported.
func iterStructType(t xunsafe.TypeIter) Mangler {
	// Number of struct fields.
	n := t.Type.NumField()

	// Gather mangler functions.
	fields := make([]field, n)
	for i := 0; i < n; i++ {
		// Get struct field at index.
		sfield := t.Type.Field(i)
		rtype := sfield.Type

		// Get nested field TypeIter with appropriate flags.
		flags := xunsafe.ReflectStructFieldFlags(t.Flag, rtype)
		ft := t.Child(sfield.Type, flags)

		// Get field mangler.
		fn := loadOrGet(ft)
		if fn == nil {
			return nil
		}

		// Set field info.
		fields[i] = field{
			mangle: fn,
			offset: sfield.Offset,
		}
	}

	// Handle no. fields.
	switch len(fields) {
	case 0:
		// Empty struct: nothing to write.
		return empty_mangler
	case 1:
		// Single field: no iteration or separators needed.
		return fields[0].mangle
	default:
		return func(buf []byte, ptr unsafe.Pointer) []byte {
			for i := range fields {
				// Get struct field ptr via offset.
				fptr := add(ptr, fields[i].offset)

				// Mangle the struct field data.
				buf = fields[i].mangle(buf, fptr)
				buf = append(buf, ',')
			}

			if len(fields) > 0 {
				// Drop final comma.
				buf = buf[:len(buf)-1]
			}

			return buf
		}
	}
}

20
vendor/codeberg.org/gruf/go-mangler/v2/type.go generated vendored Normal file
View file

@ -0,0 +1,20 @@
package mangler
import (
"codeberg.org/gruf/go-xunsafe"
)
// visit reports whether the current type has NOT already appeared
// in the TypeIter{}'s parent hierarchy, i.e. it returns false when
// type recursion is detected.
func visit(iter xunsafe.TypeIter) bool {
	target := iter.Type

	// Walk up the parent chain looking for this same type.
	for node := iter.Parent; node != nil; node = node.Parent {
		if node.Type == target {
			// Recursion detected.
			return false
		}
	}

	return true
}