[chore] update dependencies (#4386)

- codeberg.org/gruf/go-bytesize v1.0.3 -> v1.0.4
- codeberg.org/gruf/go-kv/v2 v2.0.6 -> v2.0.7
- codeberg.org/gruf/go-mutexes v1.5.2 -> v1.5.3
- codeberg.org/gruf/go-structr v0.9.7 -> v0.9.8
- codeberg.org/gruf/go-ffmpreg v0.6.8 -> v0.6.9
- github.com/tomnomnom/linkheader HEAD@2018 -> HEAD@2025

all of the above codeberg.org/gruf updates are in preparation for Go 1.25, except for bytesize, and ffmpreg, which is a rebuild against the latest version of ffmpeg (v5.1.7)

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4386
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
Authored and committed by kim, 2025-08-21 16:41:50 +02:00
commit a79f83cbde
38 changed files with 1246 additions and 964 deletions


@ -94,8 +94,10 @@ var (
// methods for byte sizes in both IEC and SI units.
type Size uint64
// ParseSize will parse a valid Size from given string. Both IEC and SI units are supported.
// ParseSize will parse a valid Size from given
// string. Both IEC and SI units are supported.
func ParseSize(s string) (Size, error) {
// Parse units from string
unit, l, err := parseUnit(s)
if err != nil {
@ -121,10 +123,15 @@ func (sz *Size) Set(in string) error {
return nil
}
// AppendText implements encoding.TextAppender{}.
func (sz Size) AppendText(b []byte) ([]byte, error) {
return sz.AppendFormatIEC(b), nil
}
// MarshalText implements encoding.TextMarshaler{}.
func (sz *Size) MarshalText() ([]byte, error) {
func (sz Size) MarshalText() ([]byte, error) {
const maxLen = 7 // max IEC string length
return sz.AppendFormatIEC(make([]byte, 0, maxLen)), nil
return sz.AppendText(make([]byte, 0, maxLen))
}
// UnmarshalText implements encoding.TextUnmarshaler{}.
@ -143,8 +150,12 @@ func (sz Size) AppendFormatSI(dst []byte) []byte {
dst = itoa(dst, uint64(sz))
dst = append(dst, 'B')
return dst
} // above is fast-path, .appendFormat() is outlined
return sz.appendFormat(dst, 1000, &sipows, "B")
}
f, u := sztof(sz, 1000, sipows)
dst = ftoa(dst, f)
dst = append(dst, u)
dst = append(dst, 'B')
return dst
}
// AppendFormatIEC will append IEC formatted size to 'dst'.
@ -153,35 +164,11 @@ func (sz Size) AppendFormatIEC(dst []byte) []byte {
dst = itoa(dst, uint64(sz))
dst = append(dst, 'B')
return dst
} // above is fast-path, .appendFormat() is outlined
return sz.appendFormat(dst, 1024, &iecpows, "iB")
}
// appendFormat will append formatted Size to 'dst', depending on base, powers table and single unit suffix.
func (sz Size) appendFormat(dst []byte, base uint64, pows *[6]float64, sunit string) []byte {
const (
// min "small" unit threshold
min = 0.75
// binary unit chars.
units = `kMGTPE`
)
// Larger number: get value of
// i / unit size. We have a 'min'
// threshold after which we prefer
// using the unit 1 down
n := bits.Len64(uint64(sz)) / 10
f := float64(sz) / pows[n-1]
if f < min {
f *= float64(base)
n--
}
// Append formatted float with units
f, u := sztof(sz, 1024, iecpows)
dst = ftoa(dst, f)
dst = append(dst, units[n-1])
dst = append(dst, sunit...)
dst = append(dst, u)
dst = append(dst, 'i', 'B')
return dst
}
@ -261,6 +248,31 @@ func parseUnit(s string) (float64, int, error) {
return sivals[c], l, nil
}
// sztof divides a Size with base and power units to a float value with power.
func sztof(sz Size, base float64, pows [6]float64) (float64, byte) {
const (
// min "small"
// unit threshold.
min = 0.75
// binary unit chars.
units = `kMGTPE`
)
// Larger number: get value of
// i / unit size. We have a 'min'
// threshold after which we prefer
// using the unit 1 down
n := bits.Len64(uint64(sz)) / 10
f := float64(sz) / pows[n-1]
if f < min {
f *= base
n--
}
return f, units[n-1]
}
// ftoa appends string formatted 'f' to 'dst', assumed < ~800.
func ftoa(dst []byte, f float64) []byte {
switch i := uint64(f); {
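
For reference, a minimal usage sketch of the API touched above, using only identifiers visible in these hunks (ParseSize, AppendFormatIEC/AppendFormatSI, and the new AppendText path via MarshalText); printed values are illustrative, not asserted outputs:

```go
package main

import (
	"fmt"

	"codeberg.org/gruf/go-bytesize"
)

func main() {
	// ParseSize accepts both IEC ("MiB") and SI ("MB") units.
	sz, err := bytesize.ParseSize("1.5MiB")
	if err != nil {
		panic(err)
	}

	// Format the same Size in both unit systems.
	fmt.Printf("%s\n", sz.AppendFormatIEC(nil)) // e.g. 1.5MiB
	fmt.Printf("%s\n", sz.AppendFormatSI(nil))  // e.g. 1.57MB

	// MarshalText now routes through the new AppendText,
	// so both produce the IEC form.
	txt, _ := sz.MarshalText()
	fmt.Println(string(txt))
}
```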

Binary file not shown.

vendor/codeberg.org/gruf/go-kv/v2/format/README.md (generated, vendored; new file, 32 lines)

@ -0,0 +1,32 @@
# format
a low-level string formatting library that takes arbitrary input types as interfaces, and arguments as a struct. this does not contain any printf-like argument parsing, only log-friendly serialization of arbitrary input arguments. (noting that our output is noticeably more log-friendly for struct / map types than stdlib "fmt").
benchmarks:
```shell
goos: linux
goarch: amd64
pkg: codeberg.org/gruf/go-kv/v2/format
cpu: AMD Ryzen 7 7840U w/ Radeon 780M Graphics
# go-kv/v2/format (i.e. latest)
BenchmarkFormatV2Append
BenchmarkFormatV2Append-16 590422 1977 ns/op 488 B/op 23 allocs/op
BenchmarkFormatV2AppendVerbose
BenchmarkFormatV2AppendVerbose-16 375628 2981 ns/op 1704 B/op 45 allocs/op
# go-kv/format (i.e. v1)
BenchmarkFormatAppend
BenchmarkFormatAppend-16 208357 5883 ns/op 2624 B/op 169 allocs/op
BenchmarkFormatAppendVerbose
BenchmarkFormatAppendVerbose-16 35916 33563 ns/op 3734 B/op 208 allocs/op
# fmt (i.e. stdlib)
BenchmarkFmtAppend
BenchmarkFmtAppend-16 147722 8418 ns/op 4747 B/op 191 allocs/op
BenchmarkFmtAppendVerbose
BenchmarkFmtAppendVerbose-16 167112 7238 ns/op 4401 B/op 178 allocs/op
PASS
ok codeberg.org/gruf/go-kv/v2/format
```


@ -84,8 +84,8 @@ func getInterfaceStringerType(t xunsafe.TypeIter) FormatFunc {
// (i.e. non-interface{}) type that has a Stringer{} method receiver.
func getConcreteStringerType(t xunsafe.TypeIter) FormatFunc {
itab := xunsafe.GetIfaceITab[Stringer](t.Type)
switch t.Indirect() && !t.IfaceIndir() {
case true:
switch {
case t.Indirect() && !t.IfaceIndir():
return with_typestr_ptrs(t, func(s *State) {
s.P = *(*unsafe.Pointer)(s.P)
if s.P == nil {
@ -95,13 +95,23 @@ func getConcreteStringerType(t xunsafe.TypeIter) FormatFunc {
v := *(*Stringer)(xunsafe.PackIface(itab, s.P))
appendString(s, v.String())
})
case false:
case t.Type.Kind() == reflect.Pointer && t.Type.Implements(stringerType):
// if the interface implementation is received by
// value type, the pointer type will also support
// it but it requires an extra dereference check.
return with_typestr_ptrs(t, func(s *State) {
if s.P == nil {
appendNil(s)
return
}
v := *(*Stringer)(xunsafe.PackIface(itab, s.P))
appendString(s, v.String())
})
default:
panic("unreachable")
return with_typestr_ptrs(t, func(s *State) {
v := *(*Stringer)(xunsafe.PackIface(itab, s.P))
appendString(s, v.String())
})
}
}
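
The new middle case exists because a value-receiver method lifts into the pointer type's method set, but calling it through a nil pointer dereferences that pointer. A standalone illustration of the failure mode being guarded against (type T is hypothetical, not part of go-kv):

```go
package main

import "fmt"

// T satisfies fmt.Stringer with a *value* receiver, so
// *T also satisfies it -- but only via an implicit deref.
type T struct{ name string }

func (t T) String() string { return t.name }

func main() {
	defer func() {
		// Without a nil check, the implicit dereference
		// of the nil *T panics inside String dispatch.
		fmt.Println("recovered:", recover())
	}()

	var p *T // nil
	var s fmt.Stringer = p
	fmt.Println(s.String()) // nil pointer dereference
}
```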
@ -137,8 +147,8 @@ func getInterfaceFormattableType(t xunsafe.TypeIter) FormatFunc {
// (i.e. non-interface{}) type that has a Formattable{} method receiver.
func getConcreteFormattableType(t xunsafe.TypeIter) FormatFunc {
itab := xunsafe.GetIfaceITab[Formattable](t.Type)
switch t.Indirect() && !t.IfaceIndir() {
case true:
switch {
case t.Indirect() && !t.IfaceIndir():
return with_typestr_ptrs(t, func(s *State) {
s.P = *(*unsafe.Pointer)(s.P)
if s.P == nil {
@ -148,13 +158,23 @@ func getConcreteFormattableType(t xunsafe.TypeIter) FormatFunc {
v := *(*Formattable)(xunsafe.PackIface(itab, s.P))
v.Format(s)
})
case false:
case t.Type.Kind() == reflect.Pointer && t.Type.Implements(formattableType):
// if the interface implementation is received by
// value type, the pointer type will also support
// it but it requires an extra dereference check.
return with_typestr_ptrs(t, func(s *State) {
if s.P == nil {
appendNil(s)
return
}
v := *(*Formattable)(xunsafe.PackIface(itab, s.P))
v.Format(s)
})
default:
panic("unreachable")
return with_typestr_ptrs(t, func(s *State) {
v := *(*Formattable)(xunsafe.PackIface(itab, s.P))
v.Format(s)
})
}
}
@ -190,8 +210,8 @@ func getInterfaceErrorType(t xunsafe.TypeIter) FormatFunc {
// (i.e. non-interface{}) type that has an error{} method receiver.
func getConcreteErrorType(t xunsafe.TypeIter) FormatFunc {
itab := xunsafe.GetIfaceITab[error](t.Type)
switch t.Indirect() && !t.IfaceIndir() {
case true:
switch {
case t.Indirect() && !t.IfaceIndir():
return with_typestr_ptrs(t, func(s *State) {
s.P = *(*unsafe.Pointer)(s.P)
if s.P == nil {
@ -201,12 +221,22 @@ func getConcreteErrorType(t xunsafe.TypeIter) FormatFunc {
v := *(*error)(xunsafe.PackIface(itab, s.P))
appendString(s, v.Error())
})
case false:
case t.Type.Kind() == reflect.Pointer && t.Type.Implements(errorType):
// if the interface implementation is received by
// value type, the pointer type will also support
// it but it requires an extra dereference check.
return with_typestr_ptrs(t, func(s *State) {
if s.P == nil {
appendNil(s)
return
}
v := *(*error)(xunsafe.PackIface(itab, s.P))
appendString(s, v.Error())
})
default:
panic("unreachable")
return with_typestr_ptrs(t, func(s *State) {
v := *(*error)(xunsafe.PackIface(itab, s.P))
appendString(s, v.Error())
})
}
}


@ -1,230 +0,0 @@
//go:build go1.19 && !go1.25
package mangler
import (
"reflect"
"unsafe"
)
func append_uint16(b []byte, u uint16) []byte {
return append(b, // LE
byte(u),
byte(u>>8),
)
}
func append_uint32(b []byte, u uint32) []byte {
return append(b, // LE
byte(u),
byte(u>>8),
byte(u>>16),
byte(u>>24),
)
}
func append_uint64(b []byte, u uint64) []byte {
return append(b, // LE
byte(u),
byte(u>>8),
byte(u>>16),
byte(u>>24),
byte(u>>32),
byte(u>>40),
byte(u>>48),
byte(u>>56),
)
}
type typecontext struct {
isptr bool
direct bool
ntype reflect.Type
rtype reflect.Type
}
func (ctx *typecontext) set_nested(direct bool) {
ctx.direct = ctx.direct && direct && !ctx.isptr
ctx.ntype = ctx.rtype
ctx.rtype = nil
ctx.isptr = false
}
func deref_ptr_mangler(ctx typecontext, mangle Mangler, n uint) Mangler {
if mangle == nil || n == 0 {
panic("bad input")
}
// If this is a direct value type, i.e. non-nested primitive,
// or part of a single-field struct / single element array
// then it can be treated as a direct ptr with 1 less deref.
if ctx.direct {
n--
}
return func(buf []byte, ptr unsafe.Pointer) []byte {
// Deref n number times.
for i := n; i > 0; i-- {
if ptr == nil {
// Check for nil values
buf = append(buf, '0')
return buf
}
// Further deref ptr
buf = append(buf, '1')
ptr = *(*unsafe.Pointer)(ptr)
}
if ptr == nil {
// Final nil val check
buf = append(buf, '0')
return buf
}
// Mangle fully deref'd
buf = append(buf, '1')
buf = mangle(buf, ptr)
return buf
}
}
func iter_slice_mangler(ctx typecontext, mangle Mangler) Mangler {
if ctx.rtype == nil || mangle == nil {
panic("bad input")
}
// memory size of elem.
esz := ctx.rtype.Size()
return func(buf []byte, ptr unsafe.Pointer) []byte {
// Get data as slice hdr.
hdr := (*slice_header)(ptr)
for i := 0; i < hdr.len; i++ {
// Mangle data at slice index.
eptr := array_at(hdr.data, esz, i)
buf = mangle(buf, eptr)
buf = append(buf, ',')
}
if hdr.len > 0 {
// Drop final comma.
buf = buf[:len(buf)-1]
}
return buf
}
}
func iter_array_mangler(ctx typecontext, mangle Mangler) Mangler {
if ctx.rtype == nil || mangle == nil {
panic("bad input")
}
// no. array elements.
n := ctx.ntype.Len()
// Optimize
// easy cases.
switch n {
case 0:
return empty_mangler
case 1:
return mangle
}
// memory size of elem.
esz := ctx.rtype.Size()
return func(buf []byte, ptr unsafe.Pointer) []byte {
for i := 0; i < n; i++ {
// Mangle data at array index.
offset := esz * uintptr(i)
eptr := add(ptr, offset)
buf = mangle(buf, eptr)
buf = append(buf, ',')
}
if n > 0 {
// Drop final comma.
buf = buf[:len(buf)-1]
}
return buf
}
}
func iter_struct_mangler(ctx typecontext, manglers []Mangler) Mangler {
if ctx.rtype == nil || len(manglers) != ctx.ntype.NumField() {
panic("bad input")
}
// Optimized easy cases.
switch len(manglers) {
case 0:
return empty_mangler
case 1:
return manglers[0]
}
type field struct {
mangle Mangler
offset uintptr
}
// Bundle together the fields and manglers.
fields := make([]field, ctx.ntype.NumField())
for i := range fields {
rfield := ctx.ntype.Field(i)
fields[i].offset = rfield.Offset
fields[i].mangle = manglers[i]
if fields[i].mangle == nil {
panic("bad input")
}
}
return func(buf []byte, ptr unsafe.Pointer) []byte {
for i := range fields {
// Get struct field ptr via offset.
fptr := add(ptr, fields[i].offset)
// Mangle the struct field data.
buf = fields[i].mangle(buf, fptr)
buf = append(buf, ',')
}
if len(fields) > 0 {
// Drop final comma.
buf = buf[:len(buf)-1]
}
return buf
}
}
func empty_mangler(buf []byte, _ unsafe.Pointer) []byte {
return buf
}
// array_at returns ptr to index in array at ptr, given element size.
func array_at(ptr unsafe.Pointer, esz uintptr, i int) unsafe.Pointer {
return unsafe.Pointer(uintptr(ptr) + esz*uintptr(i))
}
// add returns the ptr addition of starting ptr and a delta.
func add(ptr unsafe.Pointer, delta uintptr) unsafe.Pointer {
return unsafe.Pointer(uintptr(ptr) + delta)
}
type slice_header struct {
data unsafe.Pointer
len int
cap int
}
func eface_data(a any) unsafe.Pointer {
type eface struct{ _, data unsafe.Pointer }
return (*eface)(unsafe.Pointer(&a)).data
}


@ -1,230 +0,0 @@
package mangler
import (
"reflect"
)
// loadMangler is the top-most Mangler load function. It guarantees that a Mangler
// function will be returned for given value interface{} and reflected type. Else panics.
func loadMangler(t reflect.Type) Mangler {
ctx := typecontext{rtype: t}
ctx.direct = true
// Load mangler fn
mng := load(ctx)
if mng != nil {
return mng
}
// No mangler function could be determined
panic("cannot mangle type: " + t.String())
}
// load will load a Mangler or reflect Mangler for given type and iface 'a'.
// Note: allocates new interface value if nil provided, i.e. if coming via reflection.
func load(ctx typecontext) Mangler {
if ctx.rtype == nil {
// There is no reflect type to search by
panic("cannot mangle nil interface{} type")
}
// Search by reflection.
mng := loadReflect(ctx)
if mng != nil {
return mng
}
return nil
}
// loadReflect will load a Mangler (or rMangler) function for the given reflected type info.
// NOTE: this is used as the top level load function for nested reflective searches.
func loadReflect(ctx typecontext) Mangler {
switch ctx.rtype.Kind() {
case reflect.Pointer:
return loadReflectPtr(ctx)
case reflect.String:
return mangle_string
case reflect.Struct:
return loadReflectStruct(ctx)
case reflect.Array:
return loadReflectArray(ctx)
case reflect.Slice:
return loadReflectSlice(ctx)
case reflect.Bool:
return mangle_bool
case reflect.Int,
reflect.Uint,
reflect.Uintptr:
return mangle_int
case reflect.Int8, reflect.Uint8:
return mangle_8bit
case reflect.Int16, reflect.Uint16:
return mangle_16bit
case reflect.Int32, reflect.Uint32:
return mangle_32bit
case reflect.Int64, reflect.Uint64:
return mangle_64bit
case reflect.Float32:
return mangle_32bit
case reflect.Float64:
return mangle_64bit
case reflect.Complex64:
return mangle_64bit
case reflect.Complex128:
return mangle_128bit
default:
return nil
}
}
// loadReflectPtr loads a Mangler (or rMangler) function for a ptr's element type.
// This also handles dereferencing of any further ptr indirections (e.g. ***int).
func loadReflectPtr(ctx typecontext) Mangler {
var n uint
// Iteratively dereference ptrs
for ctx.rtype.Kind() == reflect.Pointer {
ctx.rtype = ctx.rtype.Elem()
n++
}
// Set ptr type.
ctx.isptr = true
// Search for elem type mangler.
if mng := load(ctx); mng != nil {
return deref_ptr_mangler(ctx, mng, n)
}
return nil
}
// loadReflectKnownSlice loads a Mangler function for a
// known slice-of-element type (in this case, primitives).
func loadReflectKnownSlice(ctx typecontext) Mangler {
switch ctx.rtype.Kind() {
case reflect.String:
return mangle_string_slice
case reflect.Bool:
return mangle_bool_slice
case reflect.Int,
reflect.Uint,
reflect.Uintptr:
return mangle_int_slice
case reflect.Int8, reflect.Uint8:
return mangle_8bit_slice
case reflect.Int16, reflect.Uint16:
return mangle_16bit_slice
case reflect.Int32, reflect.Uint32:
return mangle_32bit_slice
case reflect.Int64, reflect.Uint64:
return mangle_64bit_slice
case reflect.Float32:
return mangle_32bit_slice
case reflect.Float64:
return mangle_64bit_slice
case reflect.Complex64:
return mangle_64bit_slice
case reflect.Complex128:
return mangle_128bit_slice
default:
return nil
}
}
// loadReflectSlice ...
func loadReflectSlice(ctx typecontext) Mangler {
// Get nested element type.
elem := ctx.rtype.Elem()
// Set this as nested type.
ctx.set_nested(false)
ctx.rtype = elem
// Preferably look for known slice mangler func
if mng := loadReflectKnownSlice(ctx); mng != nil {
return mng
}
// Use nested mangler iteration.
if mng := load(ctx); mng != nil {
return iter_slice_mangler(ctx, mng)
}
return nil
}
// loadReflectArray ...
func loadReflectArray(ctx typecontext) Mangler {
// Get nested element type.
elem := ctx.rtype.Elem()
// Set this as a nested value type.
direct := ctx.rtype.Len() <= 1
ctx.set_nested(direct)
ctx.rtype = elem
// Use manglers for nested iteration.
if mng := load(ctx); mng != nil {
return iter_array_mangler(ctx, mng)
}
return nil
}
// loadReflectStruct ...
func loadReflectStruct(ctx typecontext) Mangler {
var mngs []Mangler
// Set this as a nested value type.
direct := ctx.rtype.NumField() <= 1
ctx.set_nested(direct)
// Gather manglers for all fields.
for i := 0; i < ctx.ntype.NumField(); i++ {
// Update context with field at index.
ctx.rtype = ctx.ntype.Field(i).Type
// Load mangler.
mng := load(ctx)
if mng == nil {
return nil
}
// Append next to map.
mngs = append(mngs, mng)
}
// Use manglers for nested iteration.
return iter_struct_mangler(ctx, mngs)
}


@ -1,130 +0,0 @@
package mangler
import (
"reflect"
"sync"
"unsafe"
)
// manglers is a map of runtime
// type ptrs => Mangler functions.
var manglers sync.Map
// Mangler is a function that will take an input interface value of known
// type, and append it in mangled serialized form to the given byte buffer.
// While the value type is an interface, the Mangler functions are accessed
// by the value's runtime type pointer, allowing the input value type to be known.
type Mangler func(buf []byte, ptr unsafe.Pointer) []byte
// Get will fetch the Mangler function for given runtime type.
// Note that the returned mangler will be a no-op in the case
// that an incorrect type is passed as the value argument.
func Get(t reflect.Type) Mangler {
var mng Mangler
// Get raw runtime type ptr
uptr := uintptr(eface_data(t))
// Look for a cached mangler
v, ok := manglers.Load(uptr)
if !ok {
// Load mangler function
mng = loadMangler(t)
} else {
// cast cached value
mng = v.(Mangler)
}
return func(buf []byte, ptr unsafe.Pointer) []byte {
// First write the type ptr (this adds
// a unique prefix for each runtime type).
buf = append_uint64(buf, uint64(uptr))
// Finally, mangle value
return mng(buf, ptr)
}
}
// Register will register the given Mangler function for use with vars of given runtime type. This allows
// registering performant manglers for existing types not implementing Mangled (e.g. std library types).
// NOTE: panics if there already exists a Mangler function for given type. Register on init().
func Register(t reflect.Type, m Mangler) {
if t == nil {
// Nil interface{} types cannot be searched by, do not accept
panic("cannot register mangler for nil interface{} type")
}
// Get raw runtime type ptr
uptr := uintptr(eface_data(t))
// Ensure this is a unique encoder
if _, ok := manglers.Load(uptr); ok {
panic("already registered mangler for type: " + t.String())
}
// Cache this encoder func
manglers.Store(uptr, m)
}
// Append will append the mangled form of input value 'a' to buffer 'b'.
// See mangler.String() for more information on mangled output.
func Append(b []byte, a any) []byte {
var mng Mangler
// Get reflect type of 'a'
t := reflect.TypeOf(a)
// Get raw runtime type ptr
uptr := uintptr(eface_data(t))
// Look for a cached mangler
v, ok := manglers.Load(uptr)
if !ok {
// Load into cache
mng = loadMangler(t)
manglers.Store(uptr, mng)
} else {
// cast cached value
mng = v.(Mangler)
}
// First write the type ptr (this adds
// a unique prefix for each runtime type).
b = append_uint64(b, uint64(uptr))
// Finally, mangle value
ptr := eface_data(a)
return mng(b, ptr)
}
// String will return the mangled format of input value 'a'. This
// mangled output will be unique for all default supported input types
// during a single runtime instance. Uniqueness cannot be guaranteed
// between separate runtime instances (whether running concurrently, or
// the same application running at different times).
//
// The exact formatting of the output data should not be relied upon,
// only that it is unique given the above constraints. Generally though,
// the mangled output is the binary formatted text of given input data.
//
// Uniqueness is guaranteed for similar input data of differing types
// (e.g. string("hello world") vs. []byte("hello world")) by prefixing
// mangled output with the input data's runtime type pointer.
//
// Default supported types include:
// - string
// - bool
// - int,int8,int16,int32,int64
// - uint,uint8,uint16,uint32,uint64,uintptr
// - float32,float64
// - complex64,complex128
// - arbitrary structs
// - all type aliases of above
// - all pointers to the above
// - all slices / arrays of the above
func String(a any) string {
b := Append(make([]byte, 0, 32), a)
return *(*string)(unsafe.Pointer(&b))
}

vendor/codeberg.org/gruf/go-mangler/v2/array.go (generated, vendored; new file, 55 lines)

@ -0,0 +1,55 @@
package mangler
import (
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// iterArrayType returns a Mangler capable of iterating
// and mangling the given array type currently in TypeIter{}.
// note this will fetch sub-Mangler for array element type.
func iterArrayType(t xunsafe.TypeIter) Mangler {
// Array element type.
elem := t.Type.Elem()
// Get nested elem TypeIter with appropriate flags.
flags := xunsafe.ReflectArrayElemFlags(t.Flag, elem)
et := t.Child(elem, flags)
// Get elem mangler.
fn := loadOrGet(et)
if fn == nil {
return nil
}
// Array element in-memory size.
esz := t.Type.Elem().Size()
// No of elements.
n := t.Type.Len()
switch n {
case 0:
return empty_mangler
case 1:
return fn
default:
return func(buf []byte, ptr unsafe.Pointer) []byte {
for i := 0; i < n; i++ {
// Mangle data at array index.
offset := esz * uintptr(i)
eptr := add(ptr, offset)
buf = fn(buf, eptr)
buf = append(buf, ',')
}
if n > 0 {
// Drop final comma.
buf = buf[:len(buf)-1]
}
return buf
}
}
}

vendor/codeberg.org/gruf/go-mangler/v2/cache.go (generated, vendored; new file, 62 lines)

@ -0,0 +1,62 @@
package mangler
import (
"sync/atomic"
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
var manglers cache
// cache is a concurrency-safe map[xunsafe.TypeInfo]Mangler
// cache, designed for heavy reads but with unfortunately expensive
// writes. it is designed such that after some initial load period
// in which functions are cached by types, all future ops are reads.
type cache struct{ p unsafe.Pointer }
// Get will check cache for mangler func under key.
func (c *cache) Get(t xunsafe.TypeInfo) Mangler {
if p := c.load(); p != nil {
return (*p)[t]
}
return nil
}
// Put will place given mangler func in cache under key, if not already exists.
func (c *cache) Put(t xunsafe.TypeInfo, fn Mangler) {
for {
p := c.load()
var cache map[xunsafe.TypeInfo]Mangler
if p != nil {
if _, ok := (*p)[t]; ok {
return
}
cache = make(map[xunsafe.TypeInfo]Mangler, len(*p)+1)
for key, value := range *p {
cache[key] = value
}
} else {
cache = make(map[xunsafe.TypeInfo]Mangler, 1)
}
cache[t] = fn
if c.cas(p, &cache) {
return
}
}
}
// load is a typed wrapper around atomic.LoadPointer().
func (c *cache) load() *map[xunsafe.TypeInfo]Mangler {
return (*map[xunsafe.TypeInfo]Mangler)(atomic.LoadPointer(&c.p))
}
// cas is a typed wrapper around atomic.CompareAndSwapPointer().
func (c *cache) cas(old, new *map[xunsafe.TypeInfo]Mangler) bool {
return atomic.CompareAndSwapPointer(&c.p, unsafe.Pointer(old), unsafe.Pointer(new))
}
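
Put is a copy-on-write loop: readers cost one atomic load, while each (rare) write clones the whole map and publishes it with a CAS, retrying on contention. A generic sketch of the same pattern using Go 1.19's atomic.Pointer (names here are hypothetical, not part of this package):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// cowMap trades expensive writes for lock-free reads:
// Get is a single atomic load, Put clones-and-swaps.
type cowMap[K comparable, V any] struct {
	p atomic.Pointer[map[K]V]
}

func (c *cowMap[K, V]) Get(k K) (v V, ok bool) {
	if m := c.p.Load(); m != nil {
		v, ok = (*m)[k]
	}
	return
}

func (c *cowMap[K, V]) Put(k K, v V) {
	for {
		old := c.p.Load()
		var next map[K]V
		if old != nil {
			if _, ok := (*old)[k]; ok {
				return // first write wins, as in Put above
			}
			next = make(map[K]V, len(*old)+1)
			for key, val := range *old {
				next[key] = val
			}
		} else {
			next = make(map[K]V, 1)
		}
		next[k] = v
		if c.p.CompareAndSwap(old, &next) {
			return // published; concurrent writers retry
		}
	}
}

func main() {
	var m cowMap[string, int]
	m.Put("a", 1)
	fmt.Println(m.Get("a")) // 1 true
}
```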

vendor/codeberg.org/gruf/go-mangler/v2/helpers.go (generated, vendored; new file, 43 lines)

@ -0,0 +1,43 @@
package mangler
import (
"unsafe"
)
func append_uint16(b []byte, u uint16) []byte {
return append(b, // LE
byte(u),
byte(u>>8),
)
}
func append_uint32(b []byte, u uint32) []byte {
return append(b, // LE
byte(u),
byte(u>>8),
byte(u>>16),
byte(u>>24),
)
}
func append_uint64(b []byte, u uint64) []byte {
return append(b, // LE
byte(u),
byte(u>>8),
byte(u>>16),
byte(u>>24),
byte(u>>32),
byte(u>>40),
byte(u>>48),
byte(u>>56),
)
}
func empty_mangler(buf []byte, _ unsafe.Pointer) []byte {
return buf
}
// add returns the ptr addition of starting ptr and a delta.
func add(ptr unsafe.Pointer, delta uintptr) unsafe.Pointer {
return unsafe.Pointer(uintptr(ptr) + delta)
}
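
These hand-rolled writers emit little-endian byte order; assuming equivalence with encoding/binary is the intent, a quick property check:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// append_uint64 is copied from helpers.go above.
func append_uint64(b []byte, u uint64) []byte {
	return append(b, // LE
		byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
		byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56),
	)
}

func main() {
	const u = 0x0123456789abcdef
	got := append_uint64(nil, u)
	want := binary.LittleEndian.AppendUint64(nil, u)
	fmt.Println(bytes.Equal(got, want)) // true
}
```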

vendor/codeberg.org/gruf/go-mangler/v2/load.go (generated, vendored; new file, 150 lines)

@ -0,0 +1,150 @@
package mangler
import (
"reflect"
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// loadOrStore first checks the cache for a Mangler
// function, else generates one by calling get().
// note: this does store generated funcs in cache.
func loadOrStore(t xunsafe.TypeIter) Mangler {
// Get cache key.
key := t.TypeInfo
// Check cache for func.
fn := manglers.Get(key)
if fn == nil {
// Generate new mangler
// func for this type.
fn = get(t)
if fn == nil {
return nil
}
// Store func in cache.
manglers.Put(key, fn)
}
return fn
}
// loadOrGet first checks the cache for a Mangler
// function, else generates one by calling get().
// note: it does not store the function in cache.
func loadOrGet(t xunsafe.TypeIter) Mangler {
// Check cache for mangler func.
fn := manglers.Get(t.TypeInfo)
if fn == nil {
// Generate new mangler
// func for this type.
fn = get(t)
}
return fn
}
var (
// reflectTypeType is the reflected type of the reflect type,
// used in get() to prevent iteration of internal ABI structs.
reflectTypeType = reflect.TypeOf(reflect.TypeOf(0))
)
// get attempts to generate a new Mangler function
// capable of mangling a ptr of given type information.
func get(t xunsafe.TypeIter) (fn Mangler) {
defer func() {
if fn == nil {
// nothing more
// we can do.
return
}
if t.Parent != nil {
// We're only interested
// in wrapping top-level.
return
}
// Get reflected type ptr for prefix.
ptr := xunsafe.ReflectTypeData(t.Type)
uptr := uintptr(ptr)
// Outer fn.
mng := fn
// Wrap the mangler func to prepend type pointer.
fn = func(buf []byte, ptr unsafe.Pointer) []byte {
buf = append_uint64(buf, uint64(uptr))
return mng(buf, ptr)
}
}()
if t.Type == nil {
// nil type.
return nil
}
if t.Type == reflectTypeType {
// DO NOT iterate down internal ABI
// types, some are in non-GC memory.
return nil
}
// Check if type supports a known method receiver.
if fn := getMethodType(t); fn != nil {
return fn
}
if !visit(t) {
// On type recursion simply
// mangle as raw pointer.
return mangle_int
}
// Get func for type kind.
switch t.Type.Kind() {
case reflect.Pointer:
return derefPointerType(t)
case reflect.Struct:
return iterStructType(t)
case reflect.Array:
return iterArrayType(t)
case reflect.Slice:
return iterSliceType(t)
case reflect.Map:
return iterMapType(t)
case reflect.String:
return mangle_string
case reflect.Bool:
return mangle_bool
case reflect.Int,
reflect.Uint,
reflect.Uintptr:
return mangle_int
case reflect.Int8, reflect.Uint8:
return mangle_8bit
case reflect.Int16, reflect.Uint16:
return mangle_16bit
case reflect.Int32, reflect.Uint32:
return mangle_32bit
case reflect.Int64, reflect.Uint64:
return mangle_64bit
case reflect.Float32:
return mangle_32bit
case reflect.Float64:
return mangle_64bit
case reflect.Complex64:
return mangle_64bit
case reflect.Complex128:
return mangle_128bit
default:
return nil
}
}

vendor/codeberg.org/gruf/go-mangler/v2/mangle.go (generated, vendored; new file, 93 lines)

@ -0,0 +1,93 @@
package mangler
import (
"fmt"
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// Mangler is a function that will take an input value of known type,
// and append it in mangled serialized form to the given byte buffer.
type Mangler func(buf []byte, ptr unsafe.Pointer) []byte
// Get will fetch the Mangler function for given runtime type information.
// The required argument is of type xunsafe.TypeIter{} as unsafe pointer
// access requires further contextual information like type nesting.
func Get(t xunsafe.TypeIter) Mangler {
t.Parent = nil // enforce type prefix
fn := loadOrStore(t)
if fn == nil {
panic(fmt.Sprintf("cannot mangle type: %s", t.Type))
}
return fn
}
// GetNoLoad is functionally similar to Get(),
// without caching the resulting Mangler.
func GetNoLoad(t xunsafe.TypeIter) Mangler {
t.Parent = nil // enforce type prefix
fn := loadOrGet(t)
if fn == nil {
panic(fmt.Sprintf("cannot mangle type: %s", t.Type))
}
return fn
}
// Append will append the mangled form of input value 'a' to buffer 'b'.
//
// See mangler.String() for more information on mangled output.
func Append(b []byte, a any) []byte {
t := xunsafe.TypeIterFrom(a)
p := xunsafe.UnpackEface(a)
return Get(t)(b, p)
}
// AppendMulti appends all mangled forms of input value(s) 'a' to buffer 'b',
// each followed by a '.' separator character. When manglers are already cached
// for all types in 'a', this will be faster than multiple calls to Append().
//
// See mangler.String() for more information on mangled output.
func AppendMulti(b []byte, a ...any) []byte {
if p := manglers.load(); p != nil {
b4 := len(b)
for _, a := range a {
t := xunsafe.TypeIterFrom(a)
m := (*p)[t.TypeInfo]
if m == nil {
b = b[:b4]
goto slow
}
b = m(b, xunsafe.UnpackEface(a))
b = append(b, '.')
}
return b
}
slow:
for _, a := range a {
b = Append(b, a)
b = append(b, '.')
}
return b
}
// String will return the mangled format of input value 'a'. This
// mangled output will be unique for all default supported input types
// during a single runtime instance. Uniqueness cannot be guaranteed
// between separate runtime instances (whether running concurrently, or
// the same application running at different times).
//
// The exact formatting of the output data should not be relied upon,
// only that it is unique given the above constraints. Generally though,
// the mangled output is the binary formatted text of given input data.
//
// Uniqueness is guaranteed for similar input data of differing types
// (e.g. string("hello world") vs. []byte("hello world")) by prefixing
// mangled output with the input data's runtime type pointer.
//
// Default supported types include all concrete (i.e. non-interface{})
// data types, and interfaces implementing Mangleable{}.
func String(a any) string {
b := Append(make([]byte, 0, 32), a)
return *(*string)(unsafe.Pointer(&b))
}
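
A short sketch of the documented guarantee; since the exact output bytes are unspecified, it only checks distinctness:

```go
package main

import (
	"fmt"

	"codeberg.org/gruf/go-mangler/v2"
)

func main() {
	// Identical bytes, different runtime types: the
	// type-pointer prefix keeps the outputs distinct.
	s := mangler.String("hello world")
	b := mangler.String([]byte("hello world"))
	fmt.Println(s == b) // false

	// Append reuses a caller-provided buffer.
	buf := mangler.Append(make([]byte, 0, 32), 42)
	fmt.Println(len(buf) > 0) // true
}
```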

vendor/codeberg.org/gruf/go-mangler/v2/map.go (generated, vendored; new file, 80 lines)

@ -0,0 +1,80 @@
package mangler
import (
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// iterMapType returns a Mangler capable of iterating
// and mangling the given map type currently in TypeIter{}.
// note this will fetch sub-Manglers for key / value types.
func iterMapType(t xunsafe.TypeIter) Mangler {
// Key / value types.
key := t.Type.Key()
elem := t.Type.Elem()
// Get nested k / v TypeIters with appropriate flags.
flagsKey := xunsafe.ReflectMapKeyFlags(key)
flagsVal := xunsafe.ReflectMapElemFlags(elem)
kt := t.Child(key, flagsKey)
vt := t.Child(elem, flagsVal)
// Get key mangler.
kfn := loadOrGet(kt)
if kfn == nil {
return nil
}
// Get value mangler.
vfn := loadOrGet(vt)
if vfn == nil {
return nil
}
// Final map type.
rtype := t.Type
flags := t.Flag
return func(buf []byte, ptr unsafe.Pointer) []byte {
if ptr == nil || *(*unsafe.Pointer)(ptr) == nil {
// Append nil indicator.
buf = append(buf, '0')
return buf
}
// Build reflect value, and then a map iterator.
v := xunsafe.BuildReflectValue(rtype, ptr, flags)
i := xunsafe.GetMapIter(v)
// Before len.
l := len(buf)
// Append not-nil flag.
buf = append(buf, '1')
for i.Next() {
// Pass to map key func.
ptr = xunsafe.Map_Key(i)
buf = kfn(buf, ptr)
// Add key separator.
buf = append(buf, ':')
// Pass to map elem func.
ptr = xunsafe.Map_Elem(i)
buf = vfn(buf, ptr)
// Add comma separator.
buf = append(buf, ',')
}
if len(buf) != l {
// Drop final comma.
buf = buf[:len(buf)-1]
}
return buf
}
}

vendor/codeberg.org/gruf/go-mangler/v2/method.go (generated, vendored; new file, 105 lines)

@ -0,0 +1,105 @@
package mangler
import (
"reflect"
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
var (
// mangleable type for implement checks.
mangleableType = reflect.TypeFor[Mangleable]()
)
type Mangleable interface {
Mangle(dst []byte) []byte
}
// getMethodType returns a *possible* Mangler to handle case
// of a type that implements any known interface{} types, else nil.
func getMethodType(t xunsafe.TypeIter) Mangler {
switch {
case t.Type.Implements(mangleableType):
switch t.Type.Kind() {
case reflect.Interface:
return getInterfaceMangleableType(t)
default:
return getConcreteMangleableType(t)
}
default:
return nil
}
}
// getInterfaceMangleableType returns a Mangler to handle case of an interface{}
// type that implements Mangleable{}, i.e. Mangleable{} itself and any superset of.
func getInterfaceMangleableType(t xunsafe.TypeIter) Mangler {
switch t.Indirect() && !t.IfaceIndir() {
case true:
return func(buf []byte, ptr unsafe.Pointer) []byte {
ptr = *(*unsafe.Pointer)(ptr)
if ptr == nil || (*xunsafe.Abi_NonEmptyInterface)(ptr).Data == nil {
buf = append(buf, '0')
return buf
}
v := *(*Mangleable)(ptr)
buf = append(buf, '1')
buf = v.Mangle(buf)
return buf
}
case false:
return func(buf []byte, ptr unsafe.Pointer) []byte {
if ptr == nil || (*xunsafe.Abi_NonEmptyInterface)(ptr).Data == nil {
buf = append(buf, '0')
return buf
}
v := *(*Mangleable)(ptr)
buf = append(buf, '1')
buf = v.Mangle(buf)
return buf
}
default:
panic("unreachable")
}
}
// getConcreteMangleableType returns a Mangler to handle case of concrete
// (i.e. non-interface{}) type that has a Mangleable{} method receiver.
func getConcreteMangleableType(t xunsafe.TypeIter) Mangler {
itab := xunsafe.GetIfaceITab[Mangleable](t.Type)
switch {
case t.Indirect() && !t.IfaceIndir():
return func(buf []byte, ptr unsafe.Pointer) []byte {
ptr = *(*unsafe.Pointer)(ptr)
if ptr == nil {
buf = append(buf, '0')
return buf
}
v := *(*Mangleable)(xunsafe.PackIface(itab, ptr))
buf = append(buf, '1')
buf = v.Mangle(buf)
return buf
}
case t.Type.Kind() == reflect.Pointer && t.Type.Implements(mangleableType):
// if the interface implementation is received by
// value type, the pointer type will also support
// it but it requires an extra dereference check.
return func(buf []byte, ptr unsafe.Pointer) []byte {
if ptr == nil {
buf = append(buf, '0')
return buf
}
v := *(*Mangleable)(xunsafe.PackIface(itab, ptr))
buf = append(buf, '1')
buf = v.Mangle(buf)
return buf
}
default:
return func(buf []byte, ptr unsafe.Pointer) []byte {
v := *(*Mangleable)(xunsafe.PackIface(itab, ptr))
buf = v.Mangle(buf)
return buf
}
}
}
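
A sketch of opting a concrete type into the method fast path (the ID type is hypothetical); getMethodType() will select it ahead of any reflection-driven mangler:

```go
package main

import (
	"fmt"

	"codeberg.org/gruf/go-mangler/v2"
)

// ID implements Mangleable{}, so it is mangled by its
// own method rather than the reflect-generated path.
type ID uint64

func (id ID) Mangle(dst []byte) []byte {
	return append(dst, // LE
		byte(id), byte(id>>8), byte(id>>16), byte(id>>24),
		byte(id>>32), byte(id>>40), byte(id>>48), byte(id>>56),
	)
}

func main() {
	buf := mangler.Append(nil, ID(42))
	fmt.Println(len(buf)) // 16: 8-byte type prefix + 8-byte payload
}
```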

vendor/codeberg.org/gruf/go-mangler/v2/pointer.go (generated, vendored; new file, 81 lines)

@ -0,0 +1,81 @@
package mangler
import (
"reflect"
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// derefPointerType returns a Mangler capable of dereferencing
// and formatting the given pointer type currently in TypeIter{}.
// note this will fetch a sub-Mangler for resulting value type.
func derefPointerType(t xunsafe.TypeIter) Mangler {
var derefs int
var indirects int64
rtype := t.Type
flags := t.Flag
// Iteratively dereference pointer types.
for rtype.Kind() == reflect.Pointer {
// Only if this is actually indirect memory do we
// perform a dereference, otherwise we just skip
// over and emit the dereference indicator, i.e. '1'.
if flags&xunsafe.Reflect_flagIndir != 0 {
indirects |= 1 << derefs
}
derefs++
// Get next elem type.
rtype = rtype.Elem()
// Get next set of dereferenced element type flags.
flags = xunsafe.ReflectPointerElemFlags(flags, rtype)
}
// Ensure this is a reasonable number of derefs.
if derefs > 4*int(unsafe.Sizeof(indirects)) {
return nil
}
// Wrap value as TypeIter.
vt := t.Child(rtype, flags)
// Get value mangler.
fn := loadOrGet(vt)
if fn == nil {
return nil
}
return func(buf []byte, ptr unsafe.Pointer) []byte {
for i := 0; i < derefs; i++ {
switch {
case indirects&1<<i == 0:
// No dereference needed.
buf = append(buf, '1')
case ptr == nil:
// Nil value, return here.
buf = append(buf, '0')
return buf
default:
// Further deref ptr.
buf = append(buf, '1')
ptr = *(*unsafe.Pointer)(ptr)
}
}
if ptr == nil {
// Final nil val check.
buf = append(buf, '0')
return buf
}
// Mangle fully deref'd.
buf = append(buf, '1')
buf = fn(buf, ptr)
return buf
}
}
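
The '0'/'1' bytes encode the nil structure of the pointer chain, so e.g. a nil **int, a **int pointing at a nil *int, and a fully set **int all mangle to distinct prefixes. A simplified standalone sketch of the idea (not the exact byte layout used above):

```go
package main

import "fmt"

// derefMangle mimics the scheme for a fixed **int: each
// hop appends '1' (non-nil, keep going) or '0' (nil, stop),
// then the fully dereferenced value is appended.
func derefMangle(buf []byte, p **int) []byte {
	if p == nil {
		return append(buf, '0')
	}
	buf = append(buf, '1')
	if *p == nil {
		return append(buf, '0')
	}
	buf = append(buf, '1')
	return fmt.Appendf(buf, "%d", **p)
}

func main() {
	v := 7
	pv := &v
	fmt.Printf("%q\n", derefMangle(nil, nil))       // "0"
	fmt.Printf("%q\n", derefMangle(nil, new(*int))) // "10"
	fmt.Printf("%q\n", derefMangle(nil, &pv))       // "117"
}
```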

vendor/codeberg.org/gruf/go-mangler/v2/slice.go (generated, vendored; new file, 95 lines)

@ -0,0 +1,95 @@
package mangler
import (
"reflect"
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// iterSliceType returns a Mangler capable of iterating
// and mangling the given slice type currently in TypeIter{}.
// note this will fetch sub-Mangler for slice element type.
func iterSliceType(t xunsafe.TypeIter) Mangler {
// Get nested element type.
elem := t.Type.Elem()
esz := elem.Size()
// Get nested elem TypeIter{} with flags.
flags := xunsafe.ReflectSliceElemFlags(elem)
et := t.Child(elem, flags)
// Prefer to use a known slice mangler func.
if fn := mangleKnownSlice(et); fn != nil {
return fn
}
// Get elem mangler.
fn := loadOrGet(et)
if fn == nil {
return nil
}
return func(buf []byte, ptr unsafe.Pointer) []byte {
// Get data as unsafe slice header.
hdr := (*xunsafe.Unsafeheader_Slice)(ptr)
if hdr == nil || hdr.Data == nil {
// Append nil indicator.
buf = append(buf, '0')
return buf
}
// Append not-nil flag.
buf = append(buf, '1')
for i := 0; i < hdr.Len; i++ {
// Mangle at array index.
offset := esz * uintptr(i)
ptr = add(hdr.Data, offset)
buf = fn(buf, ptr)
buf = append(buf, ',')
}
if hdr.Len > 0 {
// Drop final comma.
buf = buf[:len(buf)-1]
}
return buf
}
}
// mangleKnownSlice loads a Mangler function for a
// known slice-of-element type (in this case, primitives).
func mangleKnownSlice(t xunsafe.TypeIter) Mangler {
switch t.Type.Kind() {
case reflect.String:
return mangle_string_slice
case reflect.Bool:
return mangle_bool_slice
case reflect.Int,
reflect.Uint,
reflect.Uintptr:
return mangle_int_slice
case reflect.Int8, reflect.Uint8:
return mangle_8bit_slice
case reflect.Int16, reflect.Uint16:
return mangle_16bit_slice
case reflect.Int32, reflect.Uint32:
return mangle_32bit_slice
case reflect.Int64, reflect.Uint64:
return mangle_64bit_slice
case reflect.Float32:
return mangle_32bit_slice
case reflect.Float64:
return mangle_64bit_slice
case reflect.Complex64:
return mangle_64bit_slice
case reflect.Complex128:
return mangle_128bit_slice
default:
return nil
}
}

vendor/codeberg.org/gruf/go-mangler/v2/struct.go (generated, vendored; new file, 75 lines)

@ -0,0 +1,75 @@
package mangler
import (
"unsafe"
"codeberg.org/gruf/go-xunsafe"
)
// field stores the minimum necessary
// data for iterating and mangling
// each field in a given struct.
type field struct {
mangle Mangler
offset uintptr
}
// iterStructType returns a Mangler capable of iterating
// and mangling the given struct type currently in TypeIter{}.
// note this will fetch sub-Manglers for each struct field.
func iterStructType(t xunsafe.TypeIter) Mangler {
// Number of struct fields.
n := t.Type.NumField()
// Gather mangler functions.
fields := make([]field, n)
for i := 0; i < n; i++ {
// Get struct field at index.
sfield := t.Type.Field(i)
rtype := sfield.Type
// Get nested field TypeIter with appropriate flags.
flags := xunsafe.ReflectStructFieldFlags(t.Flag, rtype)
ft := t.Child(sfield.Type, flags)
// Get field mangler.
fn := loadOrGet(ft)
if fn == nil {
return nil
}
// Set field info.
fields[i] = field{
mangle: fn,
offset: sfield.Offset,
}
}
// Handle no. fields.
switch len(fields) {
case 0:
return empty_mangler
case 1:
return fields[0].mangle
default:
return func(buf []byte, ptr unsafe.Pointer) []byte {
for i := range fields {
// Get struct field ptr via offset.
fptr := add(ptr, fields[i].offset)
// Mangle the struct field data.
buf = fields[i].mangle(buf, fptr)
buf = append(buf, ',')
}
if len(fields) > 0 {
// Drop final comma.
buf = buf[:len(buf)-1]
}
return buf
}
}
}

vendor/codeberg.org/gruf/go-mangler/v2/type.go (generated, vendored; new file, 20 lines)

@ -0,0 +1,20 @@
package mangler
import (
"codeberg.org/gruf/go-xunsafe"
)
// visit checks if current type has already
// appeared in the TypeIter{}'s parent hierarchy.
func visit(iter xunsafe.TypeIter) bool {
t := iter.Type
// Check if type is already encountered further up tree.
for node := iter.Parent; node != nil; node = node.Parent {
if node.Type == t {
return false
}
}
return true
}
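
The practical effect is that self-referential types terminate: when a type reappears in its own parent chain, get() falls back to mangling the pointer as a raw integer instead of recursing forever. A sketch (Node is hypothetical):

```go
package main

import (
	"fmt"

	"codeberg.org/gruf/go-mangler/v2"
)

// Node is self-referential; without visit()'s check the
// mangler generator would recurse on *Node indefinitely.
type Node struct {
	Val  int
	Next *Node
}

func main() {
	n := Node{Val: 1, Next: &Node{Val: 2}}
	fmt.Println(len(mangler.String(n)) > 0) // true, terminates
}
```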


@ -1,4 +1,4 @@
//go:build go1.22 && !go1.25
//go:build go1.22 && !go1.26
package mutexes
@ -21,7 +21,7 @@ func syncCond_last_ticket(c *sync.Cond) uint32 {
// this does not alter the first
// 2 fields which are all we need.
type notifyList struct {
_ atomic.Uint32
_ uint32
notify uint32
// ... other fields
}
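
Only the second field (notify) is read through this mirror struct, so the placeholder merely has to preserve the runtime's field offsets; uint32 and atomic.Uint32 are both 4 bytes with 4-byte alignment, so the swap is layout-neutral. A compile-time size assertion along these lines (a sketch, not part of the diff) would catch any divergence:

```go
package main

import (
	"sync/atomic"
	"unsafe"
)

// Fails to compile if the placeholder types ever differ
// in size, which would shift the notify field's offset.
var _ = [1]struct{}{}[unsafe.Sizeof(atomic.Uint32{})-unsafe.Sizeof(uint32(0))]

func main() {}
```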


@ -2,7 +2,7 @@
A library with a series of performant data types with automated struct value indexing. Indexing is supported via arbitrary combinations of fields, and in the case of the cache type, negative results (errors!) are also supported.
Under the hood, go-structr maintains a hashmap per index, where each hashmap is a hashmap keyed by serialized input key type. This is handled by the incredibly performant serialization library [go-mangler](https://codeberg.org/gruf/go-mangler), which at this point in time supports *most* arbitrary types (other than maps, channels, functions), so feel free to index by almost *anything*!
Under the hood, go-structr maintains a hashmap per index, where each hashmap is a hashmap keyed by serialized input key type. This is handled by the incredibly performant serialization library [go-mangler/v2](https://codeberg.org/gruf/go-mangler), which at this point in time supports *most* arbitrary types (other than channels, functions), so feel free to index by almost *anything*!
See the [docs](https://pkg.go.dev/codeberg.org/gruf/go-structr) for more API information.


@ -3,7 +3,6 @@ package structr
import (
"context"
"errors"
"reflect"
"sync"
"unsafe"
)
@ -83,7 +82,7 @@ type Cache[StructType any] struct {
// Init initializes the cache with given configuration
// including struct fields to index, and necessary fns.
func (c *Cache[T]) Init(config CacheConfig[T]) {
t := reflect.TypeOf((*T)(nil)).Elem()
t := get_type_iter[T]()
if len(config.Indices) == 0 {
panic("no indices provided")
@ -182,9 +181,14 @@ func (c *Cache[T]) Put(values ...T) {
// Acquire lock.
c.mutex.Lock()
// Wrap unlock to only do once.
unlock := once(c.mutex.Unlock)
defer unlock()
// Ensure mutex
// gets unlocked.
var unlocked bool
defer func() {
if !unlocked {
c.mutex.Unlock()
}
}()
// Check cache init.
if c.copy == nil {
@ -202,9 +206,9 @@ func (c *Cache[T]) Put(values ...T) {
// Get func ptrs.
invalid := c.invalid
// Done with
// the lock.
unlock()
// Done with lock.
c.mutex.Unlock()
unlocked = true
if invalid != nil {
// Pass all invalidated values
@ -241,9 +245,14 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
// Acquire lock.
c.mutex.Lock()
// Wrap unlock to only do once.
unlock := once(c.mutex.Unlock)
defer unlock()
// Ensure mutex
// gets unlocked.
var unlocked bool
defer func() {
if !unlocked {
c.mutex.Unlock()
}
}()
// Check init'd.
if c.copy == nil ||
@ -276,9 +285,9 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
// Get func ptrs.
ignore := c.ignore
// Done with
// the lock.
unlock()
// Done with lock.
c.mutex.Unlock()
unlocked = true
if ok {
// item found!
@ -295,6 +304,7 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
// Acquire lock.
c.mutex.Lock()
unlocked = false
// Index this new loaded item.
// Note this handles copying of
@ -308,6 +318,7 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
// Done with lock.
c.mutex.Unlock()
unlocked = true
return val, err
}
@ -328,9 +339,14 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
// Acquire lock.
c.mutex.Lock()
// Wrap unlock to only do once.
unlock := once(c.mutex.Unlock)
defer unlock()
// Ensure mutex
// gets unlocked.
var unlocked bool
defer func() {
if !unlocked {
c.mutex.Unlock()
}
}()
// Check init'd.
if c.copy == nil {
@ -366,9 +382,9 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
}
}
// Done with
// the lock.
unlock()
// Done with lock.
c.mutex.Unlock()
unlocked = true
if len(toLoad) == 0 {
// We loaded everything!
@ -383,6 +399,7 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
// Acquire lock.
c.mutex.Lock()
unlocked = false
// Store all uncached values.
for i := range uncached {
@ -394,6 +411,7 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
// Done with lock.
c.mutex.Unlock()
unlocked = true
// Append uncached to return values.
values = append(values, uncached...)
@ -685,7 +703,7 @@ func (c *Cache[T]) store_error(index *Index, key string, err error) {
}
func (c *Cache[T]) delete(i *indexed_item) {
for len(i.indexed) != 0 {
for len(i.indexed) > 0 {
// Pop last indexed entry from list.
entry := i.indexed[len(i.indexed)-1]
i.indexed[len(i.indexed)-1] = nil
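
The once()-wrapped unlock is replaced throughout by an explicit unlocked flag; presumably this avoids allocating a closure per call and, unlike once(), lets the function re-lock and release again around the load callback. A reduced sketch of the pattern:

```go
package main

import "sync"

func loadOne(mu *sync.Mutex, load func() int) int {
	mu.Lock()

	// Ensure mutex
	// gets unlocked.
	var unlocked bool
	defer func() {
		if !unlocked {
			mu.Unlock()
		}
	}()

	// ... check cache under lock ...

	// Done with lock.
	mu.Unlock()
	unlocked = true

	v := load() // slow path, no lock held

	// Re-acquire to store result.
	mu.Lock()
	unlocked = false

	// ... store v ...

	// Done with lock.
	mu.Unlock()
	unlocked = true
	return v
}

func main() {
	var mu sync.Mutex
	_ = loadOne(&mu, func() int { return 42 })
}
```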


@ -1,7 +1,6 @@
package structr
import (
"fmt"
"os"
"reflect"
"strings"
@ -9,6 +8,7 @@ import (
"unsafe"
"codeberg.org/gruf/go-byteutil"
"codeberg.org/gruf/go-xunsafe"
)
// IndexConfig defines config variables
@ -29,7 +29,7 @@ type IndexConfig struct {
// is nil then it will not be indexed.
//
// Field types supported include any of those
// supported by the `go-mangler` library.
// supported by the `go-mangler/v2` library.
Fields string
// Multiple indicates whether to accept multiple
@ -58,7 +58,7 @@ type IndexConfig struct {
type Index struct {
// ptr is a pointer to
// the source Cache/Queue
// the source type this
// index is attached to.
ptr unsafe.Pointer
@ -68,14 +68,12 @@ type Index struct {
name string
// backing data store of the index, containing
// the cached results contained within wrapping
// index_entry{} which also contains the exact
// key each result is stored under. the hash map
// only keys by the xxh3 hash checksum for speed.
// list{}s of index_entry{}s which each contain
// the exact key each result is stored under.
data hashmap
// struct fields encompassed by
// keys (+ hashes) of this index.
// struct fields encompassed
// by keys of this index.
fields []struct_field
// index flags:
@ -89,55 +87,14 @@ func (i *Index) Name() string {
return i.name
}
// Key generates Key{} from given parts for
// the type of lookup this Index uses in cache.
// NOTE: panics on incorrect no. parts / types given.
func (i *Index) Key(parts ...any) Key {
ptrs := make([]unsafe.Pointer, len(parts))
for x, part := range parts {
ptrs[x] = eface_data(part)
}
buf := new_buffer()
key := i.key(buf, ptrs)
free_buffer(buf)
return Key{
raw: parts,
key: key,
}
}
// Keys generates []Key{} from given (multiple) parts
// for the type of lookup this Index uses in the cache.
// NOTE: panics on incorrect no. parts / types given.
func (i *Index) Keys(parts ...[]any) []Key {
keys := make([]Key, 0, len(parts))
buf := new_buffer()
for _, parts := range parts {
ptrs := make([]unsafe.Pointer, len(parts))
for x, part := range parts {
ptrs[x] = eface_data(part)
}
key := i.key(buf, ptrs)
if key == "" {
continue
}
keys = append(keys, Key{
raw: parts,
key: key,
})
}
free_buffer(buf)
return keys
}
// init will initialize the cache with given type, config and capacity.
func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
func (i *Index) init(t xunsafe.TypeIter, cfg IndexConfig, cap int) {
switch {
// The only 2 types we support are
// structs, and ptrs to a struct.
case t.Kind() == reflect.Struct:
case t.Kind() == reflect.Pointer &&
t.Elem().Kind() == reflect.Struct:
case t.Type.Kind() == reflect.Struct:
case t.Type.Kind() == reflect.Pointer &&
t.Type.Elem().Kind() == reflect.Struct:
default:
panic("index only support struct{} and *struct{}")
}
@ -164,8 +121,8 @@ func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
// Split name to account for nesting.
names := strings.Split(name, ".")
// Look for usable struct field.
i.fields[x] = find_field(t, names)
// Look for struct field by names.
i.fields[x], _ = find_field(t, names)
}
// Initialize store for
@ -219,15 +176,12 @@ func (i *Index) get(key string, hook func(*indexed_item)) {
}
}
// key uses hasher to generate Key{} from given raw parts.
// key ...
func (i *Index) key(buf *byteutil.Buffer, parts []unsafe.Pointer) string {
buf.B = buf.B[:0]
if len(parts) != len(i.fields) {
panic(fmt.Sprintf("incorrect number key parts: want=%d received=%d",
len(i.fields),
len(parts),
))
panic(assert("len(parts) = len(i.fields)"))
}
buf.B = buf.B[:0]
if !allow_zero(i.flags) {
for x, field := range i.fields {
before := len(buf.B)
@ -401,7 +355,7 @@ func (i *Index) delete_entry(entry *index_entry) {
// index_entry represents a single entry
// in an Index{}, where it will be accessible
// by Key{} pointing to a containing list{}.
// by .key pointing to a containing list{}.
type index_entry struct {
// list elem that entry is stored


@ -4,6 +4,7 @@ import (
"sync"
"codeberg.org/gruf/go-byteutil"
"codeberg.org/gruf/go-mangler/v2"
)
// Key represents one key to
@ -14,6 +15,37 @@ type Key struct {
raw []any
}
// MakeKey generates Key{} from given parts.
func MakeKey(parts ...any) Key {
buf := new_buffer()
buf.B = mangler.AppendMulti(buf.B[:0], parts...)
key := string(buf.B)
free_buffer(buf)
return Key{
raw: parts,
key: key,
}
}
// MakeKeys generates []Key{} from given (multiple) parts.
func MakeKeys(parts ...[]any) []Key {
keys := make([]Key, len(parts))
if len(keys) != len(parts) {
panic(assert("BCE"))
}
buf := new_buffer()
for x, parts := range parts {
buf.B = mangler.AppendMulti(buf.B[:0], parts...)
key := string(buf.B)
keys[x] = Key{
raw: parts,
key: key,
}
}
free_buffer(buf)
return keys
}
// Key returns the underlying cache key string.
// NOTE: this will not be log output friendly.
func (k Key) Key() string {
@ -31,11 +63,6 @@ func (k Key) Values() []any {
return k.raw
}
// Zero indicates a zero value key.
func (k Key) Zero() bool {
return (k.key == "")
}
var buf_pool sync.Pool
// new_buffer returns a new initialized byte buffer.
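
Key construction moves off Index{} (the removed Key()/Keys() methods above) onto these standalone constructors. A usage sketch (the part values are illustrative):

```go
package main

import (
	"fmt"

	"codeberg.org/gruf/go-structr"
)

func main() {
	// One key from its raw parts, e.g. for an index
	// configured with Fields: "Domain,Username".
	k := structr.MakeKey("example.org", "kim")
	fmt.Println(k.Key() != "") // true

	// Batch form: one Key per []any of parts.
	ks := structr.MakeKeys(
		[]any{"example.org", "kim"},
		[]any{"example.com", "tobi"},
	)
	fmt.Println(len(ks)) // 2
}
```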


@ -177,11 +177,3 @@ func (l *list) remove(elem *list_elem) {
// Decr count
l.len--
}
// func (l *list) range_up(yield func(*list_elem) bool) {
// }
// func (l *list) range_down(yield func(*list_elem) bool) {
// }


@ -1,7 +1,6 @@
package structr
import (
"reflect"
"sync"
"unsafe"
)
@ -48,7 +47,7 @@ type Queue[StructType any] struct {
// Init initializes the queue with given configuration
// including struct fields to index, and necessary fns.
func (q *Queue[T]) Init(config QueueConfig[T]) {
t := reflect.TypeOf((*T)(nil)).Elem()
t := get_type_iter[T]()
if len(config.Indices) == 0 {
panic("no indices provided")
@ -323,7 +322,7 @@ func (q *Queue[T]) index(value T) *indexed_item {
}
func (q *Queue[T]) delete(i *indexed_item) {
for len(i.indexed) != 0 {
for len(i.indexed) > 0 {
// Pop last indexed entry from list.
entry := i.indexed[len(i.indexed)-1]
i.indexed[len(i.indexed)-1] = nil


@ -1,5 +1,3 @@
//go:build go1.22 && !go1.25
package structr
import (
@ -11,17 +9,16 @@ import (
"unicode/utf8"
"unsafe"
"codeberg.org/gruf/go-mangler"
"codeberg.org/gruf/go-mangler/v2"
"codeberg.org/gruf/go-xunsafe"
)
// struct_field contains pre-prepared type
// information about a struct's field member,
// including memory offset and hash function.
type struct_field struct {
rtype reflect.Type
// struct field type mangling
// (i.e. fast serializing) fn.
// mangle ...
mangle mangler.Mangler
// zero value data, used when
@ -30,18 +27,13 @@ type struct_field struct {
zero unsafe.Pointer
// mangled zero value string,
// if set this indicates zero
// values of field not allowed
// to check zero value keys.
zerostr string
// offsets defines whereabouts in
// memory this field is located.
// memory this field is located,
// and after how many dereferences.
offsets []next_offset
// determines whether field type
// is ptr-like in-memory, and so
// requires a further dereference.
likeptr bool
}
// next_offset defines a next offset location
@ -49,13 +41,22 @@ type struct_field struct {
// derefences required, then by offset from
// that final memory location.
type next_offset struct {
derefs uint
derefs int
offset uintptr
}
// get_type_iter returns a prepared xunsafe.TypeIter{} for generic parameter type,
// with flagIndir specifically set as we always take a reference to value type.
func get_type_iter[T any]() xunsafe.TypeIter {
rtype := reflect.TypeOf((*T)(nil)).Elem()
flags := xunsafe.Reflect_flag(xunsafe.Abi_Type_Kind(rtype))
flags |= xunsafe.Reflect_flagIndir // always comes from unsafe ptr
return xunsafe.ToTypeIter(rtype, flags)
}
// find_field will search for a struct field with given set of names,
// where names is a len > 0 slice of names accounting for struct nesting.
func find_field(t reflect.Type, names []string) (sfield struct_field) {
func find_field(t xunsafe.TypeIter, names []string) (sfield struct_field, ftype reflect.Type) {
var (
// is_exported returns whether name is exported
// from a package; can be func or struct field.
@ -84,23 +85,42 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) {
// Pop next name.
name := pop_name()
var off next_offset
var n int
rtype := t.Type
flags := t.Flag
// Dereference any ptrs to struct.
for t.Kind() == reflect.Pointer {
t = t.Elem()
off.derefs++
// Iteratively dereference pointer types.
for rtype.Kind() == reflect.Pointer {
// If this is actual indirect memory,
// increase the dereferences counter.
if flags&xunsafe.Reflect_flagIndir != 0 {
n++
}
// Get next elem type.
rtype = rtype.Elem()
// Get next set of dereferenced element type flags.
flags = xunsafe.ReflectPointerElemFlags(flags, rtype)
// Update type iter info.
t = t.Child(rtype, flags)
}
// Check for valid struct type.
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("field %s is not struct (or ptr-to): %s", t, name))
if rtype.Kind() != reflect.Struct {
panic(fmt.Sprintf("field %s is not struct (or ptr-to): %s", rtype, name))
}
// Set offset info.
var off next_offset
off.derefs = n
var ok bool
// Look for next field by name.
field, ok = t.FieldByName(name)
// Look for the next field by name.
field, ok = rtype.FieldByName(name)
if !ok {
panic(fmt.Sprintf("unknown field: %s", name))
}
@ -109,24 +129,29 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) {
off.offset = field.Offset
sfield.offsets = append(sfield.offsets, off)
// Set the next type.
t = field.Type
// Calculate value flags, and set next nested field type.
flags = xunsafe.ReflectStructFieldFlags(t.Flag, field.Type)
t = t.Child(field.Type, flags)
}
// Check if ptr-like in-memory.
sfield.likeptr = like_ptr(t)
// Set final field type.
ftype = t.TypeInfo.Type
// Set final type.
sfield.rtype = t
// Find mangler for field type.
// Get mangler from type info.
sfield.mangle = mangler.Get(t)
// Get new zero value data ptr.
v := reflect.New(t).Elem()
zptr := eface_data(v.Interface())
zstr := sfield.mangle(nil, zptr)
sfield.zerostr = string(zstr)
// Get field type as zero interface.
v := reflect.New(t.Type).Elem()
vi := v.Interface()
// Get argument mangler from iface.
ti := xunsafe.TypeIterFrom(vi)
mangleArg := mangler.Get(ti)
// Calculate zero value string.
zptr := xunsafe.UnpackEface(vi)
zstr := string(mangleArg(nil, zptr))
sfield.zerostr = zstr
sfield.zero = zptr
return
@ -158,11 +183,6 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []unsafe.Pointer
offset.offset)
}
if field.likeptr && fptr != nil {
// Further dereference value ptr.
fptr = *(*unsafe.Pointer)(fptr)
}
if fptr == nil {
// Use zero value.
fptr = field.zero
@ -179,26 +199,26 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []unsafe.Pointer
// information about a primary key struct's
// field member, including memory offset.
type pkey_field struct {
rtype reflect.Type
// zero value data, used when
// nil encountered during ptr
// offset following.
zero unsafe.Pointer
// offsets defines whereabouts in
// memory this field is located.
offsets []next_offset
// determines whether field type
// is ptr-like in-memory, and so
// requires a further dereference.
likeptr bool
}
// extract_pkey will extract a pointer from 'ptr', to
// the primary key struct field defined by 'field'.
func extract_pkey(ptr unsafe.Pointer, field pkey_field) unsafe.Pointer {
for _, offset := range field.offsets {
// Dereference any ptrs to offset.
ptr = deref(ptr, offset.derefs)
if ptr == nil {
return nil
break
}
// Jump forward by offset to next ptr.
@ -206,43 +226,16 @@ func extract_pkey(ptr unsafe.Pointer, field pkey_field) unsafe.Pointer {
offset.offset)
}
if field.likeptr && ptr != nil {
// Further dereference value ptr.
ptr = *(*unsafe.Pointer)(ptr)
if ptr == nil {
// Use zero value.
ptr = field.zero
}
return ptr
}
// like_ptr returns whether a type's kind is ptr-like in-memory,
// which indicates it may need a final additional dereference.
func like_ptr(t reflect.Type) bool {
switch t.Kind() {
case reflect.Array:
switch n := t.Len(); n {
case 1:
// specifically single elem arrays
// follow like_ptr for contained type.
return like_ptr(t.Elem())
}
case reflect.Struct:
switch n := t.NumField(); n {
case 1:
// specifically single field structs
// follow like_ptr for contained type.
return like_ptr(t.Field(0).Type)
}
case reflect.Pointer,
reflect.Map,
reflect.Chan,
reflect.Func:
return true
}
return false
}
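
For intuition on "ptr-like in-memory": pointer, map, chan, and func values each occupy a single machine word, so reaching the underlying data takes one further load. A standalone illustration (sizes shown assume a 64-bit platform):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Each of these kinds is a single pointer word in memory,
	// which is exactly what like_ptr above reports.
	fmt.Println(unsafe.Sizeof((*int)(nil)))      // 8
	fmt.Println(unsafe.Sizeof(map[int]int(nil))) // 8
	fmt.Println(unsafe.Sizeof((chan int)(nil)))  // 8
	fmt.Println(unsafe.Sizeof((func())(nil)))    // 8
	// A single-element array wrapping one is no different.
	fmt.Println(unsafe.Sizeof([1]*int{})) // 8
}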
// deref will dereference ptr 'n' times (or until nil).
func deref(p unsafe.Pointer, n uint) unsafe.Pointer {
func deref(p unsafe.Pointer, n int) unsafe.Pointer {
for ; n > 0; n-- {
if p == nil {
return nil
@ -252,24 +245,16 @@ func deref(p unsafe.Pointer, n uint) unsafe.Pointer {
return p
}
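
A self-contained sketch of the same pointer-chasing loop; derefSketch is an illustrative copy, not an exported API:

package main

import (
	"fmt"
	"unsafe"
)

// derefSketch follows 'n' pointer indirections,
// stopping early if it lands on nil.
func derefSketch(p unsafe.Pointer, n int) unsafe.Pointer {
	for ; n > 0; n-- {
		if p == nil {
			return nil
		}
		p = *(*unsafe.Pointer)(p)
	}
	return p
}

func main() {
	x := 42
	px := &x
	ppx := &px
	pppx := &ppx
	// Two dereferences walk from &pppx down to px (a *int).
	p := derefSketch(unsafe.Pointer(&pppx), 2)
	fmt.Println(*(*int)(p)) // 42
}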
// eface_data returns the data ptr from an empty interface.
func eface_data(a any) unsafe.Pointer {
type eface struct{ _, data unsafe.Pointer }
return (*eface)(unsafe.Pointer(&a)).data
}
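
For background on the removed eface_data helper: an empty interface is laid out as two words, a type pointer followed by a data pointer, and the trick reads the second word directly. A standalone sketch of the same idea:

package main

import (
	"fmt"
	"unsafe"
)

// eface mirrors the runtime layout of interface{}:
// a type word followed by a data word.
type eface struct{ _, data unsafe.Pointer }

func efaceData(a any) unsafe.Pointer {
	return (*eface)(unsafe.Pointer(&a)).data
}

func main() {
	x := 42
	// Pointer values are stored directly in the data word,
	// so boxing &x and unboxing returns &x itself.
	p := efaceData(&x)
	fmt.Println(*(*int)(p)) // 42
}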
// assert can be called to indicate a block
// of code that should not be reachable;
// it returns a BUG report with callsite.
//
//go:noinline
func assert(assert string) string {
pcs := make([]uintptr, 1)
_ = runtime.Callers(2, pcs)
fn := runtime.FuncForPC(pcs[0])
funcname := "go-structr" // by default use just our library name
if fn != nil {
funcname = fn.Name()
if frames := runtime.CallersFrames(pcs); frames != nil {
frame, _ := frames.Next()
funcname = frame.Function
if i := strings.LastIndexByte(funcname, '/'); i != -1 {
funcname = funcname[i+1:]
}
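
The move from runtime.FuncForPC to runtime.CallersFrames follows the runtime package's own guidance: CallersFrames expands inlined frames that a raw PC lookup can misattribute. A minimal sketch of the same pattern; callerName is illustrative:

package main

import (
	"fmt"
	"runtime"
	"strings"
)

// callerName resolves the name of its caller, trimmed
// to "pkg.Func" form like the updated assert() above.
func callerName() string {
	pcs := make([]uintptr, 1)
	// Skip runtime.Callers and callerName itself.
	_ = runtime.Callers(2, pcs)
	name := "unknown"
	if frames := runtime.CallersFrames(pcs); frames != nil {
		frame, _ := frames.Next()
		name = frame.Function
		if i := strings.LastIndexByte(name, '/'); i != -1 {
			name = name[i+1:]
		}
	}
	return name
}

func main() {
	fmt.Println(callerName()) // main.main
}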

@ -5,6 +5,7 @@ import (
"os"
"reflect"
"slices"
"strings"
"sync"
"unsafe"
)
@ -89,7 +90,7 @@ type Timeline[StructType any, PK cmp.Ordered] struct {
// Init initializes the timeline with given configuration
// including struct fields to index, and necessary fns.
func (t *Timeline[T, PK]) Init(config TimelineConfig[T, PK]) {
rt := reflect.TypeOf((*T)(nil)).Elem()
ti := get_type_iter[T]()
if len(config.Indices) == 0 {
panic("no indices provided")
@ -99,6 +100,17 @@ func (t *Timeline[T, PK]) Init(config TimelineConfig[T, PK]) {
panic("copy function must be provided")
}
if strings.Contains(config.PKey.Fields, ",") {
panic("primary key must contain only 1 field")
}
// Verify primary key parameter type is correct.
names := strings.Split(config.PKey.Fields, ".")
if _, ftype := find_field(ti, names); //
ftype != reflect.TypeFor[PK]() {
panic("primary key field path and generic parameter type do not match")
}
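
The check above leans on reflect.TypeFor (Go 1.22+) to compare the primary key field's type against the generic parameter. A hypothetical standalone version of the same idea:

package main

import (
	"fmt"
	"reflect"
)

// pkeyTypeMatches is illustrative: report whether the named
// field of T has exactly the type of generic parameter PK.
func pkeyTypeMatches[T, PK any](name string) bool {
	f, ok := reflect.TypeFor[T]().FieldByName(name)
	return ok && f.Type == reflect.TypeFor[PK]()
}

type Status struct{ ID string }

func main() {
	fmt.Println(pkeyTypeMatches[Status, string]("ID")) // true
	fmt.Println(pkeyTypeMatches[Status, int]("ID"))    // false
}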
// Safely copy over
// provided config.
t.mutex.Lock()
@ -108,21 +120,17 @@ func (t *Timeline[T, PK]) Init(config TimelineConfig[T, PK]) {
// other indices are created as expected.
t.indices = make([]Index, len(config.Indices)+1)
t.indices[0].ptr = unsafe.Pointer(t)
t.indices[0].init(rt, config.PKey, 0)
if len(t.indices[0].fields) > 1 {
panic("primary key must contain only 1 field")
}
t.indices[0].init(ti, config.PKey, 0)
for i, cfg := range config.Indices {
t.indices[i+1].ptr = unsafe.Pointer(t)
t.indices[i+1].init(rt, cfg, 0)
t.indices[i+1].init(ti, cfg, 0)
}
// Extract pkey details from index.
field := t.indices[0].fields[0]
t.pkey = pkey_field{
rtype: field.rtype,
zero: field.zero,
offsets: field.offsets,
likeptr: field.likeptr,
}
// Copy over remaining.
@ -220,15 +228,7 @@ func (t *Timeline[T, PK]) Insert(values ...T) int {
// Extract primary key from vptr.
kptr := extract_pkey(vptr, t.pkey)
var pkey PK
if kptr != nil {
// Cast as PK type.
pkey = *(*PK)(kptr)
} else {
// Use zero value pointer.
kptr = unsafe.Pointer(&pkey)
}
pkey := *(*PK)(kptr)
// Append wrapped value to slice with
// the acquire pointers and primary key.
@ -241,10 +241,8 @@ func (t *Timeline[T, PK]) Insert(values ...T) int {
}
}
var last *list_elem
// BEFORE inserting the prepared slice of value copies w/ primary
// keys, sort them by their primary key, ascending. This permits
// keys, sort them by their primary key, descending. This permits
// us to re-use the 'last' timeline position as next insert cursor.
// Otherwise we would have to iterate from 'head' every single time.
slices.SortFunc(with_keys, func(a, b value_with_pk[T, PK]) int {
@ -259,6 +257,8 @@ func (t *Timeline[T, PK]) Insert(values ...T) int {
}
})
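
A toy example of the descending sort that makes the 'last' cursor reusable; the entry type and its keys are made up:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

type entry struct{ key int }

func main() {
	items := []entry{{2}, {5}, {1}}
	// Descending by key: compare b against a.
	slices.SortFunc(items, func(a, b entry) int {
		return cmp.Compare(b.key, a.key)
	})
	fmt.Println(items) // [{5} {2} {1}]
}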
var last *list_elem
// Store each value in the timeline,
// updating the last used list element
// each time so we don't have to iter
@ -1071,7 +1071,7 @@ indexing:
}
func (t *Timeline[T, PK]) delete(i *timeline_item) {
for len(i.indexed) != 0 {
for len(i.indexed) > 0 {
// Pop last indexed entry from list.
entry := i.indexed[len(i.indexed)-1]
i.indexed[len(i.indexed)-1] = nil
@ -1126,9 +1126,9 @@ func from_timeline_item(item *timeline_item) *indexed_item {
func to_timeline_item(item *indexed_item) *timeline_item {
to := (*timeline_item)(unsafe.Pointer(item))
if to.ck != ^uint(0) {
// ensure check bits are set indicating
// ensure check bits set, indicating
// it was a timeline_item originally.
panic(assert("check bits are set"))
panic(assert("t.ck = ^uint(0)"))
}
return to
}

@ -1,13 +0,0 @@
package structr
// once only executes 'fn' once.
func once(fn func()) func() {
var once int32
return func() {
if once != 0 {
return
}
once = 1
fn()
}
}
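
For reference, the standard library has shipped an equivalent since Go 1.21, and unlike the removed helper it is also safe for concurrent use, which may be why the file could go:

package main

import (
	"fmt"
	"sync"
)

func main() {
	hello := sync.OnceFunc(func() { fmt.Println("hello") })
	hello() // prints "hello"
	hello() // no-op: fn has already run
}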

@ -1,6 +0,0 @@
language: go
go:
- 1.6
- 1.7
- tip

vendor/modules.txt

@ -221,7 +221,7 @@ code.superseriousbusiness.org/oauth2/v4/server
# codeberg.org/gruf/go-bitutil v1.1.0
## explicit; go 1.19
codeberg.org/gruf/go-bitutil
# codeberg.org/gruf/go-bytesize v1.0.3
# codeberg.org/gruf/go-bytesize v1.0.4
## explicit; go 1.17
codeberg.org/gruf/go-bytesize
# codeberg.org/gruf/go-byteutil v1.3.0
@ -247,7 +247,7 @@ codeberg.org/gruf/go-fastcopy
# codeberg.org/gruf/go-fastpath/v2 v2.0.0
## explicit; go 1.14
codeberg.org/gruf/go-fastpath/v2
# codeberg.org/gruf/go-ffmpreg v0.6.8
# codeberg.org/gruf/go-ffmpreg v0.6.9
## explicit; go 1.22.0
codeberg.org/gruf/go-ffmpreg/embed
codeberg.org/gruf/go-ffmpreg/wasm
@ -258,23 +258,23 @@ codeberg.org/gruf/go-iotools
## explicit; go 1.20
codeberg.org/gruf/go-kv
codeberg.org/gruf/go-kv/format
# codeberg.org/gruf/go-kv/v2 v2.0.6
# codeberg.org/gruf/go-kv/v2 v2.0.7
## explicit; go 1.24.5
codeberg.org/gruf/go-kv/v2
codeberg.org/gruf/go-kv/v2/format
# codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f
## explicit; go 1.21.3
codeberg.org/gruf/go-list
# codeberg.org/gruf/go-mangler v1.4.4
## explicit; go 1.19
codeberg.org/gruf/go-mangler
# codeberg.org/gruf/go-mangler/v2 v2.0.6
## explicit; go 1.24.5
codeberg.org/gruf/go-mangler/v2
# codeberg.org/gruf/go-maps v1.0.4
## explicit; go 1.20
codeberg.org/gruf/go-maps
# codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760
## explicit; go 1.22.2
codeberg.org/gruf/go-mempool
# codeberg.org/gruf/go-mutexes v1.5.2
# codeberg.org/gruf/go-mutexes v1.5.3
## explicit; go 1.22.2
codeberg.org/gruf/go-mutexes
# codeberg.org/gruf/go-runners v1.6.3
@ -293,8 +293,8 @@ codeberg.org/gruf/go-storage/disk
codeberg.org/gruf/go-storage/internal
codeberg.org/gruf/go-storage/memory
codeberg.org/gruf/go-storage/s3
# codeberg.org/gruf/go-structr v0.9.7
## explicit; go 1.22
# codeberg.org/gruf/go-structr v0.9.8
## explicit; go 1.24.5
codeberg.org/gruf/go-structr
# codeberg.org/gruf/go-xunsafe v0.0.0-20250809104800-512a9df57d73
## explicit; go 1.24.5
@ -916,8 +916,8 @@ github.com/tinylib/msgp/msgp
# github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc
## explicit
github.com/tmthrgd/go-hex
# github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80
## explicit
# github.com/tomnomnom/linkheader v0.0.0-20250811210735-e5fe3b51442e
## explicit; go 1.13
github.com/tomnomnom/linkheader
# github.com/toqueteos/webbrowser v1.2.0
## explicit; go 1.12