[chore] update dependencies (#4386)

- codeberg.org/gruf/go-bytesize v1.0.3 -> v1.0.4
- codeberg.org/gruf/go-kv/v2 v2.0.6 -> v2.0.7
- codeberg.org/gruf/go-mutexes v1.5.2 -> v1.5.3
- codeberg.org/gruf/go-structr v0.9.7 -> v0.9.8
- codeberg.org/gruf/go-ffmpreg v0.6.8 -> v0.6.9
- github.com/tomnomnom/linkheader HEAD@2018 -> HEAD@2025

All of the above codeberg.org/gruf updates are in preparation for Go 1.25, except for bytesize, and ffmpreg, which is a rebuild with the latest version of ffmpeg (v5.1.7).

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4386
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
kim authored 2025-08-21 16:41:50 +02:00, committed by kim
commit a79f83cbde
38 changed files with 1246 additions and 964 deletions
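
For anyone reproducing the bump locally, it amounts to something like the following (a sketch, assuming the standard Go module workflow; gotosocial vendors its dependencies, hence the final vendor step, and the exact 2025 linkheader commit is left unpinned here):

go get codeberg.org/gruf/go-bytesize@v1.0.4
go get codeberg.org/gruf/go-kv/v2@v2.0.7
go get codeberg.org/gruf/go-mutexes@v1.5.3
go get codeberg.org/gruf/go-structr@v0.9.8
go get codeberg.org/gruf/go-ffmpreg@v0.6.9
go get github.com/tomnomnom/linkheader@master  # pins a newer upstream HEAD
go mod tidy && go mod vendor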

View file

@@ -2,7 +2,7 @@
A library with a series of performant data types with automated struct value indexing. Indexing is supported via arbitrary combinations of fields, and in the case of the cache type, negative results (errors!) are also supported.
Under the hood, go-structr maintains a hashmap per index, where each hashmap is a hashmap keyed by serialized input key type. This is handled by the incredibly performant serialization library [go-mangler](https://codeberg.org/gruf/go-mangler), which at this point in time supports *most* arbitrary types (other than maps, channels, functions), so feel free to index by almost *anything*!
Under the hood, go-structr maintains a hashmap per index, where each hashmap is a hashmap keyed by serialized input key type. This is handled by the incredibly performant serialization library [go-mangler/v2](https://codeberg.org/gruf/go-mangler), which at this point in time supports *most* arbitrary types (other than channels, functions), so feel free to index by almost *anything*!
See the [docs](https://pkg.go.dev/codeberg.org/gruf/go-structr) for more API information.
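
As a rough illustration of the model the README describes, a minimal usage sketch follows (the User type, config values, and loader are invented; LoadOne and MakeKey appear in the diffs below, while Index(), Copy, and MaxSize are recalled from the wider go-structr API and should be treated as assumptions):

package main

import "codeberg.org/gruf/go-structr"

type User struct {
	ID       string
	Username string
}

func main() {
	var cache structr.Cache[*User]

	// Two indices: lookups supported by "ID" or by "Username".
	cache.Init(structr.CacheConfig[*User]{
		Indices: []structr.IndexConfig{
			{Fields: "ID"},
			{Fields: "Username"},
		},
		MaxSize: 1000,
		Copy:    func(u *User) *User { c := *u; return &c },
	})

	// Load by username, falling back to the given loader
	// function (e.g. a database query) on cache miss.
	idx := cache.Index("Username")
	user, err := cache.LoadOne(idx, structr.MakeKey("kim"), func() (*User, error) {
		return &User{ID: "1", Username: "kim"}, nil // stand-in loader
	})
	_, _ = user, err
}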

View file

@@ -3,7 +3,6 @@ package structr
import (
"context"
"errors"
"reflect"
"sync"
"unsafe"
)
@@ -83,7 +82,7 @@ type Cache[StructType any] struct {
// Init initializes the cache with given configuration
// including struct fields to index, and necessary fns.
func (c *Cache[T]) Init(config CacheConfig[T]) {
t := reflect.TypeOf((*T)(nil)).Elem()
t := get_type_iter[T]()
if len(config.Indices) == 0 {
panic("no indices provided")
@@ -182,9 +181,14 @@ func (c *Cache[T]) Put(values ...T) {
// Acquire lock.
c.mutex.Lock()
// Wrap unlock to only do once.
unlock := once(c.mutex.Unlock)
defer unlock()
// Ensure mutex
// gets unlocked.
var unlocked bool
defer func() {
if !unlocked {
c.mutex.Unlock()
}
}()
// Check cache init.
if c.copy == nil {
@@ -202,9 +206,9 @@ func (c *Cache[T]) Put(values ...T) {
// Get func ptrs.
invalid := c.invalid
// Done with
// the lock.
unlock()
// Done with lock.
c.mutex.Unlock()
unlocked = true
if invalid != nil {
// Pass all invalidated values
@@ -241,9 +245,14 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
// Acquire lock.
c.mutex.Lock()
// Wrap unlock to only do once.
unlock := once(c.mutex.Unlock)
defer unlock()
// Ensure mutex
// gets unlocked.
var unlocked bool
defer func() {
if !unlocked {
c.mutex.Unlock()
}
}()
// Check init'd.
if c.copy == nil ||
@@ -276,9 +285,9 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
// Get func ptrs.
ignore := c.ignore
// Done with
// the lock.
unlock()
// Done with lock.
c.mutex.Unlock()
unlocked = true
if ok {
// item found!
@@ -295,6 +304,7 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
// Acquire lock.
c.mutex.Lock()
unlocked = false
// Index this new loaded item.
// Note this handles copying of
@@ -308,6 +318,7 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
// Done with lock.
c.mutex.Unlock()
unlocked = true
return val, err
}
@@ -328,9 +339,14 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
// Acquire lock.
c.mutex.Lock()
// Wrap unlock to only do once.
unlock := once(c.mutex.Unlock)
defer unlock()
// Ensure mutex
// gets unlocked.
var unlocked bool
defer func() {
if !unlocked {
c.mutex.Unlock()
}
}()
// Check init'd.
if c.copy == nil {
@@ -366,9 +382,9 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
}
}
// Done with
// the lock.
unlock()
// Done with lock.
c.mutex.Unlock()
unlocked = true
if len(toLoad) == 0 {
// We loaded everything!
@@ -383,6 +399,7 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
// Acquire lock.
c.mutex.Lock()
unlocked = false
// Store all uncached values.
for i := range uncached {
@@ -394,6 +411,7 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
// Done with lock.
c.mutex.Unlock()
unlocked = true
// Append uncached to return values.
values = append(values, uncached...)
@@ -685,7 +703,7 @@ func (c *Cache[T]) store_error(index *Index, key string, err error) {
}
func (c *Cache[T]) delete(i *indexed_item) {
for len(i.indexed) != 0 {
for len(i.indexed) > 0 {
// Pop last indexed entry from list.
entry := i.indexed[len(i.indexed)-1]
i.indexed[len(i.indexed)-1] = nil

View file

@@ -1,7 +1,6 @@
package structr
import (
"fmt"
"os"
"reflect"
"strings"
@@ -9,6 +8,7 @@ import (
"unsafe"
"codeberg.org/gruf/go-byteutil"
"codeberg.org/gruf/go-xunsafe"
)
// IndexConfig defines config variables
@@ -29,7 +29,7 @@ type IndexConfig struct {
// is nil then it will not be indexed.
//
// Field types supported include any of those
// supported by the `go-mangler` library.
// supported by the `go-mangler/v2` library.
Fields string
// Multiple indicates whether to accept multiple
@@ -58,7 +58,7 @@ type IndexConfig struct {
type Index struct {
// ptr is a pointer to
// the source Cache/Queue
// the source type this
// index is attached to.
ptr unsafe.Pointer
@@ -68,14 +68,12 @@ type Index struct {
name string
// backing data store of the index, containing
// the cached results contained within wrapping
// index_entry{} which also contains the exact
// key each result is stored under. the hash map
// only keys by the xxh3 hash checksum for speed.
// list{}s of index_entry{}s which each contain
// the exact key each result is stored under.
data hashmap
// struct fields encompassed by
// keys (+ hashes) of this index.
// struct fields encompassed
// by keys of this index.
fields []struct_field
// index flags:
@@ -89,55 +87,14 @@ func (i *Index) Name() string {
return i.name
}
// Key generates Key{} from given parts for
// the type of lookup this Index uses in cache.
// NOTE: panics on incorrect no. parts / types given.
func (i *Index) Key(parts ...any) Key {
ptrs := make([]unsafe.Pointer, len(parts))
for x, part := range parts {
ptrs[x] = eface_data(part)
}
buf := new_buffer()
key := i.key(buf, ptrs)
free_buffer(buf)
return Key{
raw: parts,
key: key,
}
}
// Keys generates []Key{} from given (multiple) parts
// for the type of lookup this Index uses in the cache.
// NOTE: panics on incorrect no. parts / types given.
func (i *Index) Keys(parts ...[]any) []Key {
keys := make([]Key, 0, len(parts))
buf := new_buffer()
for _, parts := range parts {
ptrs := make([]unsafe.Pointer, len(parts))
for x, part := range parts {
ptrs[x] = eface_data(part)
}
key := i.key(buf, ptrs)
if key == "" {
continue
}
keys = append(keys, Key{
raw: parts,
key: key,
})
}
free_buffer(buf)
return keys
}
// init will initialize the cache with given type, config and capacity.
func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
func (i *Index) init(t xunsafe.TypeIter, cfg IndexConfig, cap int) {
switch {
// The only 2 types we support are
// structs, and ptrs to a struct.
case t.Kind() == reflect.Struct:
case t.Kind() == reflect.Pointer &&
t.Elem().Kind() == reflect.Struct:
case t.Type.Kind() == reflect.Struct:
case t.Type.Kind() == reflect.Pointer &&
t.Type.Elem().Kind() == reflect.Struct:
default:
panic("index only support struct{} and *struct{}")
}
@@ -164,8 +121,8 @@ func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
// Split name to account for nesting.
names := strings.Split(name, ".")
// Look for usable struct field.
i.fields[x] = find_field(t, names)
// Look for struct field by names.
i.fields[x], _ = find_field(t, names)
}
// Initialize store for
@@ -219,15 +176,12 @@ func (i *Index) get(key string, hook func(*indexed_item)) {
}
}
// key uses hasher to generate Key{} from given raw parts.
// key ...
func (i *Index) key(buf *byteutil.Buffer, parts []unsafe.Pointer) string {
buf.B = buf.B[:0]
if len(parts) != len(i.fields) {
panic(fmt.Sprintf("incorrect number key parts: want=%d received=%d",
len(i.fields),
len(parts),
))
panic(assert("len(parts) = len(i.fields)"))
}
buf.B = buf.B[:0]
if !allow_zero(i.flags) {
for x, field := range i.fields {
before := len(buf.B)
@@ -401,7 +355,7 @@ func (i *Index) delete_entry(entry *index_entry) {
// index_entry represents a single entry
// in an Index{}, where it will be accessible
// by Key{} pointing to a containing list{}.
// by .key pointing to a containing list{}.
type index_entry struct {
// list elem that entry is stored

View file

@@ -4,6 +4,7 @@ import (
"sync"
"codeberg.org/gruf/go-byteutil"
"codeberg.org/gruf/go-mangler/v2"
)
// Key represents one key to
@@ -14,6 +15,37 @@ type Key struct {
raw []any
}
// MakeKey generates Key{} from given parts.
func MakeKey(parts ...any) Key {
buf := new_buffer()
buf.B = mangler.AppendMulti(buf.B[:0], parts...)
key := string(buf.B)
free_buffer(buf)
return Key{
raw: parts,
key: key,
}
}
// MakeKeys generates []Key{} from given (multiple) parts.
func MakeKeys(parts ...[]any) []Key {
keys := make([]Key, len(parts))
if len(keys) != len(parts) {
panic(assert("BCE"))
}
buf := new_buffer()
for x, parts := range parts {
buf.B = mangler.AppendMulti(buf.B[:0], parts...)
key := string(buf.B)
keys[x] = Key{
raw: parts,
key: key,
}
}
free_buffer(buf)
return keys
}
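
The MakeKey/MakeKeys additions above pair with the Index.Key/Index.Keys removals in index.go: key construction moves from a method on the index to package-level helpers, decoupling keys from any particular index. A minimal before/after sketch (the index variable in the comments is assumed):

package main

import "codeberg.org/gruf/go-structr"

func main() {
	// v0.9.7 and earlier (removed in index.go above):
	//   key := index.Key("kim")
	//   keys := index.Keys([]any{"kim"}, []any{"tobi"})

	// v0.9.8: keys are built independently of any index,
	// then passed to cache methods alongside the index.
	key := structr.MakeKey("kim")
	keys := structr.MakeKeys([]any{"kim"}, []any{"tobi"})
	_, _ = key, keys
}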
// Key returns the underlying cache key string.
// NOTE: this will not be log output friendly.
func (k Key) Key() string {
@@ -31,11 +63,6 @@ func (k Key) Values() []any {
return k.raw
}
// Zero indicates a zero value key.
func (k Key) Zero() bool {
return (k.key == "")
}
var buf_pool sync.Pool
// new_buffer returns a new initialized byte buffer.

View file

@@ -177,11 +177,3 @@ func (l *list) remove(elem *list_elem) {
// Decr count
l.len--
}
// func (l *list) range_up(yield func(*list_elem) bool) {
// }
// func (l *list) range_down(yield func(*list_elem) bool) {
// }

View file

@@ -1,7 +1,6 @@
package structr
import (
"reflect"
"sync"
"unsafe"
)
@@ -48,7 +47,7 @@ type Queue[StructType any] struct {
// Init initializes the queue with given configuration
// including struct fields to index, and necessary fns.
func (q *Queue[T]) Init(config QueueConfig[T]) {
t := reflect.TypeOf((*T)(nil)).Elem()
t := get_type_iter[T]()
if len(config.Indices) == 0 {
panic("no indices provided")
@@ -323,7 +322,7 @@ func (q *Queue[T]) index(value T) *indexed_item {
}
func (q *Queue[T]) delete(i *indexed_item) {
for len(i.indexed) != 0 {
for len(i.indexed) > 0 {
// Pop last indexed entry from list.
entry := i.indexed[len(i.indexed)-1]
i.indexed[len(i.indexed)-1] = nil

View file

@@ -1,5 +1,3 @@
//go:build go1.22 && !go1.25
package structr
import (
@@ -11,17 +9,16 @@ import (
"unicode/utf8"
"unsafe"
"codeberg.org/gruf/go-mangler"
"codeberg.org/gruf/go-mangler/v2"
"codeberg.org/gruf/go-xunsafe"
)
// struct_field contains pre-prepared type
// information about a struct's field member,
// including memory offset and hash function.
type struct_field struct {
rtype reflect.Type
// struct field type mangling
// (i.e. fast serializing) fn.
// mangle ...
mangle mangler.Mangler
// zero value data, used when
@@ -30,18 +27,13 @@ type struct_field struct {
zero unsafe.Pointer
// mangled zero value string,
// if set this indicates zero
// values of field not allowed
// to check zero value keys.
zerostr string
// offsets defines whereabouts in
// memory this field is located.
// memory this field is located,
// and after how many dereferences.
offsets []next_offset
// determines whether field type
// is ptr-like in-memory, and so
// requires a further dereference.
likeptr bool
}
// next_offset defines a next offset location
@@ -49,13 +41,22 @@ type struct_field struct {
// derefences required, then by offset from
// that final memory location.
type next_offset struct {
derefs uint
derefs int
offset uintptr
}
// get_type_iter returns a prepared xunsafe.TypeIter{} for generic parameter type,
// with flagIndir specifically set as we always take a reference to value type.
func get_type_iter[T any]() xunsafe.TypeIter {
rtype := reflect.TypeOf((*T)(nil)).Elem()
flags := xunsafe.Reflect_flag(xunsafe.Abi_Type_Kind(rtype))
flags |= xunsafe.Reflect_flagIndir // always comes from unsafe ptr
return xunsafe.ToTypeIter(rtype, flags)
}
// find_field will search for a struct field with given set of names,
// where names is a len > 0 slice of names account for struct nesting.
func find_field(t reflect.Type, names []string) (sfield struct_field) {
func find_field(t xunsafe.TypeIter, names []string) (sfield struct_field, ftype reflect.Type) {
var (
// is_exported returns whether name is exported
// from a package; can be func or struct field.
@@ -84,23 +85,42 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) {
// Pop next name.
name := pop_name()
var off next_offset
var n int
rtype := t.Type
flags := t.Flag
// Dereference any ptrs to struct.
for t.Kind() == reflect.Pointer {
t = t.Elem()
off.derefs++
// Iteratively dereference pointer types.
for rtype.Kind() == reflect.Pointer {
// If this actual indirect memory,
// increase dereferences counter.
if flags&xunsafe.Reflect_flagIndir != 0 {
n++
}
// Get next elem type.
rtype = rtype.Elem()
// Get next set of dereferenced element type flags.
flags = xunsafe.ReflectPointerElemFlags(flags, rtype)
// Update type iter info.
t = t.Child(rtype, flags)
}
// Check for valid struct type.
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("field %s is not struct (or ptr-to): %s", t, name))
if rtype.Kind() != reflect.Struct {
panic(fmt.Sprintf("field %s is not struct (or ptr-to): %s", rtype, name))
}
// Set offset info.
var off next_offset
off.derefs = n
var ok bool
// Look for next field by name.
field, ok = t.FieldByName(name)
// Look for the next field by name.
field, ok = rtype.FieldByName(name)
if !ok {
panic(fmt.Sprintf("unknown field: %s", name))
}
@@ -109,24 +129,29 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) {
off.offset = field.Offset
sfield.offsets = append(sfield.offsets, off)
// Set the next type.
t = field.Type
// Calculate value flags, and set next nested field type.
flags = xunsafe.ReflectStructFieldFlags(t.Flag, field.Type)
t = t.Child(field.Type, flags)
}
// Check if ptr-like in-memory.
sfield.likeptr = like_ptr(t)
// Set final field type.
ftype = t.TypeInfo.Type
// Set final type.
sfield.rtype = t
// Find mangler for field type.
// Get mangler from type info.
sfield.mangle = mangler.Get(t)
// Get new zero value data ptr.
v := reflect.New(t).Elem()
zptr := eface_data(v.Interface())
zstr := sfield.mangle(nil, zptr)
sfield.zerostr = string(zstr)
// Get field type as zero interface.
v := reflect.New(t.Type).Elem()
vi := v.Interface()
// Get argument mangler from iface.
ti := xunsafe.TypeIterFrom(vi)
mangleArg := mangler.Get(ti)
// Calculate zero value string.
zptr := xunsafe.UnpackEface(vi)
zstr := string(mangleArg(nil, zptr))
sfield.zerostr = zstr
sfield.zero = zptr
return
@@ -158,11 +183,6 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []unsafe.Pointer
offset.offset)
}
if field.likeptr && fptr != nil {
// Further dereference value ptr.
fptr = *(*unsafe.Pointer)(fptr)
}
if fptr == nil {
// Use zero value.
fptr = field.zero
@@ -179,26 +199,26 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []unsafe.Pointer
// information about a primary key struct's
// field member, including memory offset.
type pkey_field struct {
rtype reflect.Type
// zero value data, used when
// nil encountered during ptr
// offset following.
zero unsafe.Pointer
// offsets defines whereabouts in
// memory this field is located.
offsets []next_offset
// determines whether field type
// is ptr-like in-memory, and so
// requires a further dereference.
likeptr bool
}
// extract_pkey will extract a pointer from 'ptr', to
// the primary key struct field defined by 'field'.
func extract_pkey(ptr unsafe.Pointer, field pkey_field) unsafe.Pointer {
for _, offset := range field.offsets {
// Dereference any ptrs to offset.
ptr = deref(ptr, offset.derefs)
if ptr == nil {
return nil
break
}
// Jump forward by offset to next ptr.
@@ -206,43 +226,16 @@ func extract_pkey(ptr unsafe.Pointer, field pkey_field) unsafe.Pointer {
offset.offset)
}
if field.likeptr && ptr != nil {
// Further dereference value ptr.
ptr = *(*unsafe.Pointer)(ptr)
if ptr == nil {
// Use zero value.
ptr = field.zero
}
return ptr
}
// like_ptr returns whether type's kind is ptr-like in-memory,
// which indicates it may need a final additional dereference.
func like_ptr(t reflect.Type) bool {
switch t.Kind() {
case reflect.Array:
switch n := t.Len(); n {
case 1:
// specifically single elem arrays
// follow like_ptr for contained type.
return like_ptr(t.Elem())
}
case reflect.Struct:
switch n := t.NumField(); n {
case 1:
// specifically single field structs
// follow like_ptr for contained type.
return like_ptr(t.Field(0).Type)
}
case reflect.Pointer,
reflect.Map,
reflect.Chan,
reflect.Func:
return true
}
return false
}
// deref will dereference ptr 'n' times (or until nil).
func deref(p unsafe.Pointer, n uint) unsafe.Pointer {
func deref(p unsafe.Pointer, n int) unsafe.Pointer {
for ; n > 0; n-- {
if p == nil {
return nil
@@ -252,24 +245,16 @@ func deref(p unsafe.Pointer, n uint) unsafe.Pointer {
return p
}
// eface_data returns the data ptr from an empty interface.
func eface_data(a any) unsafe.Pointer {
type eface struct{ _, data unsafe.Pointer }
return (*eface)(unsafe.Pointer(&a)).data
}
// assert can be called to indicated a block
// of code should not be able to be reached,
// it returns a BUG report with callsite.
//
//go:noinline
func assert(assert string) string {
pcs := make([]uintptr, 1)
_ = runtime.Callers(2, pcs)
fn := runtime.FuncForPC(pcs[0])
funcname := "go-structr" // by default use just our library name
if fn != nil {
funcname = fn.Name()
if frames := runtime.CallersFrames(pcs); frames != nil {
frame, _ := frames.Next()
funcname = frame.Function
if i := strings.LastIndexByte(funcname, '/'); i != -1 {
funcname = funcname[i+1:]
}

View file

@@ -5,6 +5,7 @@ import (
"os"
"reflect"
"slices"
"strings"
"sync"
"unsafe"
)
@@ -89,7 +90,7 @@ type Timeline[StructType any, PK cmp.Ordered] struct {
// Init initializes the timeline with given configuration
// including struct fields to index, and necessary fns.
func (t *Timeline[T, PK]) Init(config TimelineConfig[T, PK]) {
rt := reflect.TypeOf((*T)(nil)).Elem()
ti := get_type_iter[T]()
if len(config.Indices) == 0 {
panic("no indices provided")
@@ -99,6 +100,17 @@ func (t *Timeline[T, PK]) Init(config TimelineConfig[T, PK]) {
panic("copy function must be provided")
}
if strings.Contains(config.PKey.Fields, ",") {
panic("primary key must contain only 1 field")
}
// Verify primary key parameter type is correct.
names := strings.Split(config.PKey.Fields, ".")
if _, ftype := find_field(ti, names); //
ftype != reflect.TypeFor[PK]() {
panic("primary key field path and generic parameter type do not match")
}
// Safely copy over
// provided config.
t.mutex.Lock()
@@ -108,21 +120,17 @@ func (t *Timeline[T, PK]) Init(config TimelineConfig[T, PK]) {
// other indices are created as expected.
t.indices = make([]Index, len(config.Indices)+1)
t.indices[0].ptr = unsafe.Pointer(t)
t.indices[0].init(rt, config.PKey, 0)
if len(t.indices[0].fields) > 1 {
panic("primary key must contain only 1 field")
}
t.indices[0].init(ti, config.PKey, 0)
for i, cfg := range config.Indices {
t.indices[i+1].ptr = unsafe.Pointer(t)
t.indices[i+1].init(rt, cfg, 0)
t.indices[i+1].init(ti, cfg, 0)
}
// Extract pkey details from index.
field := t.indices[0].fields[0]
t.pkey = pkey_field{
rtype: field.rtype,
zero: field.zero,
offsets: field.offsets,
likeptr: field.likeptr,
}
// Copy over remaining.
@@ -220,15 +228,7 @@ func (t *Timeline[T, PK]) Insert(values ...T) int {
// Extract primary key from vptr.
kptr := extract_pkey(vptr, t.pkey)
var pkey PK
if kptr != nil {
// Cast as PK type.
pkey = *(*PK)(kptr)
} else {
// Use zero value pointer.
kptr = unsafe.Pointer(&pkey)
}
pkey := *(*PK)(kptr)
// Append wrapped value to slice with
// the acquire pointers and primary key.
@@ -241,10 +241,8 @@ func (t *Timeline[T, PK]) Insert(values ...T) int {
}
}
var last *list_elem
// BEFORE inserting the prepared slice of value copies w/ primary
// keys, sort them by their primary key, ascending. This permits
// keys, sort them by their primary key, descending. This permits
// us to re-use the 'last' timeline position as next insert cursor.
// Otherwise we would have to iterate from 'head' every single time.
slices.SortFunc(with_keys, func(a, b value_with_pk[T, PK]) int {
@@ -259,6 +257,8 @@ func (t *Timeline[T, PK]) Insert(values ...T) int {
}
})
var last *list_elem
// Store each value in the timeline,
// updating the last used list element
// each time so we don't have to iter
@@ -1071,7 +1071,7 @@ indexing:
}
func (t *Timeline[T, PK]) delete(i *timeline_item) {
for len(i.indexed) != 0 {
for len(i.indexed) > 0 {
// Pop last indexed entry from list.
entry := i.indexed[len(i.indexed)-1]
i.indexed[len(i.indexed)-1] = nil
@@ -1126,9 +1126,9 @@ func from_timeline_item(item *timeline_item) *indexed_item {
func to_timeline_item(item *indexed_item) *timeline_item {
to := (*timeline_item)(unsafe.Pointer(item))
if to.ck != ^uint(0) {
// ensure check bits are set indicating
// ensure check bits set, indicating
// it was a timeline_item originally.
panic(assert("check bits are set"))
panic(assert("t.ck = ^uint(0)"))
}
return to
}

View file

@@ -1,13 +0,0 @@
package structr
// once only executes 'fn' once.
func once(fn func()) func() {
var once int32
return func() {
if once != 0 {
return
}
once = 1
fn()
}
}
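
For context on the deletion above: every once()-wrapped unlock in cache.go has been replaced by a plain boolean flag checked in a deferred closure (see the Put/LoadOne/Load hunks earlier). A minimal sketch of the replacement pattern follows; the rationale about allocation is an assumption on my part, not stated in the commit:

package main

import "sync"

func example(mu *sync.Mutex) {
	mu.Lock()

	// Guard flag lives on the stack, unlike the closure
	// value that once() had to allocate on every call
	// (an assumed motivation for the change).
	var unlocked bool
	defer func() {
		if !unlocked {
			mu.Unlock()
		}
	}()

	// ... work under lock ...

	// Unlock early before slower work; the deferred
	// func then sees unlocked == true and does nothing.
	mu.Unlock()
	unlocked = true

	// ... work outside the lock; unlike once(), the flag
	// can also be reset after re-locking, as LoadOne does.
}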