[feature] Read + Write tombstones for deleted Actors (#1005)

* [feature] Read + Write tombstones for deleted Actors

* copyTombstone

* update to use resultcache instead of old ttl cache

Signed-off-by: kim <grufwub@gmail.com>

* update go-cache library to fix result cache capacity / ordering bugs

Signed-off-by: kim <grufwub@gmail.com>

* bump go-cache/v3 to v3.1.6 to fix bugs

Signed-off-by: kim <grufwub@gmail.com>

* switch on status code

* better explain ErrGone reasoning

Signed-off-by: kim <grufwub@gmail.com>
Co-authored-by: kim <grufwub@gmail.com>
tobi 2022-11-11 12:18:38 +01:00 committed by GitHub
commit edcee14d07
47 changed files with 3808 additions and 7 deletions

9
vendor/codeberg.org/gruf/go-mangler/LICENSE generated vendored Normal file

@@ -0,0 +1,9 @@
MIT License
Copyright (c) 2022 gruf
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

40
vendor/codeberg.org/gruf/go-mangler/README.md generated vendored Normal file

@@ -0,0 +1,40 @@
# go-mangler
[Documentation](https://pkg.go.dev/codeberg.org/gruf/go-mangler).
To put it simply, this is a bit of an odd library. It aims to provide incredibly fast, unique string outputs for all default supported input data types during a given runtime instance.
It is useful, for example, as part of larger abstractions involving hashmaps. That was my particular use case, anyway...
This package does make liberal use of the "unsafe" package.
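A minimal usage sketch (illustrative only; the exact mangled bytes are unspecified and may differ between runs):
```
package main

import (
	"fmt"

	"codeberg.org/gruf/go-mangler"
)

func main() {
	// Same content, different runtime types: the type-pointer prefix
	// keeps the mangled outputs unique per type.
	s := mangler.String("hello world")
	b := mangler.String([]byte("hello world"))
	fmt.Println(s != b) // true

	// Hash returns an xxHash digest of the mangled form, handy as a
	// fast hashmap key.
	fmt.Println(mangler.Hash([]string{"a", "b", "c"}))
}
```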
Benchmarks are below. Those with missing values panicked during our set of benchmarks, usually a case of not handling nil values elegantly. Note that the important thing here is the relative difference in benchmark scores; the actual `ns/op`, `B/op`, `allocs/op` figures account for running through over 80 possible test cases, including some not-ideal situations.
The libraries chosen for the benchmark are just a selection that could be used in a similar manner to this one, i.e. serializing in some manner.
```
goos: linux
goarch: amd64
pkg: codeberg.org/gruf/go-mangler
cpu: 11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz
BenchmarkMangle
BenchmarkMangle-8 723278 1593 ns/op 1168 B/op 120 allocs/op
BenchmarkMangleHash
BenchmarkMangleHash-8 405380 2788 ns/op 4496 B/op 214 allocs/op
BenchmarkJSON
BenchmarkJSON-8 199360 6116 ns/op 4243 B/op 142 allocs/op
BenchmarkBinary
BenchmarkBinary-8 ------ ---- ns/op ---- B/op --- allocs/op
BenchmarkFmt
BenchmarkFmt-8 168500 7111 ns/op 2256 B/op 161 allocs/op
BenchmarkKelindarBinary
BenchmarkKelindarBinary-8 ------ ---- ns/op ---- B/op --- allocs/op
BenchmarkFxmackerCbor
BenchmarkFxmackerCbor-8 361416 3255 ns/op 1495 B/op 122 allocs/op
BenchmarkMitchellhHashStructure
BenchmarkMitchellhHashStructure-8 117672 10493 ns/op 8443 B/op 961 allocs/op
BenchmarkCnfStructhash
BenchmarkCnfStructhash-8 7078 161926 ns/op 288644 B/op 5843 allocs/op
PASS
ok codeberg.org/gruf/go-mangler 14.377s
```

97
vendor/codeberg.org/gruf/go-mangler/helpers.go generated vendored Normal file

@@ -0,0 +1,97 @@
package mangler
import (
"reflect"
"unsafe"
)
// deref_ptr_mangler wraps a Mangler to dereference up to 'count' pointer
// layers (appending '0' for nil, '1' per non-nil layer) before mangling
// the fully dereferenced value.
func deref_ptr_mangler(mangle Mangler, count int) rMangler {
return func(buf []byte, v reflect.Value) []byte {
for i := 0; i < count; i++ {
// Check for nil
if v.IsNil() {
buf = append(buf, '0')
return buf
}
// Further deref ptr
buf = append(buf, '1')
v = v.Elem()
}
// Mangle fully deref'd ptr
return mangle(buf, v.Interface())
}
}
// deref_ptr_rmangler is the reflect.Value equivalent of deref_ptr_mangler.
func deref_ptr_rmangler(mangle rMangler, count int) rMangler {
return func(buf []byte, v reflect.Value) []byte {
for i := 0; i < count; i++ {
// Check for nil
if v.IsNil() {
buf = append(buf, '0')
return buf
}
// Further deref ptr
buf = append(buf, '1')
v = v.Elem()
}
// Mangle fully deref'd ptr
return mangle(buf, v)
}
}
// iter_array_mangler wraps a Mangler to mangle each element of an
// array / slice value, with elements separated by ','.
func iter_array_mangler(mangle Mangler) rMangler {
return func(buf []byte, v reflect.Value) []byte {
n := v.Len()
for i := 0; i < n; i++ {
buf = mangle(buf, v.Index(i).Interface())
buf = append(buf, ',')
}
if n > 0 {
buf = buf[:len(buf)-1]
}
return buf
}
}
// iter_array_rmangler is the reflect.Value equivalent of iter_array_mangler.
func iter_array_rmangler(mangle rMangler) rMangler {
return func(buf []byte, v reflect.Value) []byte {
n := v.Len()
for i := 0; i < n; i++ {
buf = mangle(buf, v.Index(i))
buf = append(buf, ',')
}
if n > 0 {
buf = buf[:len(buf)-1]
}
return buf
}
}
// iter_map_rmangler wraps key / value manglers to mangle each map entry
// as key ':' value, with entries separated by '.'.
func iter_map_rmangler(kMangle, vMangle rMangler) rMangler {
return func(buf []byte, v reflect.Value) []byte {
r := v.MapRange()
for r.Next() {
buf = kMangle(buf, r.Key())
buf = append(buf, ':')
buf = vMangle(buf, r.Value())
buf = append(buf, '.')
}
if v.Len() > 0 {
buf = buf[:len(buf)-1]
}
return buf
}
}
// iface_value returns the raw value ptr for input boxed within interface{} type.
func iface_value(a any) unsafe.Pointer {
type eface struct {
Type unsafe.Pointer
Value unsafe.Pointer
}
return (*eface)(unsafe.Pointer(&a)).Value
}

354
vendor/codeberg.org/gruf/go-mangler/load.go generated vendored Normal file

@@ -0,0 +1,354 @@
package mangler
import (
"encoding"
"net/url"
"reflect"
"time"
)
// loadMangler is the top-most Mangler load function. It guarantees that a Mangler
// function will be returned for given value interface{} and reflected type. Else panics.
func loadMangler(a any, t reflect.Type) Mangler {
// Load mangler function
mng, rmng := load(a, t)
if rmng != nil {
// Wrap reflect mangler to handle iface
return func(buf []byte, a any) []byte {
return rmng(buf, reflect.ValueOf(a))
}
}
if mng == nil {
// No mangler function could be determined
panic("cannot mangle type: " + t.String())
}
return mng
}
// load will load a Mangler or reflect Mangler for given type and iface 'a'.
// Note: allocates new interface value if nil provided, i.e. if coming via reflection.
func load(a any, t reflect.Type) (Mangler, rMangler) {
if t == nil {
// There is no reflect type to search by
panic("cannot mangle nil interface{} type")
}
if a == nil {
// Alloc new iface instance
v := reflect.New(t).Elem()
a = v.Interface()
}
// Check in fast iface type switch
if mng := loadIface(a); mng != nil {
return mng, nil
}
// Search by reflection
return loadReflect(t)
}
// loadIface is used as a first-resort interface{} type switcher loader
// for types implementing Mangled and providing performant alternative
// Mangler functions for standard library types to avoid reflection.
func loadIface(a any) Mangler {
switch a.(type) {
case Mangled:
return mangle_mangled
case time.Time:
return mangle_time
case *time.Time:
return mangle_time_ptr
case *url.URL:
return mangle_stringer
case encoding.BinaryMarshaler:
return mangle_binary
// NOTE:
// we don't just handle ALL fmt.Stringer types, as the output is often
// large and unwieldy, and this interface switch is only for types where
// it is faster to avoid reflection. If callers want better performance
// they can implement Mangled{}.
default:
return nil
}
}
// loadReflect will load a Mangler (or rMangler) function for the given reflected type info.
// NOTE: this is used as the top level load function for nested reflective searches.
func loadReflect(t reflect.Type) (Mangler, rMangler) {
switch t.Kind() {
case reflect.Pointer:
return loadReflectPtr(t.Elem())
case reflect.String:
return mangle_string, nil
case reflect.Array:
return nil, loadReflectArray(t.Elem())
case reflect.Slice:
// Element type
et := t.Elem()
// Preferably look for known slice mangler func
if mng := loadReflectKnownSlice(et); mng != nil {
return mng, nil
}
// Else handle as array elements
return nil, loadReflectArray(et)
case reflect.Map:
return nil, loadReflectMap(t.Key(), t.Elem())
case reflect.Bool:
return mangle_bool, nil
case reflect.Int,
reflect.Uint,
reflect.Uintptr:
return mangle_platform_int, nil
case reflect.Int8,
reflect.Uint8:
return mangle_8bit, nil
case reflect.Int16,
reflect.Uint16:
return mangle_16bit, nil
case reflect.Int32,
reflect.Uint32:
return mangle_32bit, nil
case reflect.Int64,
reflect.Uint64:
return mangle_64bit, nil
case reflect.Float32:
return mangle_32bit, nil
case reflect.Float64:
return mangle_64bit, nil
case reflect.Complex64:
return mangle_64bit, nil
case reflect.Complex128:
return mangle_128bit, nil
default:
return nil, nil
}
}
// loadReflectPtr loads a Mangler (or rMangler) function for a ptr's element type.
// This also handles dereferencing of any further ptr indirections (e.g. ***int).
func loadReflectPtr(et reflect.Type) (Mangler, rMangler) {
count := 1
// Iteratively dereference ptrs
for et.Kind() == reflect.Pointer {
et = et.Elem()
count++
}
if et.Kind() == reflect.Array {
// Special case of addressable (sliceable) array
if mng := loadReflectKnownSlice(et); mng != nil {
if count == 1 {
return mng, nil
}
return nil, deref_ptr_mangler(mng, count-1)
}
// Look for an array mangler function, this will
// access elements by index using reflect.Value and
// pass each one to a separate mangler function.
if rmng := loadReflectArray(et); rmng != nil {
return nil, deref_ptr_rmangler(rmng, count)
}
return nil, nil
}
// Try to remove a layer of derefs by loading a mangler
// for a known ptr kind. The less reflection the better!
if mng := loadReflectKnownPtr(et); mng != nil {
if count == 1 {
return mng, nil
}
return nil, deref_ptr_mangler(mng, count-1)
}
// Search for ptr element type mangler
if mng, rmng := load(nil, et); mng != nil {
return nil, deref_ptr_mangler(mng, count)
} else if rmng != nil {
return nil, deref_ptr_rmangler(rmng, count)
}
return nil, nil
}
// loadReflectKnownPtr loads a Mangler function for a known ptr-of-element type (in this case, primitive ptrs).
func loadReflectKnownPtr(et reflect.Type) Mangler {
switch et.Kind() {
case reflect.String:
return mangle_string_ptr
case reflect.Bool:
return mangle_bool_ptr
case reflect.Int,
reflect.Uint,
reflect.Uintptr:
return mangle_platform_int_ptr
case reflect.Int8,
reflect.Uint8:
return mangle_8bit_ptr
case reflect.Int16,
reflect.Uint16:
return mangle_16bit_ptr
case reflect.Int32,
reflect.Uint32:
return mangle_32bit_ptr
case reflect.Int64,
reflect.Uint64:
return mangle_64bit_ptr
case reflect.Float32:
return mangle_32bit_ptr
case reflect.Float64:
return mangle_64bit_ptr
case reflect.Complex64:
return mangle_64bit_ptr
case reflect.Complex128:
return mangle_128bit_ptr
default:
return nil
}
}
// loadReflectKnownSlice loads a Mangler function for a known slice-of-element type (in this case, primitives).
func loadReflectKnownSlice(et reflect.Type) Mangler {
switch et.Kind() {
case reflect.String:
return mangle_string_slice
case reflect.Bool:
return mangle_bool_slice
case reflect.Int,
reflect.Uint,
reflect.Uintptr:
return mangle_platform_int_slice
case reflect.Int8,
reflect.Uint8:
return mangle_8bit_slice
case reflect.Int16,
reflect.Uint16:
return mangle_16bit_slice
case reflect.Int32,
reflect.Uint32:
return mangle_32bit_slice
case reflect.Int64,
reflect.Uint64:
return mangle_64bit_slice
case reflect.Float32:
return mangle_32bit_slice
case reflect.Float64:
return mangle_64bit_slice
case reflect.Complex64:
return mangle_64bit_slice
case reflect.Complex128:
return mangle_128bit_slice
default:
return nil
}
}
// loadReflectArray loads an rMangler function for an array (or slice) of given element type.
func loadReflectArray(et reflect.Type) rMangler {
// Search via reflected array element type
if mng, rmng := load(nil, et); mng != nil {
return iter_array_mangler(mng)
} else if rmng != nil {
return iter_array_rmangler(rmng)
}
return nil
}
// loadReflectMap loads an rMangler function for a map with the given key and value types.
func loadReflectMap(kt, vt reflect.Type) rMangler {
var kmng, vmng rMangler
// Search for mangler for key type
mng, rmng := load(nil, kt)
switch {
// Wrap key mangler to reflect
case mng != nil:
mng := mng // take our own ptr
kmng = func(buf []byte, v reflect.Value) []byte {
return mng(buf, v.Interface())
}
// Use reflect key mangler as-is
case rmng != nil:
kmng = rmng
// No mangler found
default:
return nil
}
// Search for mangler for value type
mng, rmng = load(nil, vt)
switch {
// Wrap value mangler to reflect
case mng != nil:
mng := mng // take our own ptr
vmng = func(buf []byte, v reflect.Value) []byte {
return mng(buf, v.Interface())
}
// Use reflect value mangler as-is
case rmng != nil:
vmng = rmng
// No mangler found
default:
return nil
}
// Wrap key/value manglers in map iter
return iter_map_rmangler(kmng, vmng)
}
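The loader functions above are internal, but their effect shows through the public API: container and pointer types not covered by the fast interface switch are still mangled via reflection. A rough sketch of assumed usage of the public API (exact output bytes are unspecified):

```
package main

import (
	"fmt"

	"codeberg.org/gruf/go-mangler"
)

func main() {
	// Maps are handled by loadReflectMap: entries are mangled as
	// key ':' value pairs, separated by '.'.
	m := map[string]*int{"answer": new(int)}
	fmt.Printf("%q\n", mangler.String(m))

	// Slices of primitive kinds hit the known-slice fast path
	// (loadReflectKnownSlice -> mangle_64bit_slice here).
	fmt.Printf("%q\n", mangler.String([]int64{1, 2, 3}))
}
```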

132
vendor/codeberg.org/gruf/go-mangler/mangle.go generated vendored Normal file

@@ -0,0 +1,132 @@
package mangler
import (
"encoding/binary"
"reflect"
"unsafe"
"github.com/cespare/xxhash"
"github.com/cornelk/hashmap"
)
var (
// manglers is a map of runtime type ptrs => Mangler functions.
manglers = hashmap.New[uintptr, Mangler]()
// bin is a short-hand for our chosen byteorder encoding.
bin = binary.LittleEndian
)
// Mangled is an interface that allows any type to implement a custom
// Mangler function to improve performance when mangling this type.
type Mangled interface {
Mangle(buf []byte) []byte
}
// Mangler is a function that will take an input interface value of known
// type, and append it in mangled serialized form to the given byte buffer.
// While the value type is an interface, the Mangler functions are accessed
// by the value's runtime type pointer, allowing the input value type to be known.
type Mangler func(buf []byte, value any) []byte
// rMangler is functionally the same as a Mangler function, but it
// takes the value input in reflected form. By specifying these differences
// in mangler function types, it allows us to cut back on new calls to
// `reflect.ValueOf()` and instead pass by existing reflected values.
type rMangler func(buf []byte, value reflect.Value) []byte
// Get will fetch the Mangler function for given runtime type.
func Get(t reflect.Type) (Mangler, bool) {
if t == nil {
return nil, false
}
uptr := uintptr(iface_value(t))
return manglers.Get(uptr)
}
// Register will register the given Mangler function for use with vars of given runtime type. This allows
// registering performant manglers for existing types not implementing Mangled (e.g. std library types).
// NOTE: panics if there already exists a Mangler function for given type. Register on init().
func Register(t reflect.Type, m Mangler) {
if t == nil {
// Nil interface{} types cannot be searched by, do not accept
panic("cannot register mangler for nil interface{} type")
}
// Get raw runtime type ptr
uptr := uintptr(iface_value(t))
// Ensure this is a unique encoder
if _, ok := manglers.Get(uptr); ok {
panic("already registered mangler for type: " + t.String())
}
// Cache this encoder func
manglers.Set(uptr, m)
}
// Append will append the mangled form of input value 'a' to buffer 'b'.
// See mangler.String() for more information on mangled output.
func Append(b []byte, a any) []byte {
// Get reflect type of 'a'
t := reflect.TypeOf(a)
// Get raw runtime type ptr
uptr := uintptr(iface_value(t))
// Look for a cached mangler
mng, ok := manglers.Get(uptr)
if !ok {
// Load mangler into cache
mng = loadMangler(a, t)
manglers.Set(uptr, mng)
}
// First write the type ptr (this adds
// a unique prefix for each runtime type).
b = mangle_platform_int(b, uptr)
// Finally, mangle value
return mng(b, a)
}
// String will return the mangled format of input value 'a'. This
// mangled output will be unique for all default supported input types
// during a single runtime instance. Uniqueness cannot be guaranteed
// between separate runtime instances (whether running concurrently, or
// the same application running at different times).
//
// The exact formatting of the output data should not be relied upon,
// only that it is unique given the above constraints. Generally though,
// the mangled output is the binary formatted text of given input data.
//
// Uniqueness is guaranteed for similar input data of differing types
// (e.g. string("hello world") vs. []byte("hello world")) by prefixing
// mangled output with the input data's runtime type pointer.
//
// Default supported types include:
// - string
// - bool
// - int,int8,int16,int32,int64
// - uint,uint8,uint16,uint32,uint64,uintptr
// - float32,float64
// - complex64,complex128
// - all type aliases of above
// - time.Time{}, *url.URL{}
// - mangler.Mangled{}
// - encoding.BinaryMarshaler{}
// - all pointers to the above
// - all slices / arrays of the above
// - all map keys / values of the above
func String(a any) string {
b := Append(make([]byte, 0, 32), a)
return *(*string)(unsafe.Pointer(&b))
}
// Hash returns the xxHash digest of the result of mangler.Append(nil, 'a').
func Hash(a any) uint64 {
b := make([]byte, 0, 32)
b = Append(b, a)
return xxhash.Sum64(b)
}
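To illustrate the Register / Append flow documented above, a short sketch of assumed usage (net/netip.Addr is chosen purely for illustration; error handling is elided):

```
package main

import (
	"fmt"
	"net/netip"
	"reflect"

	"codeberg.org/gruf/go-mangler"
)

func init() {
	// Register a Mangler for an existing stdlib type that doesn't
	// implement Mangled. Register panics on duplicates, so do it
	// once, from init().
	mangler.Register(reflect.TypeOf(netip.Addr{}), func(buf []byte, a any) []byte {
		b, _ := a.(netip.Addr).MarshalBinary() // error elided in this sketch
		return append(buf, b...)
	})
}

func main() {
	addr := netip.MustParseAddr("192.0.2.1")
	// Append/String look up the registered Mangler by runtime type
	// pointer before falling back to loading one.
	fmt.Printf("%q\n", mangler.String(addr))
}
```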

264
vendor/codeberg.org/gruf/go-mangler/manglers.go generated vendored Normal file

@@ -0,0 +1,264 @@
package mangler
import (
"encoding"
"fmt"
"math/bits"
"time"
_ "unsafe"
)
// Notes:
// the use of unsafe conversion from the direct interface values to
// the chosen types in each of the below functions allows us to convert
// not only those types directly, but anything type-aliased to those
// types. e.g. `time.Duration` directly as int64.
func mangle_string(buf []byte, a any) []byte {
return append(buf, *(*string)(iface_value(a))...)
}
func mangle_string_ptr(buf []byte, a any) []byte {
if ptr := (*string)(iface_value(a)); ptr != nil {
buf = append(buf, '1')
return append(buf, *ptr...)
}
buf = append(buf, '0')
return buf
}
func mangle_string_slice(buf []byte, a any) []byte {
s := *(*[]string)(iface_value(a))
for _, s := range s {
buf = append(buf, s...)
buf = append(buf, ',')
}
if len(s) > 0 {
buf = buf[:len(buf)-1]
}
return buf
}
func mangle_bool(buf []byte, a any) []byte {
if *(*bool)(iface_value(a)) {
return append(buf, '1')
}
return append(buf, '0')
}
func mangle_bool_ptr(buf []byte, a any) []byte {
if ptr := (*bool)(iface_value(a)); ptr != nil {
buf = append(buf, '1')
if *ptr {
return append(buf, '1')
}
return append(buf, '0')
}
buf = append(buf, '0')
return buf
}
func mangle_bool_slice(buf []byte, a any) []byte {
for _, b := range *(*[]bool)(iface_value(a)) {
if b {
buf = append(buf, '1')
} else {
buf = append(buf, '0')
}
}
return buf
}
func mangle_8bit(buf []byte, a any) []byte {
return append(buf, *(*uint8)(iface_value(a)))
}
func mangle_8bit_ptr(buf []byte, a any) []byte {
if ptr := (*uint8)(iface_value(a)); ptr != nil {
buf = append(buf, '1')
return append(buf, *ptr)
}
buf = append(buf, '0')
return buf
}
func mangle_8bit_slice(buf []byte, a any) []byte {
return append(buf, *(*[]uint8)(iface_value(a))...)
}
func mangle_16bit(buf []byte, a any) []byte {
return bin.AppendUint16(buf, *(*uint16)(iface_value(a)))
}
func mangle_16bit_ptr(buf []byte, a any) []byte {
if ptr := (*uint16)(iface_value(a)); ptr != nil {
buf = append(buf, '1')
return bin.AppendUint16(buf, *ptr)
}
buf = append(buf, '0')
return buf
}
func mangle_16bit_slice(buf []byte, a any) []byte {
for _, u := range *(*[]uint16)(iface_value(a)) {
buf = bin.AppendUint16(buf, u)
}
return buf
}
func mangle_32bit(buf []byte, a any) []byte {
return bin.AppendUint32(buf, *(*uint32)(iface_value(a)))
}
func mangle_32bit_ptr(buf []byte, a any) []byte {
if ptr := (*uint32)(iface_value(a)); ptr != nil {
buf = append(buf, '1')
return bin.AppendUint32(buf, *ptr)
}
buf = append(buf, '0')
return buf
}
func mangle_32bit_slice(buf []byte, a any) []byte {
for _, u := range *(*[]uint32)(iface_value(a)) {
buf = bin.AppendUint32(buf, u)
}
return buf
}
func mangle_64bit(buf []byte, a any) []byte {
return bin.AppendUint64(buf, *(*uint64)(iface_value(a)))
}
func mangle_64bit_ptr(buf []byte, a any) []byte {
if ptr := (*uint64)(iface_value(a)); ptr != nil {
buf = append(buf, '1')
return bin.AppendUint64(buf, *ptr)
}
buf = append(buf, '0')
return buf
}
func mangle_64bit_slice(buf []byte, a any) []byte {
for _, u := range *(*[]uint64)(iface_value(a)) {
buf = bin.AppendUint64(buf, u)
}
return buf
}
// mangle_platform_int contains the correct iface mangler at runtime for the platform int size.
var mangle_platform_int = func() Mangler {
switch bits.UintSize {
case 32:
return mangle_32bit
case 64:
return mangle_64bit
default:
panic("unexpected platform int size")
}
}()
// mangle_platform_int_ptr contains the correct iface mangler at runtime for the platform int size.
var mangle_platform_int_ptr = func() Mangler {
switch bits.UintSize {
case 32:
return mangle_32bit_ptr
case 64:
return mangle_64bit_ptr
default:
panic("unexpected platform int size")
}
}()
// mangle_platform_int_slice contains the correct iface mangler at runtime for the platform int size.
var mangle_platform_int_slice = func() Mangler {
switch bits.UintSize {
case 32:
return mangle_32bit_slice
case 64:
return mangle_64bit_slice
default:
panic("unexpected platform int size")
}
}()
// uint128 provides an easily mangleable data type for 128bit data types to be cast into.
type uint128 [2]uint64
func mangle_128bit(buf []byte, a any) []byte {
u2 := *(*uint128)(iface_value(a))
buf = bin.AppendUint64(buf, u2[0])
buf = bin.AppendUint64(buf, u2[1])
return buf
}
func mangle_128bit_ptr(buf []byte, a any) []byte {
if ptr := (*uint128)(iface_value(a)); ptr != nil {
buf = append(buf, '1')
buf = bin.AppendUint64(buf, (*ptr)[0])
buf = bin.AppendUint64(buf, (*ptr)[1])
return buf
}
buf = append(buf, '0')
return buf
}
func mangle_128bit_slice(buf []byte, a any) []byte {
for _, u2 := range *(*[]uint128)(iface_value(a)) {
buf = bin.AppendUint64(buf, u2[0])
buf = bin.AppendUint64(buf, u2[1])
}
return buf
}
func mangle_time(buf []byte, a any) []byte {
t := *(*time.Time)(iface_value(a))
b, err := t.MarshalBinary()
if err != nil {
panic("marshal_time: " + err.Error())
}
return append(buf, b...)
}
func mangle_time_ptr(buf []byte, a any) []byte {
if ptr := (*time.Time)(iface_value(a)); ptr != nil {
b, err := ptr.MarshalBinary()
if err != nil {
panic("marshal_time: " + err.Error())
}
buf = append(buf, '1')
return append(buf, b...)
}
buf = append(buf, '0')
return buf
}
func mangle_mangled(buf []byte, a any) []byte {
if v := a.(Mangled); v != nil {
buf = append(buf, '1')
return v.Mangle(buf)
}
buf = append(buf, '0')
return buf
}
func mangle_binary(buf []byte, a any) []byte {
if v := a.(encoding.BinaryMarshaler); v != nil {
b, err := v.MarshalBinary()
if err != nil {
panic("mangle_binary: " + err.Error())
}
buf = append(buf, '1')
return append(buf, b...)
}
buf = append(buf, '0')
return buf
}
func mangle_stringer(buf []byte, a any) []byte {
if v := a.(fmt.Stringer); v != nil {
buf = append(buf, '1')
return append(buf, v.String()...)
}
buf = append(buf, '0')
return buf
}
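As the notes at the top of this file describe, anything type-aliased to the primitive kinds is mangled through the same functions. A rough sketch of what that means in practice (assumed usage; exact bytes unspecified):

```
package main

import (
	"fmt"
	"time"

	"codeberg.org/gruf/go-mangler"
)

func main() {
	d := 90 * time.Second // time.Duration is an int64 under the hood
	i := int64(d)

	// Both are mangled by mangle_64bit, but Append prefixes the
	// runtime type pointer, so the outputs still differ.
	fmt.Println(mangler.String(d) != mangler.String(i)) // true
}
```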