[chore] update go dependencies (#4304)

- github.com/KimMachineGun/automemlimit v0.7.2 => v0.7.3
- github.com/gin-contrib/cors v1.7.5 => v1.7.6
- github.com/minio/minio-go/v7 v7.0.92 => v7.0.94
- github.com/spf13/cast v1.8.0 => v1.9.2
- github.com/uptrace/bun{,/*} v1.2.11 => v1.2.14
- golang.org/x/image v0.27.0 => v0.28.0
- golang.org/x/net v0.40.0 => v0.41.0
- code.superseriousbusiness.org/go-swagger v0.31.0-gts-go1.23-fix => v0.32.3-gts-go1.23-fix

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4304
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
Authored by kim on 2025-06-30 15:19:09 +02:00; committed by kim
commit 8b0ea56027
294 changed files with 139999 additions and 21873 deletions


@@ -80,6 +80,32 @@ Rich Feature Set includes:
rpc server/client codec to support msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
# Supported build tags
We gain performance by code-generating fast paths for slices and maps of built-in types,
and by explicitly monomorphizing generic code so we gain inlining and de-virtualization benefits.
This yields 20-40% performance improvements.
Building and running are configured using build tags, as below.
At runtime:
- codec.safe: run in safe mode (not using unsafe optimizations)
- codec.notmono: use generics code (bypassing performance-boosting monomorphized code)
- codec.notfastpath: skip fast path code for slices and maps of built-in types (number, bool, string, bytes)
Each of these "runtime" tags has a convenience synonym, i.e. safe, notmono, notfastpath.
Please use these mostly during development - use the codec.XXX forms in your go files.
Build only:
- codec.build: used to generate fastpath and monomorphization code
Test only:
- codec.notmammoth: skip the mammoth generated tests
# Extension Support
Users can register a function to handle the encoding or decoding of their custom
@@ -219,6 +245,12 @@ You can run the tag 'codec.safe' to run tests or build in safe mode. e.g.
go test -tags "alltests codec.safe" -run Suite
```
You can use the tag 'codec.notmono' to build, bypassing the monomorphized code, e.g.
```
go test -tags codec.notmono -run Json
```
# Running Benchmarks
```


@@ -0,0 +1,259 @@
//go:build !notfastpath && !codec.notfastpath
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from fastpath.go.tmpl - DO NOT EDIT.
package codec
// Fast path functions try to create a fast path encode or decode implementation
// for common maps and slices.
//
// We define the functions and register them in this single file
// so as not to pollute the encode.go and decode.go, and create a dependency in there.
// This file can be omitted without causing a build failure.
//
// The advantage of fast paths is:
// - Many calls bypass reflection altogether
//
// Currently supported:
// - slice of all builtin types (numeric, bool, string, []byte)
// - maps of builtin types to builtin or interface{} type, EXCEPT FOR
// keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{}
// AND values of type int8/16/32, uint16/32
// This should provide adequate "typical" implementations.
//
// Note that fast track decode functions must handle values for which an address cannot be obtained.
// For example:
// m2 := map[string]int{}
// p2 := []interface{}{m2}
// // decoding into p2 will bomb if fast track functions do not treat m2 as unaddressable.
//
import (
"reflect"
"slices"
"sort"
)
const fastpathEnabled = true
type fastpathARtid [56]uintptr
type fastpathRtRtid struct {
rtid uintptr
rt reflect.Type
}
type fastpathARtRtid [56]fastpathRtRtid
var (
fastpathAvRtidArr fastpathARtid
fastpathAvRtRtidArr fastpathARtRtid
fastpathAvRtid = fastpathAvRtidArr[:]
fastpathAvRtRtid = fastpathAvRtRtidArr[:]
)
func fastpathAvIndex(rtid uintptr) (i uint, ok bool) {
return searchRtids(fastpathAvRtid, rtid)
}
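// searchRtids is defined elsewhere in the package and is not part of this hunk.
// As a rough, hypothetical sketch (not the library's actual implementation), a
// lookup over the sorted rtid slice could be a plain lower-bound binary search:
//
//	func searchRtidsSketch(s []uintptr, rtid uintptr) (i uint, ok bool) {
//		lo, hi := uint(0), uint(len(s))
//		for lo < hi {
//			m := (lo + hi) / 2 // slice is sorted ascending (see init below)
//			if s[m] < rtid {
//				lo = m + 1
//			} else {
//				hi = m
//			}
//		}
//		return lo, lo < uint(len(s)) && s[lo] == rtid
//	}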
func init() {
var i uint = 0
fn := func(v interface{}) {
xrt := reflect.TypeOf(v)
xrtid := rt2id(xrt)
xptrtid := rt2id(reflect.PointerTo(xrt))
fastpathAvRtid[i] = xrtid
fastpathAvRtRtid[i] = fastpathRtRtid{rtid: xrtid, rt: xrt}
encBuiltinRtids = append(encBuiltinRtids, xrtid, xptrtid)
decBuiltinRtids = append(decBuiltinRtids, xrtid, xptrtid)
i++
}
fn([]interface{}(nil))
fn([]string(nil))
fn([][]byte(nil))
fn([]float32(nil))
fn([]float64(nil))
fn([]uint8(nil))
fn([]uint64(nil))
fn([]int(nil))
fn([]int32(nil))
fn([]int64(nil))
fn([]bool(nil))
fn(map[string]interface{}(nil))
fn(map[string]string(nil))
fn(map[string][]byte(nil))
fn(map[string]uint8(nil))
fn(map[string]uint64(nil))
fn(map[string]int(nil))
fn(map[string]int32(nil))
fn(map[string]float64(nil))
fn(map[string]bool(nil))
fn(map[uint8]interface{}(nil))
fn(map[uint8]string(nil))
fn(map[uint8][]byte(nil))
fn(map[uint8]uint8(nil))
fn(map[uint8]uint64(nil))
fn(map[uint8]int(nil))
fn(map[uint8]int32(nil))
fn(map[uint8]float64(nil))
fn(map[uint8]bool(nil))
fn(map[uint64]interface{}(nil))
fn(map[uint64]string(nil))
fn(map[uint64][]byte(nil))
fn(map[uint64]uint8(nil))
fn(map[uint64]uint64(nil))
fn(map[uint64]int(nil))
fn(map[uint64]int32(nil))
fn(map[uint64]float64(nil))
fn(map[uint64]bool(nil))
fn(map[int]interface{}(nil))
fn(map[int]string(nil))
fn(map[int][]byte(nil))
fn(map[int]uint8(nil))
fn(map[int]uint64(nil))
fn(map[int]int(nil))
fn(map[int]int32(nil))
fn(map[int]float64(nil))
fn(map[int]bool(nil))
fn(map[int32]interface{}(nil))
fn(map[int32]string(nil))
fn(map[int32][]byte(nil))
fn(map[int32]uint8(nil))
fn(map[int32]uint64(nil))
fn(map[int32]int(nil))
fn(map[int32]int32(nil))
fn(map[int32]float64(nil))
fn(map[int32]bool(nil))
sort.Slice(fastpathAvRtid, func(i, j int) bool { return fastpathAvRtid[i] < fastpathAvRtid[j] })
sort.Slice(fastpathAvRtRtid, func(i, j int) bool { return fastpathAvRtRtid[i].rtid < fastpathAvRtRtid[j].rtid })
slices.Sort(encBuiltinRtids)
slices.Sort(decBuiltinRtids)
}
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
switch v := iv.(type) {
case *[]interface{}:
*v = nil
case *[]string:
*v = nil
case *[][]byte:
*v = nil
case *[]float32:
*v = nil
case *[]float64:
*v = nil
case *[]uint8:
*v = nil
case *[]uint64:
*v = nil
case *[]int:
*v = nil
case *[]int32:
*v = nil
case *[]int64:
*v = nil
case *[]bool:
*v = nil
case *map[string]interface{}:
*v = nil
case *map[string]string:
*v = nil
case *map[string][]byte:
*v = nil
case *map[string]uint8:
*v = nil
case *map[string]uint64:
*v = nil
case *map[string]int:
*v = nil
case *map[string]int32:
*v = nil
case *map[string]float64:
*v = nil
case *map[string]bool:
*v = nil
case *map[uint8]interface{}:
*v = nil
case *map[uint8]string:
*v = nil
case *map[uint8][]byte:
*v = nil
case *map[uint8]uint8:
*v = nil
case *map[uint8]uint64:
*v = nil
case *map[uint8]int:
*v = nil
case *map[uint8]int32:
*v = nil
case *map[uint8]float64:
*v = nil
case *map[uint8]bool:
*v = nil
case *map[uint64]interface{}:
*v = nil
case *map[uint64]string:
*v = nil
case *map[uint64][]byte:
*v = nil
case *map[uint64]uint8:
*v = nil
case *map[uint64]uint64:
*v = nil
case *map[uint64]int:
*v = nil
case *map[uint64]int32:
*v = nil
case *map[uint64]float64:
*v = nil
case *map[uint64]bool:
*v = nil
case *map[int]interface{}:
*v = nil
case *map[int]string:
*v = nil
case *map[int][]byte:
*v = nil
case *map[int]uint8:
*v = nil
case *map[int]uint64:
*v = nil
case *map[int]int:
*v = nil
case *map[int]int32:
*v = nil
case *map[int]float64:
*v = nil
case *map[int]bool:
*v = nil
case *map[int32]interface{}:
*v = nil
case *map[int32]string:
*v = nil
case *map[int32][]byte:
*v = nil
case *map[int32]uint8:
*v = nil
case *map[int32]uint64:
*v = nil
case *map[int32]int:
*v = nil
case *map[int32]int32:
*v = nil
case *map[int32]float64:
*v = nil
case *map[int32]bool:
*v = nil
default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
return false
}
return true
}
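
The set-zero type switch above is what lets a stream nil reset a registered slice or map without going through reflection. A minimal usage sketch against the package's public API (assuming the documented semantics that a nil in the stream resets the destination to its zero value; the CBOR input byte is illustrative):
```
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.CborHandle
	m := map[string]int{"a": 1}
	// 0xf6 is the CBOR null simple value; decoding it into *map[string]int
	// resets the map to its zero value (nil), a case the fast path handles directly.
	if err := codec.NewDecoderBytes([]byte{0xf6}, &h).Decode(&m); err != nil {
		panic(err)
	}
	fmt.Println(m == nil) // expected: true
}
```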

File diff suppressed because it is too large.


@@ -2,7 +2,6 @@
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build notfastpath || codec.notfastpath
// +build notfastpath codec.notfastpath
package codec
@@ -18,24 +17,18 @@ const fastpathEnabled = false
// This tag disables fastpath during build, allowing for faster build, test execution,
// short-program runs, etc.
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
// func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
// func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false }
type fastpathT struct{}
type fastpathE struct {
rtid uintptr
rt reflect.Type
encfn func(*Encoder, *codecFnInfo, reflect.Value)
decfn func(*Decoder, *codecFnInfo, reflect.Value)
func fastpathAvIndex(rtid uintptr) (uint, bool) { return 0, false }
type fastpathRtRtid struct {
rtid uintptr
rt reflect.Type
}
type fastpathA [0]fastpathE
func fastpathAvIndex(rtid uintptr) int { return -1 }
type fastpathARtRtid [0]fastpathRtRtid
var fastpathAv fastpathA
var fastpathTV fastpathT
var fastpathAvRtRtid fastpathARtRtid


@@ -0,0 +1,26 @@
//go:build notfastpath || (codec.notfastpath && (notmono || codec.notmono))
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import "reflect"
// type fastpathT struct{}
type fastpathE[T encDriver] struct {
rt reflect.Type
encfn func(*encoder[T], *encFnInfo, reflect.Value)
}
type fastpathD[T decDriver] struct {
rt reflect.Type
decfn func(*decoder[T], *decFnInfo, reflect.Value)
}
type fastpathEs[T encDriver] [0]fastpathE[T]
type fastpathDs[T decDriver] [0]fastpathD[T]
func (helperEncDriver[T]) fastpathEncodeTypeSwitch(iv interface{}, e *encoder[T]) bool { return false }
func (helperDecDriver[T]) fastpathDecodeTypeSwitch(iv interface{}, d *decoder[T]) bool { return false }
func (helperEncDriver[T]) fastpathEList() (v *fastpathEs[T]) { return }
func (helperDecDriver[T]) fastpathDList() (v *fastpathDs[T]) { return }

vendor/github.com/ugorji/go/codec/binc.base.go (generated, vendored, new file: 194 lines)

@@ -0,0 +1,194 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"reflect"
"time"
)
// Symbol management:
// - symbols are stored in a symbol map during encoding and decoding.
// - the symbols persist until the (En|De)coder ResetXXX method is called.
const bincDoPrune = true
// vd as low 4 bits (there are 16 slots)
const (
bincVdSpecial byte = iota
bincVdPosInt
bincVdNegInt
bincVdFloat
bincVdString
bincVdByteArray
bincVdArray
bincVdMap
bincVdTimestamp
bincVdSmallInt
_ // bincVdUnicodeOther
bincVdSymbol
_ // bincVdDecimal
_ // open slot
_ // open slot
bincVdCustomExt = 0x0f
)
const (
bincSpNil byte = iota
bincSpFalse
bincSpTrue
bincSpNan
bincSpPosInf
bincSpNegInf
bincSpZeroFloat
bincSpZero
bincSpNegOne
)
const (
_ byte = iota // bincFlBin16
bincFlBin32
_ // bincFlBin32e
bincFlBin64
_ // bincFlBin64e
// others not currently supported
)
const bincBdNil = 0 // bincVdSpecial<<4 | bincSpNil // staticcheck barfs on this (SA4016)
var (
bincdescSpecialVsNames = map[byte]string{
bincSpNil: "nil",
bincSpFalse: "false",
bincSpTrue: "true",
bincSpNan: "float",
bincSpPosInf: "float",
bincSpNegInf: "float",
bincSpZeroFloat: "float",
bincSpZero: "uint",
bincSpNegOne: "int",
}
bincdescVdNames = map[byte]string{
bincVdSpecial: "special",
bincVdSmallInt: "uint",
bincVdPosInt: "uint",
bincVdFloat: "float",
bincVdSymbol: "string",
bincVdString: "string",
bincVdByteArray: "bytes",
bincVdTimestamp: "time",
bincVdCustomExt: "ext",
bincVdArray: "array",
bincVdMap: "map",
}
)
func bincdescbd(bd byte) (s string) {
return bincdesc(bd>>4, bd&0x0f)
}
func bincdesc(vd, vs byte) (s string) {
if vd == bincVdSpecial {
s = bincdescSpecialVsNames[vs]
} else {
s = bincdescVdNames[vd]
}
if s == "" {
s = "unknown"
}
return
}
type bincEncState struct {
m map[string]uint16 // symbols
}
// func (e *bincEncState) restoreState(v interface{}) { e.m = v.(map[string]uint16) }
// func (e bincEncState) captureState() interface{} { return e.m }
// func (e *bincEncState) resetState() { e.m = nil }
// func (e *bincEncState) reset() { e.resetState() }
func (e *bincEncState) reset() { e.m = nil }
type bincDecState struct {
bdRead bool
bd byte
vd byte
vs byte
_ bool
// MARKER: consider using binary search here instead of a map (ie bincDecSymbol)
s map[uint16][]byte
}
// func (x bincDecState) captureState() interface{} { return x }
// func (x *bincDecState) resetState() { *x = bincDecState{} }
// func (x *bincDecState) reset() { x.resetState() }
// func (x *bincDecState) restoreState(v interface{}) { *x = v.(bincDecState) }
func (x *bincDecState) reset() { *x = bincDecState{} }
//------------------------------------
// BincHandle is a Handle for the Binc Schema-Free Encoding Format
// defined at https://github.com/ugorji/binc .
//
// BincHandle currently supports all Binc features with the following EXCEPTIONS:
// - only integers up to 64 bits of precision are supported.
// big integers are unsupported.
// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types).
// extended precision and decimal IEEE 754 floats are unsupported.
// - Only UTF-8 strings supported.
// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
//
// Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
type BincHandle struct {
binaryEncodingType
notJsonType
// noElemSeparators
BasicHandle
// AsSymbols defines what should be encoded as symbols.
//
// Encoding as symbols can reduce the encoded size significantly.
//
// However, during decoding, each string to be encoded as a symbol must
// be checked to see if it has been seen before. Consequently, encoding time
// will increase if using symbols, because string comparisons have a clear cost.
//
// Values:
// - 0: default: library uses best judgement
// - 1: use symbols
// - 2: do not use symbols
AsSymbols uint8
// AsSymbols: may later on introduce more options ...
// - m: map keys
// - s: struct fields
// - n: none
// - a: all: same as m, s, ...
// _ [7]uint64 // padding (cache-aligned)
}
// Name returns the name of the handle: binc
func (h *BincHandle) Name() string { return "binc" }
func (h *BincHandle) desc(bd byte) string { return bincdesc(bd>>4, bd&0x0f) }
// SetBytesExt sets an extension
func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
func bincEncodeTime(t time.Time) []byte {
return customEncodeTime(t)
}
func bincDecodeTime(bs []byte) (tt time.Time, err error) {
return customDecodeTime(bs)
}
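
For context, a minimal encoding sketch using the public BincHandle API (the AsSymbols value follows the field documentation above; the struct and data are illustrative):
```
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.BincHandle
	h.AsSymbols = 1 // per the docs above: 1 = encode repeated strings (e.g. field names) as symbols

	type rec struct{ Name, City string }
	recs := []rec{{"ann", "lagos"}, {"bob", "lagos"}}

	var out []byte
	if err := codec.NewEncoderBytes(&out, &h).Encode(recs); err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d records into %d bytes\n", len(recs), len(out))
}
```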

File diff suppressed because it is too large.

File diff suppressed because it is too large.

vendor/github.com/ugorji/go/codec/binc.mono.generated.go (generated, vendored, new file: 8158 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,52 @@
//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath)
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"reflect"
)
type fastpathEBincBytes struct {
rt reflect.Type
encfn func(*encoderBincBytes, *encFnInfo, reflect.Value)
}
type fastpathDBincBytes struct {
rt reflect.Type
decfn func(*decoderBincBytes, *decFnInfo, reflect.Value)
}
type fastpathEsBincBytes [0]fastpathEBincBytes
type fastpathDsBincBytes [0]fastpathDBincBytes
func (helperEncDriverBincBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderBincBytes) bool {
return false
}
func (helperDecDriverBincBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderBincBytes) bool {
return false
}
func (helperEncDriverBincBytes) fastpathEList() (v *fastpathEsBincBytes) { return }
func (helperDecDriverBincBytes) fastpathDList() (v *fastpathDsBincBytes) { return }
type fastpathEBincIO struct {
rt reflect.Type
encfn func(*encoderBincIO, *encFnInfo, reflect.Value)
}
type fastpathDBincIO struct {
rt reflect.Type
decfn func(*decoderBincIO, *decFnInfo, reflect.Value)
}
type fastpathEsBincIO [0]fastpathEBincIO
type fastpathDsBincIO [0]fastpathDBincIO
func (helperEncDriverBincIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderBincIO) bool {
return false
}
func (helperDecDriverBincIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderBincIO) bool {
return false
}
func (helperEncDriverBincIO) fastpathEList() (v *fastpathEsBincIO) { return }
func (helperDecDriverBincIO) fastpathDList() (v *fastpathDsBincIO) { return }


@@ -1,232 +1,61 @@
#!/bin/bash
# Run all the different permutations of all the tests and other things
# This helps ensure that nothing gets broken.
# Build and Run the different test permutations.
# This helps validate that nothing gets broken.
_tests() {
local vet="" # TODO: make it off
local gover=$( ${gocmd} version | cut -f 3 -d ' ' )
[[ $( ${gocmd} version ) == *"gccgo"* ]] && zcover=0
[[ $( ${gocmd} version ) == *"gollvm"* ]] && zcover=0
case $gover in
go1.[7-9]*|go1.1[0-9]*|go2.*|devel*) true ;;
*) return 1
esac
# note that codecgen requires fastpath, so you cannot do "codecgen codec.notfastpath"
# we test the following permutations which all execute different code paths as below.
echo "TestCodecSuite: (fastpath/unsafe), (!fastpath/unsafe), (fastpath/!unsafe), (!fastpath/!unsafe), (codecgen/unsafe)"
local echo=1
local nc=2 # count
local cpus="1,$(nproc)"
# if using the race detector, then set nc to
if [[ " ${zargs[@]} " =~ "-race" ]]; then
cpus="$(nproc)"
fi
local a=( "" "codec.notfastpath" "codec.safe" "codec.notfastpath codec.safe" "codecgen" )
local b=()
local c=()
for i in "${a[@]}"
do
local i2=${i:-default}
[[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'alltests $i'; RUN: 'TestCodecSuite'"
[[ "$zcover" == "1" ]] && c=( -coverprofile "${i2// /-}.cov.out" )
true &&
${gocmd} vet -printfuncs "errorf" "$@" &&
if [[ "$echo" == 1 ]]; then set -o xtrace; fi &&
${gocmd} test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "alltests $i" -count $nc -cpu $cpus -run "TestCodecSuite" "${c[@]}" "$@" &
if [[ "$echo" == 1 ]]; then set +o xtrace; fi
b+=("${i2// /-}.cov.out")
[[ "$zwait" == "1" ]] && wait
# if [[ "$?" != 0 ]]; then return 1; fi
_build_proceed() {
# return success (0) if we should, and 1 (fail) if not
if [[ "${zforce}" ]]; then return 0; fi
for a in "fastpath.generated.go" "json.mono.generated.go"; do
if [[ ! -e "$a" ]]; then return 0; fi
for b in `ls -1 *.go.tmpl gen.go gen_mono.go values_test.go`; do
if [[ "$a" -ot "$b" ]]; then return 0; fi
done
done
if [[ "$zextra" == "1" ]]; then
[[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'codec.notfastpath x'; RUN: 'Test.*X$'"
[[ "$zcover" == "1" ]] && c=( -coverprofile "x.cov.out" )
${gocmd} test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "codec.notfastpath x" -count $nc -run 'Test.*X$' "${c[@]}" &
b+=("x.cov.out")
[[ "$zwait" == "1" ]] && wait
fi
wait
# go tool cover is not supported for gccgo, gollvm, other non-standard go compilers
[[ "$zcover" == "1" ]] &&
command -v gocovmerge &&
gocovmerge "${b[@]}" > __merge.cov.out &&
${gocmd} tool cover -html=__merge.cov.out
return 1
}
# is a generation needed?
_ng() {
local a="$1"
if [[ ! -e "$a" ]]; then echo 1; return; fi
for i in `ls -1 *.go.tmpl gen.go values_test.go`
do
if [[ "$a" -ot "$i" ]]; then echo 1; return; fi
done
}
_prependbt() {
cat > ${2} <<EOF
// +build generated
EOF
cat ${1} >> ${2}
rm -f ${1}
}
# _build generates fast-path.go and gen-helper.go.
# _build generates fastpath.go
_build() {
if ! [[ "${zforce}" || $(_ng "fast-path.generated.go") || $(_ng "gen-helper.generated.go") || $(_ng "gen.generated.go") ]]; then return 0; fi
# if ! [[ "${zforce}" || $(_ng "fastpath.generated.go") || $(_ng "json.mono.generated.go") ]]; then return 0; fi
_build_proceed
if [ $? -eq 1 ]; then return 0; fi
if [ "${zbak}" ]; then
_zts=`date '+%m%d%Y_%H%M%S'`
_gg=".generated.go"
[ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak
[ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak
[ -e "fastpath${_gg}" ] && mv fastpath${_gg} fastpath${_gg}__${_zts}.bak
[ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak
fi
rm -f gen-helper.generated.go fast-path.generated.go gen.generated.go \
*safe.generated.go *_generated_test.go *.generated_ffjson_expose.go
fi
rm -f fast*path.generated.go *mono*generated.go *_generated_test.go gen-from-tmpl*.generated.go
cat > gen.generated.go <<EOF
// +build codecgen.exec
local btags="codec.build codec.notmono codec.safe codec.notfastpath"
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
const genDecMapTmpl = \`
EOF
cat >> gen.generated.go < gen-dec-map.go.tmpl
cat >> gen.generated.go <<EOF
\`
const genDecListTmpl = \`
EOF
cat >> gen.generated.go < gen-dec-array.go.tmpl
cat >> gen.generated.go <<EOF
\`
const genEncChanTmpl = \`
EOF
cat >> gen.generated.go < gen-enc-chan.go.tmpl
cat >> gen.generated.go <<EOF
\`
EOF
cat > gen-from-tmpl.codec.generated.go <<EOF
package codec
func GenRunTmpl2Go(in, out string) { genRunTmpl2Go(in, out) }
func GenRunSortTmpl2Go(in, out string) { genRunSortTmpl2Go(in, out) }
EOF
# stub xxxRv and xxxRvSlice creation, before you create it
cat > gen-from-tmpl.sort-slice-stubs.generated.go <<EOF
// +build codecgen.sort_slice
package codec
import "reflect"
import "time"
func GenTmplRun2Go(in, out string) { genTmplRun2Go(in, out) }
func GenMonoAll() { genMonoAll() }
EOF
for i in string bool uint64 int64 float64 bytes time; do
local i2=$i
case $i in
'time' ) i2="time.Time";;
'bytes' ) i2="[]byte";;
esac
cat >> gen-from-tmpl.sort-slice-stubs.generated.go <<EOF
type ${i}Rv struct { v ${i2}; r reflect.Value }
type ${i}RvSlice []${i}Rv
func (${i}RvSlice) Len() int { return 0 }
func (${i}RvSlice) Less(i, j int) bool { return false }
func (${i}RvSlice) Swap(i, j int) {}
type ${i}Intf struct { v ${i2}; i interface{} }
type ${i}IntfSlice []${i}Intf
func (${i}IntfSlice) Len() int { return 0 }
func (${i}IntfSlice) Less(i, j int) bool { return false }
func (${i}IntfSlice) Swap(i, j int) {}
cat > gen-from-tmpl.generated.go <<EOF
//go:build ignore
package main
import "${zpkg}"
func main() {
codec.GenTmplRun2Go("fastpath.go.tmpl", "base.fastpath.generated.go")
codec.GenTmplRun2Go("fastpath.notmono.go.tmpl", "base.fastpath.notmono.generated.go")
codec.GenTmplRun2Go("mammoth_test.go.tmpl", "mammoth_generated_test.go")
codec.GenMonoAll()
}
EOF
done
sed -e 's+// __DO_NOT_REMOVE__NEEDED_FOR_REPLACING__IMPORT_PATH__FOR_CODEC_BENCH__+import . "github.com/ugorji/go/codec"+' \
shared_test.go > bench/shared_test.go
# explicitly return 0 if this passes, else return 1
local btags="codec.notfastpath codec.safe codecgen.exec"
rm -f sort-slice.generated.go fast-path.generated.go gen-helper.generated.go mammoth_generated_test.go mammoth2_generated_test.go
cat > gen-from-tmpl.sort-slice.generated.go <<EOF
// +build ignore
package main
import "${zpkg}"
func main() {
codec.GenRunSortTmpl2Go("sort-slice.go.tmpl", "sort-slice.generated.go")
}
EOF
${gocmd} run -tags "$btags codecgen.sort_slice" gen-from-tmpl.sort-slice.generated.go || return 1
rm -f gen-from-tmpl.sort-slice.generated.go
cat > gen-from-tmpl.generated.go <<EOF
// +build ignore
package main
import "${zpkg}"
func main() {
codec.GenRunTmpl2Go("fast-path.go.tmpl", "fast-path.generated.go")
codec.GenRunTmpl2Go("gen-helper.go.tmpl", "gen-helper.generated.go")
codec.GenRunTmpl2Go("mammoth-test.go.tmpl", "mammoth_generated_test.go")
codec.GenRunTmpl2Go("mammoth2-test.go.tmpl", "mammoth2_generated_test.go")
}
EOF
${gocmd} run -tags "$btags" gen-from-tmpl.generated.go || return 1
rm -f gen-from-tmpl.generated.go
rm -f gen-from-tmpl.*generated.go
rm -f gen-from-tmpl*.generated.go
return 0
}
_codegenerators() {
local c5="_generated_test.go"
local c7="$PWD/codecgen"
local c8="$c7/__codecgen"
local c9="codecgen-scratch.go"
if ! [[ $zforce || $(_ng "values_codecgen${c5}") ]]; then return 0; fi
# Note: ensure you run the codecgen for this codebase/directory i.e. ./codecgen/codecgen
true &&
echo "codecgen ... " &&
if [[ $zforce || ! -f "$c8" || "$c7/gen.go" -nt "$c8" ]]; then
echo "rebuilding codecgen ... " && ( cd codecgen && ${gocmd} build -o $c8 ${zargs[*]} . )
fi &&
$c8 -rt 'codecgen' -t 'codecgen generated' -o "values_codecgen${c5}" -d 19780 "$zfin" "$zfin2" &&
cp mammoth2_generated_test.go $c9 &&
$c8 -t 'codecgen,!codec.notfastpath,!codec.notmammoth generated,!codec.notfastpath,!codec.notmammoth' -o "mammoth2_codecgen${c5}" -d 19781 "mammoth2_generated_test.go" &&
rm -f $c9 &&
echo "generators done!"
}
_prebuild() {
echo "prebuild: zforce: $zforce"
local d="$PWD"
local zfin="test_values.generated.go"
local zfin2="test_values_flex.generated.go"
@@ -236,13 +65,12 @@ _prebuild() {
# zpkg=${d##*/src/}
# zgobase=${d%%/src/*}
# rm -f *_generated_test.go
rm -f codecgen-*.go &&
# if [[ $zforce ]]; then ${gocmd} install ${zargs[*]} .; fi &&
true &&
_build &&
cp $d/values_test.go $d/$zfin &&
cp $d/values_flex_test.go $d/$zfin2 &&
_codegenerators &&
if [[ "$(type -t _codegenerators_external )" = "function" ]]; then _codegenerators_external ; fi &&
if [[ $zforce ]]; then ${gocmd} install ${zargs[*]} .; fi &&
returncode=0 &&
echo "prebuild done successfully"
rm -f $d/$zfin $d/$zfin2
@@ -251,54 +79,67 @@ _prebuild() {
}
_make() {
local makeforce=${zforce}
zforce=1
(cd codecgen && ${gocmd} install ${zargs[*]} .) && _prebuild && ${gocmd} install ${zargs[*]} .
zforce=${makeforce}
_prebuild && ${gocmd} install ${zargs[*]} .
}
_clean() {
rm -f \
gen-from-tmpl.*generated.go \
codecgen-*.go \
test_values.generated.go test_values_flex.generated.go
}
_release() {
local reply
read -p "Pre-release validation takes a few minutes and MUST be run from within GOPATH/src. Confirm y/n? " -n 1 -r reply
echo
if [[ ! $reply =~ ^[Yy]$ ]]; then return 1; fi
_tests_run_one() {
local tt="alltests $i"
local rr="TestCodecSuite"
if [[ "x$i" == "xx" ]]; then tt="codec.notmono codec.notfastpath x"; rr='Test.*X$'; fi
local g=( ${zargs[*]} ${ztestargs[*]} -count $nc -cpu $cpus -vet "$vet" -tags "$tt" -run "$rr" )
[[ "$zcover" == "1" ]] && g+=( -cover )
# g+=( -ti "$k" )
g+=( -tdiff )
[[ "$zcover" == "1" ]] && g+=( -test.gocoverdir $covdir )
local -
set -x
${gocmd} test "${g[@]}" &
}
# expects GOROOT, GOROOT_BOOTSTRAP to have been set.
if [[ -z "${GOROOT// }" || -z "${GOROOT_BOOTSTRAP// }" ]]; then return 1; fi
# (cd $GOROOT && git checkout -f master && git pull && git reset --hard)
(cd $GOROOT && git pull)
local f=`pwd`/make.release.out
cat > $f <<EOF
========== `date` ===========
EOF
# # go 1.6 and below kept giving memory errors on Mac OS X during SDK build or go run execution,
# # that is fine, as we only explicitly test the last 3 releases and tip (2 years).
local makeforce=${zforce}
zforce=1
for i in 1.10 1.11 1.12 master
do
echo "*********** $i ***********" >>$f
if [[ "$i" != "master" ]]; then i="release-branch.go$i"; fi
(false ||
(echo "===== BUILDING GO SDK for branch: $i ... =====" &&
cd $GOROOT &&
git checkout -f $i && git reset --hard && git clean -f . &&
cd src && ./make.bash >>$f 2>&1 && sleep 1 ) ) &&
echo "===== GO SDK BUILD DONE =====" &&
_prebuild &&
echo "===== PREBUILD DONE with exit: $? =====" &&
_tests "$@"
if [[ "$?" != 0 ]]; then return 1; fi
_tests() {
local vet="" # TODO: make it off
local gover=$( ${gocmd} version | cut -f 3 -d ' ' )
# go tool cover is not supported for gccgo, gollvm, other non-standard go compilers
[[ $( ${gocmd} version ) == *"gccgo"* ]] && zcover=0
[[ $( ${gocmd} version ) == *"gollvm"* ]] && zcover=0
case $gover in
go1.2[0-9]*|go2.*|devel*) true ;;
*) return 1
esac
# we test the following permutations which all execute different code paths as below.
echo "TestCodecSuite: (fastpath/unsafe), (!fastpath/unsafe), (fastpath/!unsafe), (!fastpath/!unsafe)"
local nc=2 # count
local cpus="1,$(nproc)"
# if using the race detector, then set nc to
if [[ " ${zargs[@]} " =~ "-race" ]]; then
cpus="$(nproc)"
fi
local covdir=""
local a=( "" "codec.safe" "codec.notfastpath" "codec.safe codec.notfastpath"
"codec.notmono" "codec.notmono codec.safe"
"codec.notmono codec.notfastpath" "codec.notmono codec.safe codec.notfastpath" )
[[ "$zextra" == "1" ]] && a+=( "x" )
[[ "$zcover" == "1" ]] && covdir=`mktemp -d`
${gocmd} vet -printfuncs "errorf" "$@" || return 1
for i in "${a[@]}"; do
local j=${i:-default}; j="${j// /-}"; j="${j//codec./}"
[[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'alltests $i'; RUN: 'TestCodecSuite'"
_tests_run_one
[[ "$zwait" == "1" ]] && wait
# if [[ "$?" != 0 ]]; then return 1; fi
done
zforce=${makeforce}
echo "++++++++ RELEASE TEST SUITES ALL PASSED ++++++++"
wait
[[ "$zcover" == "1" ]] &&
echo "go tool covdata output" &&
${gocmd} tool covdata percent -i $covdir &&
${gocmd} tool covdata textfmt -i $covdir -o __cov.out &&
${gocmd} tool cover -html=__cov.out
}
_usage() {
@@ -306,11 +147,10 @@ _usage() {
# -pf [p=prebuild (f=force)]
cat <<EOF
primary usage: $0
primary usage: $0
-t[esow] -> t=tests [e=extra, s=short, o=cover, w=wait]
-[md] -> [m=make, d=race detector]
-[n l i] -> [n=inlining diagnostics, l=mid-stack inlining, i=check inlining for path (path)]
-v -> v=verbose
-v -> v=verbose (more v's to increase verbose level)
EOF
if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi
}
@@ -331,15 +171,15 @@ _main() {
local gocmd=${MYGOCMD:-go}
OPTIND=1
while getopts ":cetmnrgpfvldsowkxyzi" flag
while getopts ":cetmnrgpfvldsowikxyz" flag
do
case "x$flag" in
'xw') zwait=1 ;;
'xv') zverbose+=(1) ;;
'xo') zcover=1 ;;
'xe') zextra=1 ;;
'xw') zwait=1 ;;
'xf') zforce=1 ;;
'xs') ztestargs+=("-short") ;;
'xv') zverbose+=(1) ;;
'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;;
'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;;
'xd') zargs+=("-race") ;;
@@ -357,14 +197,23 @@
'xg') _go ;;
'xp') _prebuild "$@" ;;
'xc') _clean "$@" ;;
esac
# handle from local run.sh
case "x$x" in
'xi') _check_inlining_one "$@" ;;
'xk') _go_compiler_validation_suite ;;
'xx') _analyze_checks "$@" ;;
'xy') _analyze_debug_types "$@" ;;
'xz') _analyze_do_inlining_and_more "$@" ;;
'xk') _go_compiler_validation_suite ;;
'xi') _check_inlining_one "$@" ;;
esac
# unset zforce zargs zbenchflags
}
[ "." = `dirname $0` ] && _main "$@"
# _xtrace() {
# local -
# set -x
# "${@}"
# }

vendor/github.com/ugorji/go/codec/cbor.base.go (generated, vendored, new file: 160 lines)

@@ -0,0 +1,160 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"reflect"
)
// major
const (
cborMajorUint byte = iota
cborMajorNegInt
cborMajorBytes
cborMajorString
cborMajorArray
cborMajorMap
cborMajorTag
cborMajorSimpleOrFloat
)
// simple
const (
cborBdFalse byte = 0xf4 + iota
cborBdTrue
cborBdNil
cborBdUndefined
cborBdExt
cborBdFloat16
cborBdFloat32
cborBdFloat64
)
// indefinite
const (
cborBdIndefiniteBytes byte = 0x5f
cborBdIndefiniteString byte = 0x7f
cborBdIndefiniteArray byte = 0x9f
cborBdIndefiniteMap byte = 0xbf
cborBdBreak byte = 0xff
)
// These define some in-stream descriptors for
// manual encoding e.g. when doing explicit indefinite-length
const (
CborStreamBytes byte = 0x5f
CborStreamString byte = 0x7f
CborStreamArray byte = 0x9f
CborStreamMap byte = 0xbf
CborStreamBreak byte = 0xff
)
// base values
const (
cborBaseUint byte = 0x00
cborBaseNegInt byte = 0x20
cborBaseBytes byte = 0x40
cborBaseString byte = 0x60
cborBaseArray byte = 0x80
cborBaseMap byte = 0xa0
cborBaseTag byte = 0xc0
cborBaseSimple byte = 0xe0
)
// const (
// cborSelfDesrTag byte = 0xd9
// cborSelfDesrTag2 byte = 0xd9
// cborSelfDesrTag3 byte = 0xf7
// )
var (
cbordescSimpleNames = map[byte]string{
cborBdNil: "nil",
cborBdFalse: "false",
cborBdTrue: "true",
cborBdFloat16: "float",
cborBdFloat32: "float",
cborBdFloat64: "float",
cborBdBreak: "break",
}
cbordescIndefNames = map[byte]string{
cborBdIndefiniteBytes: "bytes*",
cborBdIndefiniteString: "string*",
cborBdIndefiniteArray: "array*",
cborBdIndefiniteMap: "map*",
}
cbordescMajorNames = map[byte]string{
cborMajorUint: "(u)int",
cborMajorNegInt: "int",
cborMajorBytes: "bytes",
cborMajorString: "string",
cborMajorArray: "array",
cborMajorMap: "map",
cborMajorTag: "tag",
cborMajorSimpleOrFloat: "simple",
}
)
func cbordesc(bd byte) (s string) {
bm := bd >> 5
if bm == cborMajorSimpleOrFloat {
s = cbordescSimpleNames[bd]
} else {
s = cbordescMajorNames[bm]
if s == "" {
s = cbordescIndefNames[bd]
}
}
if s == "" {
s = "unknown"
}
return
}
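// For example (illustrative): cbordesc(0xa3) returns "map", since 0xa3>>5 == 5
// (cborMajorMap), while cbordesc(0xf5) returns "true" via the simple-value table.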
// -------------------------
// CborHandle is a Handle for the CBOR encoding format,
// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
//
// CBOR is comprehensively supported, including support for:
// - indefinite-length arrays/maps/bytes/strings
// - (extension) tags in range 0..0xffff (0 .. 65535)
// - half, single and double-precision floats
// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
// - nil, true, false, ...
// - arrays and maps, bytes and text strings
//
// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
// Users can implement them as needed (using SetExt), including spec-documented ones:
// - timestamp, BigNum, BigFloat, Decimals,
// - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
type CborHandle struct {
binaryEncodingType
notJsonType
// noElemSeparators
BasicHandle
// IndefiniteLength=true means that we encode using indefinite length
IndefiniteLength bool
// TimeRFC3339 says to encode time.Time using RFC3339 format.
// If unset, we encode time.Time using seconds past epoch.
TimeRFC3339 bool
// SkipUnexpectedTags says to skip over any tags for which extensions are
// not defined. This is in keeping with the cbor spec on "Optional Tagging of Items".
//
// Furthermore, this allows the skipping over of the Self Describing Tag 0xd9d9f7.
SkipUnexpectedTags bool
}
// Name returns the name of the handle: cbor
func (h *CborHandle) Name() string { return "cbor" }
func (h *CborHandle) desc(bd byte) string { return cbordesc(bd) }
// SetInterfaceExt sets an extension
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
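
A minimal usage sketch for the handle options documented above (field values and the encoded map are illustrative):
```
package main

import (
	"encoding/hex"
	"fmt"
	"time"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.CborHandle
	h.TimeRFC3339 = true      // encode time.Time as RFC3339 text rather than seconds past epoch
	h.IndefiniteLength = true // use indefinite-length containers when encoding

	var out []byte
	v := map[string]interface{}{"when": time.Unix(0, 0).UTC()}
	if err := codec.NewEncoderBytes(&out, &h).Encode(v); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(out))
}
```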

File diff suppressed because it is too large.

File diff suppressed because it is too large.

vendor/github.com/ugorji/go/codec/cbor.mono.generated.go (generated, vendored, new file: 7985 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,52 @@
//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath)
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"reflect"
)
type fastpathECborBytes struct {
rt reflect.Type
encfn func(*encoderCborBytes, *encFnInfo, reflect.Value)
}
type fastpathDCborBytes struct {
rt reflect.Type
decfn func(*decoderCborBytes, *decFnInfo, reflect.Value)
}
type fastpathEsCborBytes [0]fastpathECborBytes
type fastpathDsCborBytes [0]fastpathDCborBytes
func (helperEncDriverCborBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderCborBytes) bool {
return false
}
func (helperDecDriverCborBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderCborBytes) bool {
return false
}
func (helperEncDriverCborBytes) fastpathEList() (v *fastpathEsCborBytes) { return }
func (helperDecDriverCborBytes) fastpathDList() (v *fastpathDsCborBytes) { return }
type fastpathECborIO struct {
rt reflect.Type
encfn func(*encoderCborIO, *encFnInfo, reflect.Value)
}
type fastpathDCborIO struct {
rt reflect.Type
decfn func(*decoderCborIO, *decFnInfo, reflect.Value)
}
type fastpathEsCborIO [0]fastpathECborIO
type fastpathDsCborIO [0]fastpathDCborIO
func (helperEncDriverCborIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderCborIO) bool {
return false
}
func (helperDecDriverCborIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderCborIO) bool {
return false
}
func (helperEncDriverCborIO) fastpathEList() (v *fastpathEsCborIO) { return }
func (helperDecDriverCborIO) fastpathDList() (v *fastpathDsCborIO) { return }


@@ -1,17 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build codecgen || generated
// +build codecgen generated
package codec
// this file sets the codecgen variable to true
// when the build tag codecgen is set.
//
// some tests depend on knowing whether in the context of codecgen or not.
// For example, some tests should be skipped during codecgen e.g. missing fields tests.
func init() {
codecgen = true
}

vendor/github.com/ugorji/go/codec/custom_time.go (generated, vendored, new file: 191 lines)

@@ -0,0 +1,191 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"math"
"time"
)
// EncodeTime encodes a time.Time as a []byte, including
// information on the instant in time and UTC offset.
//
// Format Description
//
// A timestamp is composed of 3 components:
//
// - secs: signed integer representing seconds since unix epoch
// - nsecs: unsigned integer representing fractional seconds as a
// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
// - tz: signed integer representing timezone offset in minutes east of UTC,
// and a dst (daylight savings time) flag
//
// When encoding a timestamp, the first byte is the descriptor, which
// defines which components are encoded and how many bytes are used to
// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
// is not encoded in the byte array explicitly*.
//
// Descriptor 8 bits are of the form `A B C DDD EE`:
// A: Is secs component encoded? 1 = true
// B: Is nsecs component encoded? 1 = true
// C: Is tz component encoded? 1 = true
// DDD: Number of extra bytes for secs (range 0-7).
// If A = 1, secs encoded in DDD+1 bytes.
// If A = 0, secs is not encoded, and is assumed to be 0.
// If A = 1, then we need at least 1 byte to encode secs.
// DDD says the number of extra bytes beyond that 1.
// E.g. if DDD=0, then secs is represented in 1 byte.
// if DDD=2, then secs is represented in 3 bytes.
// EE: Number of extra bytes for nsecs (range 0-3).
// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
//
// Following the descriptor bytes, subsequent bytes are:
//
// secs component encoded in `DDD + 1` bytes (if A == 1)
// nsecs component encoded in `EE + 1` bytes (if B == 1)
// tz component encoded in 2 bytes (if C == 1)
//
// secs and nsecs components are integers encoded in a BigEndian
// two's-complement encoding format.
//
// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
// Least significant bit 0 are described below:
//
// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
// Bit 15 = have_dst: set to 1 if we set the dst flag.
// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
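// Worked example (illustrative, derived from the format described above):
// a time with secs=3, nsecs=0 and a UTC location encodes as two bytes:
//   descriptor = 0b1_0_0_000_00 = 0x80 (A=1, B=0, C=0, DDD=0, EE=0)
//   secs       = 0x03                  (DDD+1 = 1 byte, big-endian)
// i.e. []byte{0x80, 0x03}.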
func customEncodeTime(t time.Time) []byte {
// t := rv2i(rv).(time.Time)
tsecs, tnsecs := t.Unix(), t.Nanosecond()
var (
bd byte
bs [16]byte
i int = 1
)
l := t.Location()
if l == time.UTC {
l = nil
}
if tsecs != 0 {
bd = bd | 0x80
btmp := bigen.PutUint64(uint64(tsecs))
f := pruneSignExt(btmp[:], tsecs >= 0)
bd = bd | (byte(7-f) << 2)
copy(bs[i:], btmp[f:])
i = i + (8 - f)
}
if tnsecs != 0 {
bd = bd | 0x40
btmp := bigen.PutUint32(uint32(tnsecs))
f := pruneSignExt(btmp[:4], true)
bd = bd | byte(3-f)
copy(bs[i:], btmp[f:4])
i = i + (4 - f)
}
if l != nil {
bd = bd | 0x20
// Note that Go Libs do not give access to dst flag.
_, zoneOffset := t.Zone()
// zoneName, zoneOffset := t.Zone()
zoneOffset /= 60
z := uint16(zoneOffset)
btmp0, btmp1 := bigen.PutUint16(z)
// clear dst flags
bs[i] = btmp0 & 0x3f
bs[i+1] = btmp1
i = i + 2
}
bs[0] = bd
return bs[0:i]
}
// customDecodeTime decodes a []byte into a time.Time.
func customDecodeTime(bs []byte) (tt time.Time, err error) {
bd := bs[0]
var (
tsec int64
tnsec uint32
tz uint16
i byte = 1
i2 byte
n byte
)
if bd&(1<<7) != 0 {
var btmp [8]byte
n = ((bd >> 2) & 0x7) + 1
i2 = i + n
copy(btmp[8-n:], bs[i:i2])
// if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
if bs[i]&(1<<7) != 0 {
copy(btmp[0:8-n], bsAll0xff)
}
i = i2
tsec = int64(bigen.Uint64(btmp))
}
if bd&(1<<6) != 0 {
var btmp [4]byte
n = (bd & 0x3) + 1
i2 = i + n
copy(btmp[4-n:], bs[i:i2])
i = i2
tnsec = bigen.Uint32(btmp)
}
if bd&(1<<5) == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
return
}
// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
// However, we need the name here, so it can be shown when the time is printf'd.
// Zone name is in form: UTC-08:00.
// Note that Go Libs do not give access to dst flag, so we ignore dst bits
tz = bigen.Uint16([2]byte{bs[i], bs[i+1]})
// sign extend sign bit into top 2 MSB (which were dst bits):
if tz&(1<<13) == 0 { // positive
tz = tz & 0x3fff //clear 2 MSBs: dst bits
} else { // negative
tz = tz | 0xc000 //set 2 MSBs: dst bits
}
tzint := int16(tz)
if tzint == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
} else {
// For Go Time, do not use a descriptive timezone.
// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
// The Offset already tells what the offset should be, if not on UTC and unknown zone name.
// var zoneName = timeLocUTCName(tzint)
tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
}
return
}
// customEncodeTimeAsNum encodes time.Time exactly as cbor does.
func customEncodeTimeAsNum(t time.Time) (r interface{}) {
t = t.UTC().Round(time.Microsecond)
sec, nsec := t.Unix(), uint64(t.Nanosecond())
if nsec == 0 {
r = sec
} else {
r = float64(sec) + float64(nsec)/1e9
}
return r
}
// customDecodeTimeAsNum decodes time.Time exactly as cbor does.
func customDecodeTimeAsNum(v interface{}) (t time.Time) {
switch vv := v.(type) {
case int64:
t = time.Unix(vv, 0)
case uint64:
t = time.Unix((int64)(vv), 0)
case float64:
f1, f2 := math.Modf(vv)
t = time.Unix(int64(f1), int64(f2*1e9))
default:
halt.errorf("expect int64/float64 for time.Time ext: got %T", v)
}
t = t.UTC().Round(time.Microsecond)
return
}
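
A sketch of an in-package round-trip check for the two custom time functions above (the test name and values are illustrative; customEncodeTime/customDecodeTime are unexported, so this would live in package codec):
```
package codec

import (
	"testing"
	"time"
)

func TestCustomTimeRoundTrip(t *testing.T) {
	// a non-UTC fixed zone exercises the tz component of the format
	in := time.Date(2020, 6, 1, 12, 30, 45, 123456789, time.FixedZone("", -8*60*60))
	bs := customEncodeTime(in)
	out, err := customDecodeTime(bs)
	if err != nil {
		t.Fatal(err)
	}
	// compare instants: zone names are intentionally not preserved by the format
	if !out.Equal(in) {
		t.Fatalf("round trip mismatch: got %v, want %v", out, in)
	}
}
```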


@@ -8,6 +8,19 @@ import (
"strconv"
)
type readFloatResult struct {
mantissa uint64
exp int8
neg bool
trunc bool
bad bool // bad decimal string
hardexp bool // exponent is hard to handle (> 2 digits, etc)
ok bool
// sawdot bool
// sawexp bool
//_ [2]bool // padding
}
// Per go spec, floats are represented in memory as
// IEEE single or double precision floating point values.
//
@@ -234,6 +247,10 @@ func parseFloat64_custom(b []byte) (f float64, err error) {
}
func parseUint64_simple(b []byte) (n uint64, ok bool) {
if len(b) > 1 && b[0] == '0' { // punt on numbers with leading zeros
return
}
var i int
var n1 uint64
var c uint8
@@ -356,19 +373,6 @@ func parseNumber(b []byte, z *fauxUnion, preferSignedInt bool) (err error) {
return
}
type readFloatResult struct {
mantissa uint64
exp int8
neg bool
trunc bool
bad bool // bad decimal string
hardexp bool // exponent is hard to handle (> 2 digits, etc)
ok bool
// sawdot bool
// sawexp bool
//_ [2]bool // padding
}
func readFloat(s []byte, y floatinfo) (r readFloatResult) {
var i uint // uint, so that we eliminate bounds checking
var slen = uint(len(s))
@@ -384,13 +388,23 @@ func readFloat(s []byte, y floatinfo) (r readFloatResult) {
i++
}
// we considered punting early if string has length > maxMantDigits, but this doesn't account
// considered punting early if string has length > maxMantDigits, but doesn't account
// for trailing 0's e.g. 700000000000000000000 can be encoded exactly as it is 7e20
var nd, ndMant, dp int8
var sawdot, sawexp bool
var xu uint64
if i+1 < slen && s[i] == '0' {
switch s[i+1] {
case '.', 'e', 'E':
// ok
default:
r.bad = true
return
}
}
LOOP:
for ; i < slen; i++ {
switch s[i] {

vendor/github.com/ugorji/go/codec/decode.base.go (generated, vendored, new file: 944 lines)

@@ -0,0 +1,944 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"errors"
"io"
"math"
"reflect"
"slices"
"sync"
"time"
)
func init() {
for _, v := range []interface{}{
(*string)(nil),
(*bool)(nil),
(*int)(nil),
(*int8)(nil),
(*int16)(nil),
(*int32)(nil),
(*int64)(nil),
(*uint)(nil),
(*uint8)(nil),
(*uint16)(nil),
(*uint32)(nil),
(*uint64)(nil),
(*uintptr)(nil),
(*float32)(nil),
(*float64)(nil),
(*complex64)(nil),
(*complex128)(nil),
(*[]byte)(nil),
([]byte)(nil),
(*time.Time)(nil),
(*Raw)(nil),
(*interface{})(nil),
} {
decBuiltinRtids = append(decBuiltinRtids, i2rtid(v))
}
slices.Sort(decBuiltinRtids)
}
const msgBadDesc = "unrecognized descriptor byte"
var decBuiltinRtids []uintptr
// decDriver calls (DecodeBytes and DecodeStringAsBytes) return a state
// of the view they return, allowing consumers to handle appropriately.
//
// sequencing of this is intentional:
// - mutable if <= dBytesAttachBuffer (buf | view | invalid)
// - noCopy if >= dBytesAttachViewZerocopy
type dBytesAttachState uint8
const (
dBytesAttachInvalid dBytesAttachState = iota
dBytesAttachView // (bytes && !zerocopy && !buf)
dBytesAttachBuffer // (buf)
dBytesAttachViewZerocopy // (bytes && zerocopy && !buf)
dBytesDetach // (!bytes && !buf)
)
type dBytesIntoState uint8
const (
dBytesIntoNoChange dBytesIntoState = iota
dBytesIntoParamOut
dBytesIntoParamOutSlice
dBytesIntoNew
)
func (x dBytesAttachState) String() string {
switch x {
case dBytesAttachInvalid:
return "invalid"
case dBytesAttachView:
return "view"
case dBytesAttachBuffer:
return "buffer"
case dBytesAttachViewZerocopy:
return "view-zerocopy"
case dBytesDetach:
return "detach"
}
return "unknown"
}
const (
decDefMaxDepth = 1024 // maximum depth
decDefChanCap = 64 // should be large, as cap cannot be expanded
decScratchByteArrayLen = (4 + 3) * 8 // around cacheLineSize ie ~64, depending on Decoder size
// MARKER: massage decScratchByteArrayLen to ensure xxxDecDriver structs fit within cacheLine*N
// decFailNonEmptyIntf configures whether we error
// when decoding naked into a non-empty interface.
//
// Typically, we cannot decode non-nil stream value into
// nil interface with methods (e.g. io.Reader).
// However, in some scenarios, this should be allowed:
// - MapType
// - SliceType
// - Extensions
//
// Consequently, we should relax this. Put it behind a const flag for now.
decFailNonEmptyIntf = false
// decUseTransient says whether we should use the transient optimization.
//
// There's potential for GC corruption or memory overwrites if transient isn't
// used carefully, so this flag helps turn it off quickly if needed.
//
// Use it everywhere needed so we can completely remove unused code blocks.
decUseTransient = true
)
var (
errNeedMapOrArrayDecodeToStruct = errors.New("only encoded map or array can decode into struct")
errCannotDecodeIntoNil = errors.New("cannot decode into nil")
errExpandSliceCannotChange = errors.New("expand slice: cannot change")
errDecoderNotInitialized = errors.New("Decoder not initialized")
errDecUnreadByteNothingToRead = errors.New("cannot unread - nothing has been read")
errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read")
errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown")
errMaxDepthExceeded = errors.New("maximum decoding depth exceeded")
)
type decNotDecodeableReason uint8
const (
decNotDecodeableReasonUnknown decNotDecodeableReason = iota
decNotDecodeableReasonBadKind
decNotDecodeableReasonNonAddrValue
decNotDecodeableReasonNilReference
)
type decDriverI interface {
// this will check if the next token is a break.
CheckBreak() bool
// TryNil tries to decode as nil.
// If a nil is in the stream, it consumes it and returns true.
//
// Note: if TryNil returns true, that must be handled.
TryNil() bool
// ContainerType returns one of: Bytes, String, Nil, Slice or Map.
//
// Return unSet if not known.
//
// Note: Implementations MUST fully consume sentinel container types, specifically Nil.
ContainerType() (vt valueType)
// DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
// For maps and arrays, it will not do the decoding in-band, but will signal
// the decoder, so that is done later, by setting the fauxUnion.valueType field.
//
// Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
// for extensions, DecodeNaked must read the tag and the []byte if it exists.
// if the []byte is not read, then kInterfaceNaked will treat it as a Handle
// that stores the subsequent value in-band, and complete reading the RawExt.
//
// extensions should also use readx to decode them, for efficiency.
// kInterface will extract the detached byte slice if it has to pass it outside its realm.
DecodeNaked()
DecodeInt64() (i int64)
DecodeUint64() (ui uint64)
DecodeFloat32() (f float32)
DecodeFloat64() (f float64)
DecodeBool() (b bool)
// DecodeStringAsBytes returns the bytes representing a string.
// It will return a view into scratch buffer or input []byte (if applicable).
//
// Note: This can also decode symbols, if supported.
//
// Users should consume it right away and not store it for later use.
DecodeStringAsBytes() (v []byte, state dBytesAttachState)
// DecodeBytes returns the bytes representing a binary value.
// It will return a view into scratch buffer or input []byte (if applicable).
DecodeBytes() (out []byte, state dBytesAttachState)
// DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
// DecodeExt will decode into an extension.
// ext is never nil.
DecodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext)
// decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
// DecodeRawExt will decode into a *RawExt
DecodeRawExt(re *RawExt)
DecodeTime() (t time.Time)
// ReadArrayStart will return the length of the array.
// If the format doesn't prefix the length, it returns containerLenUnknown.
// If the expected array was a nil in the stream, it returns containerLenNil.
ReadArrayStart() int
// ReadMapStart will return the length of the map.
// If the format doesn't prefix the length, it returns containerLenUnknown.
// If the expected map was a nil in the stream, it returns containerLenNil.
ReadMapStart() int
decDriverContainerTracker
reset()
// atEndOfDecode()
// nextValueBytes will return the bytes representing the next value in the stream.
// It generally will include the last byte read, as that is a part of the next value
// in the stream.
nextValueBytes() []byte
// descBd will describe the token descriptor that signifies what type was decoded
descBd() string
// isBytes() bool
resetInBytes(in []byte)
resetInIO(r io.Reader)
NumBytesRead() int
init(h Handle, shared *decoderBase, dec decoderI) (fp interface{})
// driverStateManager
decNegintPosintFloatNumber
}
type decInit2er struct{}
func (decInit2er) init2(dec decoderI) {}
type decDriverContainerTracker interface {
ReadArrayElem(firstTime bool)
ReadMapElemKey(firstTime bool)
ReadMapElemValue()
ReadArrayEnd()
ReadMapEnd()
}
type decNegintPosintFloatNumber interface {
decInteger() (ui uint64, neg, ok bool)
decFloat() (f float64, ok bool)
}
type decDriverNoopNumberHelper struct{}
func (x decDriverNoopNumberHelper) decInteger() (ui uint64, neg, ok bool) {
panic("decInteger unsupported")
}
func (x decDriverNoopNumberHelper) decFloat() (f float64, ok bool) { panic("decFloat unsupported") }
type decDriverNoopContainerReader struct{}
func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { panic("ReadArrayStart unsupported") }
func (x decDriverNoopContainerReader) ReadMapStart() (v int) { panic("ReadMapStart unsupported") }
func (x decDriverNoopContainerReader) ReadArrayEnd() {}
func (x decDriverNoopContainerReader) ReadMapEnd() {}
func (x decDriverNoopContainerReader) ReadArrayElem(firstTime bool) {}
func (x decDriverNoopContainerReader) ReadMapElemKey(firstTime bool) {}
func (x decDriverNoopContainerReader) ReadMapElemValue() {}
func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return }
// ----
type decFnInfo struct {
ti *typeInfo
xfFn Ext
xfTag uint64
addrD bool // decoding into a pointer is preferred
addrDf bool // force: if addrD, then decode function MUST take a ptr
}
// DecodeOptions captures configuration options during decode.
type DecodeOptions struct {
// MapType specifies type to use during schema-less decoding of a map in the stream.
// If nil (unset), we default to map[string]interface{} iff json handle and MapKeyAsString=true,
// else map[interface{}]interface{}.
MapType reflect.Type
// SliceType specifies type to use during schema-less decoding of an array in the stream.
// If nil (unset), we default to []interface{} for all formats.
SliceType reflect.Type
// MaxInitLen defines the maximum initial length that we "make" a collection
// (string, slice, map, chan). If 0 or negative, we default to a sensible value
// based on the size of an element in the collection.
//
// For example, when decoding, a stream may say that it has 2^64 elements.
// We should not automatically provision a slice of that size, to prevent an Out-Of-Memory crash.
// Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
MaxInitLen int
// ReaderBufferSize is the size of the buffer used when reading.
//
// if > 0, we use a smart buffer internally for performance purposes.
ReaderBufferSize int
// MaxDepth defines the maximum depth when decoding nested
// maps and slices. If 0 or negative, we default to a suitably large number (currently 1024).
MaxDepth int16
// If ErrorIfNoField, return an error when decoding a map
// from a codec stream into a struct, and no matching struct field is found.
ErrorIfNoField bool
// If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
// For example, the stream contains an array of 8 items, but you are decoding into a [4]T array,
// or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set).
ErrorIfNoArrayExpand bool
// If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64).
SignedInteger bool
// MapValueReset controls how we decode into a map value.
//
// By default, we MAY retrieve the mapping for a key, and then decode into that.
// However, especially with big maps, that retrieval may be expensive and unnecessary
// if the stream already contains all that is necessary to recreate the value.
//
// If true, we will never retrieve the previous mapping,
// but rather decode into a new value and set that in the map.
//
// If false, we will retrieve the previous mapping if necessary e.g.
// the previous mapping is a pointer, or is a struct or array with pre-set state,
// or is an interface.
MapValueReset bool
// SliceElementReset: on decoding a slice, reset the element to a zero value first.
//
// concern: if the slice already contained some garbage, we will decode into that garbage.
SliceElementReset bool
// InterfaceReset controls how we decode into an interface.
//
// By default, when we see a field that is an interface{...},
// or a map with interface{...} value, we will attempt decoding into the
// "contained" value.
//
// However, this prevents us from reading a string into an interface{}
// that formerly contained a number.
//
// If true, we will decode into a new "blank" value, and set that in the interface.
// If false, we will decode into whatever is contained in the interface.
InterfaceReset bool
// InternString controls interning of strings during decoding.
//
// Some handles, e.g. json, typically will read map keys as strings.
// If the set of keys are finite, it may help reduce allocation to
// look them up from a map (than to allocate them afresh).
//
// Note: Handles will be smart when using the intern functionality.
// Every string should not be interned.
// An excellent use-case for interning is struct field names,
// or map keys where key type is string.
InternString bool
// PreferArrayOverSlice controls whether to decode to an array or a slice.
//
// This only impacts decoding into a nil interface{}.
//
// Consequently, it has no effect on codecgen.
//
// *Note*: This only applies if using go1.5 and above,
// as it requires reflect.ArrayOf support which was absent before go1.5.
PreferArrayOverSlice bool
// DeleteOnNilMapValue controls how to decode a nil value in the stream.
//
// If true, we will delete the mapping of the key.
// Else, just set the mapping to the zero value of the type.
//
// Deprecated: This does NOTHING and is left behind for compiling compatibility.
// This change is necessitated because 'nil' in a stream now consistently
// means the zero value (ie reset the value to its zero state).
DeleteOnNilMapValue bool
// RawToString controls how raw bytes in a stream are decoded into a nil interface{}.
// By default, they are decoded as []byte, but can be decoded as string (if configured).
RawToString bool
// ZeroCopy controls whether decoded values of []byte or string type
// point into the input []byte parameter passed to a NewDecoderBytes/ResetBytes(...) call.
//
// To illustrate, if ZeroCopy and decoding from a []byte (not an io.Reader),
// then a []byte or string in the output result may just be a slice of (point into)
// the input bytes.
//
// This optimization prevents unnecessary copying.
//
// However, it is made optional, as the caller MUST ensure that the input parameter []byte is
// not modified after the Decode() happens, as any changes are mirrored in the decoded result.
ZeroCopy bool
// PreferPointerForStructOrArray controls whether a struct or array
// is stored in a nil interface{}, or a pointer to it.
//
// This mostly impacts when we decode registered extensions.
PreferPointerForStructOrArray bool
// ValidateUnicode controls whether decoding fails if an expected unicode
// string is well-formed but includes invalid codepoints.
//
// This could have a performance impact.
ValidateUnicode bool
}
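For orientation, a minimal usage sketch (not part of this diff; it assumes the package's public API, where concrete handles such as JsonHandle embed BasicHandle and thus these DecodeOptions) showing how the fields above are typically set before decoding:

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle // embeds BasicHandle, and thus DecodeOptions
	h.MaxInitLen = 1024    // cap the initial allocation for collections whose length comes from the stream
	h.MaxDepth = 64        // fail fast on deeply nested input
	h.ZeroCopy = true      // decoded []byte/string may alias the input buffer; do not mutate the input afterwards

	var v map[string]interface{}
	in := []byte(`{"a": [1, 2, 3]}`)
	if err := codec.NewDecoderBytes(in, &h).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println(v)
}
```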
// ----------------------------------------
type decoderBase struct {
perType decPerType
h *BasicHandle
rtidFn, rtidFnNoExt *atomicRtidFnSlice
buf []byte
// used for interning strings
is internerMap
err error
// sd decoderI
blist bytesFreeList
mtr bool // is maptype a known type?
str bool // is slicetype a known type?
jsms bool // is json handle, and MapKeyAsString
bytes bool // uses a bytes reader
bufio bool // uses a ioDecReader with buffer size > 0
// ---- cpu cache line boundary?
// ---- writable fields during execution --- *try* to keep in sep cache line
maxdepth int16
depth int16
// Extensions can call Decode() within a current Decode() call.
// We need to know when the top level Decode() call returns,
// so we can decide whether to Release() or not.
calls uint16 // what depth in mustDecode are we in now.
c containerState
// decByteState
n fauxUnion
// b is an always-available scratch buffer used by Decoder and decDrivers.
// By being always-available, it can be used for one-off things without
// having to get from freelist, use, and return back to freelist.
//
// Use it for a narrow set of things e.g.
// - binc uses it for parsing numbers represented in 8 or fewer bytes
// - used as a potential buffer for struct field names
b [decScratchByteArrayLen]byte
hh Handle
// cache the mapTypeId and sliceTypeId for faster comparisons
mtid uintptr
stid uintptr
}
func (d *decoderBase) maxInitLen() uint {
return uint(max(1024, d.h.MaxInitLen))
}
func (d *decoderBase) naked() *fauxUnion {
return &d.n
}
func (d *decoderBase) fauxUnionReadRawBytes(dr decDriverI, asString, rawToString bool) { //, handleZeroCopy bool) {
// fauxUnion is only used within DecodeNaked calls; consequently, we should try to intern.
d.n.l, d.n.a = dr.DecodeBytes()
if asString || rawToString {
d.n.v = valueTypeString
d.n.s = d.detach2Str(d.n.l, d.n.a)
} else {
d.n.v = valueTypeBytes
d.n.l = d.detach2Bytes(d.n.l, d.n.a)
}
}
// Return a fixed (detached) string representation of a []byte.
//
// Possibly get an interned version of a string,
// iff InternString=true and decoding a map key.
//
// This should mostly be used for map keys, struct field names, etc
// where the key type is string. This is because keys of a map/struct are
// typically reused across many objects.
func (d *decoderBase) detach2Str(v []byte, state dBytesAttachState) (s string) {
// note: string([]byte) checks - and optimizes - for len 0 and len 1
if len(v) <= 1 {
s = string(v)
} else if state >= dBytesAttachViewZerocopy { // !scratchBuf && d.bytes && d.h.ZeroCopy
s = stringView(v)
} else if d.is == nil || d.c != containerMapKey || len(v) > internMaxStrLen {
s = string(v)
} else {
s = d.is.string(v)
}
return
}
func (d *decoderBase) usableStructFieldNameBytes(buf, v []byte, state dBytesAttachState) (out []byte) {
// In JSON, mapElemValue reads a colon and spaces.
// In bufio mode of ioDecReader, fillbuf could overwrite the read buffer
// which readXXX() calls return sub-slices from.
//
// Consequently, we detach the bytes in this special case.
//
// Note: ioDecReader (non-bufio) and bytesDecReader do not have
// this issue (as no fillbuf exists where bytes might be returned from).
if d.bufio && d.h.jsonHandle && state < dBytesAttachViewZerocopy {
if cap(buf) > len(v) {
out = buf[:len(v)]
} else if len(d.b) > len(v) {
out = d.b[:len(v)]
} else {
out = make([]byte, len(v), max(64, len(v)))
}
copy(out, v)
return
}
return v
}
func (d *decoderBase) detach2Bytes(in []byte, state dBytesAttachState) (out []byte) {
if cap(in) == 0 || state >= dBytesAttachViewZerocopy {
return in
}
if len(in) == 0 {
return zeroByteSlice
}
out = make([]byte, len(in))
copy(out, in)
return out
}
func (d *decoderBase) attachState(usingBufFromReader bool) (r dBytesAttachState) {
if usingBufFromReader {
r = dBytesAttachBuffer
} else if !d.bytes {
r = dBytesDetach
} else if d.h.ZeroCopy {
r = dBytesAttachViewZerocopy
} else {
r = dBytesAttachView
}
return
}
func (d *decoderBase) mapStart(v int) int {
if v != containerLenNil {
d.depthIncr()
d.c = containerMapStart
}
return v
}
func (d *decoderBase) HandleName() string {
return d.hh.Name()
}
func (d *decoderBase) isBytes() bool {
return d.bytes
}
type decoderI interface {
Decode(v interface{}) (err error)
HandleName() string
MustDecode(v interface{})
NumBytesRead() int
Release() // deprecated
Reset(r io.Reader)
ResetBytes(in []byte)
ResetString(s string)
isBytes() bool
wrapErr(v error, err *error)
swallow()
nextValueBytes() []byte // wrapper method, for use in tests
// getDecDriver() decDriverI
decode(v interface{})
decodeAs(v interface{}, t reflect.Type, ext bool)
interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt)
}
var errDecNoResetBytesWithReader = errors.New("cannot reset a Decoder reading from []byte with an io.Reader")
var errDecNoResetReaderWithBytes = errors.New("cannot reset a Decoder reading from an io.Reader with a []byte")
func setZero(iv interface{}) {
rv, isnil := isNil(iv, false)
if isnil {
return
}
if !rv.IsValid() {
rv = reflect.ValueOf(iv)
}
if isnilBitset.isset(byte(rv.Kind())) && rvIsNil(rv) {
return
}
// var canDecode bool
switch v := iv.(type) {
case *string:
*v = ""
case *bool:
*v = false
case *int:
*v = 0
case *int8:
*v = 0
case *int16:
*v = 0
case *int32:
*v = 0
case *int64:
*v = 0
case *uint:
*v = 0
case *uint8:
*v = 0
case *uint16:
*v = 0
case *uint32:
*v = 0
case *uint64:
*v = 0
case *float32:
*v = 0
case *float64:
*v = 0
case *complex64:
*v = 0
case *complex128:
*v = 0
case *[]byte:
*v = nil
case *Raw:
*v = nil
case *time.Time:
*v = time.Time{}
case reflect.Value:
decSetNonNilRV2Zero(v)
default:
if !fastpathDecodeSetZeroTypeSwitch(iv) {
decSetNonNilRV2Zero(rv)
}
}
}
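A small usage sketch (an illustration under stated assumptions, not code from this diff) of what setZero means in practice: a nil in the stream resets the destination to its zero value rather than leaving or deleting it:

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle

	s := "previous value"
	m := map[string]int{"a": 1}

	// JSON null is "nil in the stream": each destination is reset to its zero value.
	_ = codec.NewDecoderBytes([]byte(`null`), &h).Decode(&s) // s becomes ""
	_ = codec.NewDecoderBytes([]byte(`null`), &h).Decode(&m) // m becomes nil

	fmt.Printf("%q %v\n", s, m)
}
```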
// decSetNonNilRV2Zero will set the non-nil value to its zero value.
func decSetNonNilRV2Zero(v reflect.Value) {
// If not decodeable (settable), we do not touch it.
// We considered emptying it if not decodeable e.g.
// - if chan, drain it
// - if map, clear it
// - if slice or array, zero all elements up to len
//
// However, we decided instead that we either will set the
// whole value to the zero value, or leave AS IS.
k := v.Kind()
if k == reflect.Interface {
decSetNonNilRV2Zero4Intf(v)
} else if k == reflect.Ptr {
decSetNonNilRV2Zero4Ptr(v)
} else if v.CanSet() {
rvSetDirectZero(v)
}
}
func decSetNonNilRV2Zero4Ptr(v reflect.Value) {
ve := v.Elem()
if ve.CanSet() {
rvSetZero(ve) // we can have a pointer to an interface
} else if v.CanSet() {
rvSetZero(v)
}
}
func decSetNonNilRV2Zero4Intf(v reflect.Value) {
ve := v.Elem()
if ve.CanSet() {
rvSetDirectZero(ve) // interfaces always have element as a non-interface
} else if v.CanSet() {
rvSetZero(v)
}
}
func (d *decoderBase) arrayCannotExpand(sliceLen, streamLen int) {
if d.h.ErrorIfNoArrayExpand {
halt.errorf("cannot expand array len during decode from %v to %v", any(sliceLen), any(streamLen))
}
}
//go:noinline
func (d *decoderBase) haltAsNotDecodeable(rv reflect.Value) {
if !rv.IsValid() {
halt.onerror(errCannotDecodeIntoNil)
}
// check if an interface can be retrieved, before grabbing an interface
if !rv.CanInterface() {
halt.errorf("cannot decode into a value without an interface: %v", rv)
}
halt.errorf("cannot decode into value of kind: %v, %#v", rv.Kind(), rv2i(rv))
}
func (d *decoderBase) depthIncr() {
d.depth++
if d.depth >= d.maxdepth {
halt.onerror(errMaxDepthExceeded)
}
}
func (d *decoderBase) depthDecr() {
d.depth--
}
func (d *decoderBase) arrayStart(v int) int {
if v != containerLenNil {
d.depthIncr()
d.c = containerArrayStart
}
return v
}
func (d *decoderBase) oneShotAddrRV(rvt reflect.Type, rvk reflect.Kind) reflect.Value {
// MARKER 2025: is this slow for calling oneShot?
if decUseTransient && d.h.getTypeInfo4RT(baseRT(rvt)).flagCanTransient {
return d.perType.TransientAddrK(rvt, rvk)
}
return rvZeroAddrK(rvt, rvk)
}
// decNegintPosintFloatNumberHelper is used for formats that are binary
// and have distinct ways of storing positive integers vs negative integers
// vs floats, which are uniquely identified by the byte descriptor.
//
// Currently, these formats are binc, cbor and simple.
type decNegintPosintFloatNumberHelper struct {
d decDriverI
}
func (x decNegintPosintFloatNumberHelper) uint64(ui uint64, neg, ok bool) uint64 {
if ok && !neg {
return ui
}
return x.uint64TryFloat(ok)
}
func (x decNegintPosintFloatNumberHelper) uint64TryFloat(neg bool) (ui uint64) {
if neg { // neg = true
halt.errorStr("assigning negative signed value to unsigned type")
}
f, ok := x.d.decFloat()
if !(ok && f >= 0 && noFrac64(math.Float64bits(f))) {
halt.errorStr2("invalid number loading uint64, with descriptor: ", x.d.descBd())
}
return uint64(f)
}
func (x decNegintPosintFloatNumberHelper) int64(ui uint64, neg, ok, cbor bool) (i int64) {
if ok {
return decNegintPosintFloatNumberHelperInt64v(ui, neg, cbor)
}
// return x.int64TryFloat()
// }
// func (x decNegintPosintFloatNumberHelper) int64TryFloat() (i int64) {
f, ok := x.d.decFloat()
if !(ok && noFrac64(math.Float64bits(f))) {
halt.errorf("invalid number loading uint64 (%v), with descriptor: %s", f, x.d.descBd())
}
return int64(f)
}
func (x decNegintPosintFloatNumberHelper) float64(f float64, ok, cbor bool) float64 {
if ok {
return f
}
return x.float64TryInteger(cbor)
}
func (x decNegintPosintFloatNumberHelper) float64TryInteger(cbor bool) float64 {
ui, neg, ok := x.d.decInteger()
if !ok {
halt.errorStr2("invalid descriptor for float: ", x.d.descBd())
}
return float64(decNegintPosintFloatNumberHelperInt64v(ui, neg, cbor))
}
func decNegintPosintFloatNumberHelperInt64v(ui uint64, neg, incrIfNeg bool) (i int64) {
if neg && incrIfNeg {
ui++
}
i = chkOvf.SignedIntV(ui)
if neg {
i = -i
}
return
}
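A worked illustration of the cbor (incrIfNeg) branch above: CBOR stores a negative integer N as the unsigned value -1-N, so the decoded ui must be incremented before negation. This sketch is not part of the diff and only exercises the public API:

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.CborHandle
	var i int64

	// 0x20: major type 1, ui=0  -> -(0+1)  = -1
	_ = codec.NewDecoderBytes([]byte{0x20}, &h).Decode(&i)
	fmt.Println(i)

	// 0x38 0x63: major type 1, ui=99 -> -(99+1) = -100
	_ = codec.NewDecoderBytes([]byte{0x38, 0x63}, &h).Decode(&i)
	fmt.Println(i)
}
```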
// isDecodeable checks if value can be decoded into
//
// decode can take any reflect.Value that is inherently addressable i.e.
// - non-nil chan (we will SEND to it)
// - non-nil slice (we will set its elements)
// - non-nil map (we will put into it)
// - non-nil pointer (we can "update" it)
// - func: no
// - interface: no
// - array: if canAddr=true
// - any other value pointer: if canAddr=true
func isDecodeable(rv reflect.Value) (canDecode bool, reason decNotDecodeableReason) {
switch rv.Kind() {
case reflect.Ptr, reflect.Slice, reflect.Chan, reflect.Map:
canDecode = !rvIsNil(rv)
reason = decNotDecodeableReasonNilReference
case reflect.Func, reflect.Interface, reflect.Invalid, reflect.UnsafePointer:
reason = decNotDecodeableReasonBadKind
default:
canDecode = rv.CanAddr()
reason = decNotDecodeableReasonNonAddrValue
}
return
}
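The practical consequence of the rules above, shown as a hedged usage sketch (not from this diff): pass a pointer, or a non-nil map/slice/chan, to Decode; a nil map passed by value cannot be set and errors out:

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle
	in := []byte(`{"a": 1}`)

	var m map[string]int
	err := codec.NewDecoderBytes(in, &h).Decode(m) // nil map, passed by value: not decodeable
	fmt.Println("into nil map value:", err)

	err = codec.NewDecoderBytes(in, &h).Decode(&m) // pointer: decodeable
	fmt.Println("into *map:", err, m)
}
```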
// decInferLen will infer a sensible length, given the following:
// - clen: length wanted.
// - maxlen: max length to be returned.
// if <= 0, it is unset, and we infer it based on the unit size
// - unit: number of bytes for each element of the collection
func decInferLen(clen int, maxlen, unit uint) (n uint) {
// anecdotal testing showed an increase in allocation with a map length of 16.
// We saw the same typical alloc from 0-8, then a 20% increase at 16.
// Thus, we set minLenIfUnset to 8.
const (
minLenIfUnset = 8
maxMem = 1024 * 1024 // 1 MB Memory
)
// handle when maxlen is not set i.e. <= 0
// clen==0: use 0
// maxlen<=0, clen<0: use default
// maxlen> 0, clen<0: use default
// maxlen<=0, clen>0: infer maxlen, and cap on it
// maxlen> 0, clen>0: cap at maxlen
if clen == 0 || clen == containerLenNil {
return 0
}
if clen < 0 {
// if unspecified, return 64/unit (e.g. 64 for bytes, 8 for uint64), but at least minLenIfUnset
return max(64/unit, minLenIfUnset)
}
if unit == 0 {
return uint(clen)
}
if maxlen == 0 {
maxlen = maxMem / unit
}
return min(uint(clen), maxlen)
}
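A worked example of the inference above, written as a standalone sketch (decInferLen is unexported, so the logic is restated locally under the same constants; the numbers are illustrative):

```go
package main

import "fmt"

// inferLen mirrors the decInferLen logic above, for illustration only.
func inferLen(clen int, maxlen, unit uint) uint {
	const minLenIfUnset = 8
	const maxMem = 1024 * 1024 // 1 MB
	if clen == 0 {
		return 0
	}
	if clen < 0 {
		return max(64/unit, minLenIfUnset)
	}
	if unit == 0 {
		return uint(clen)
	}
	if maxlen == 0 {
		maxlen = maxMem / unit
	}
	return min(uint(clen), maxlen)
}

func main() {
	// A stream claims 2^30 entries of ~24 bytes each: provision only 1MB/24 = 43690.
	fmt.Println(inferLen(1<<30, 0, 24))
	// Unknown length, byte-sized elements: start with 64.
	fmt.Println(inferLen(-1, 0, 1))
}
```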
type Decoder struct {
decoderI
}
// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
//
// For efficiency, users are encouraged to configure ReaderBufferSize on the handle
// OR pass in a memory buffered reader (eg bufio.Reader, bytes.Buffer).
func NewDecoder(r io.Reader, h Handle) *Decoder {
return &Decoder{h.newDecoder(r)}
}
// NewDecoderBytes returns a Decoder which efficiently decodes directly
// from a byte slice with zero copying.
func NewDecoderBytes(in []byte, h Handle) *Decoder {
return &Decoder{h.newDecoderBytes(in)}
}
// NewDecoderString returns a Decoder which efficiently decodes directly
// from a string with zero copying.
//
// It is a convenience function that calls NewDecoderBytes with a
// []byte view into the string.
//
// This can be an efficient zero-copy if using default mode i.e. without codec.safe tag.
func NewDecoderString(s string, h Handle) *Decoder {
return NewDecoderBytes(bytesView(s), h)
}
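Usage sketch for the three constructors above (not part of the diff; ordinary public API):

```go
package main

import (
	"bufio"
	"fmt"
	"os"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle

	// From an io.Reader: pass a buffered reader, or set h.ReaderBufferSize.
	d := codec.NewDecoder(bufio.NewReader(os.Stdin), &h)
	_ = d

	// From a []byte or a string: decoding works directly off the input, with no copy.
	var v interface{}
	_ = codec.NewDecoderBytes([]byte(`[1,2,3]`), &h).Decode(&v)
	_ = codec.NewDecoderString(`{"a":true}`, &h).Decode(&v)
	fmt.Println(v)
}
```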
// ----
func sideDecode(h Handle, p *sync.Pool, fn func(decoderI)) {
var s decoderI
if usePoolForSideDecode {
s = p.Get().(decoderI)
defer p.Put(s)
} else {
// initialization cycle error
// s = NewDecoderBytes(nil, h).decoderI
s = p.New().(decoderI)
}
fn(s)
}
func oneOffDecode(sd decoderI, v interface{}, in []byte, basetype reflect.Type, ext bool) {
sd.ResetBytes(in)
sd.decodeAs(v, basetype, ext)
// d.sideDecoder(xbs)
// d.sideDecode(rv, basetype)
}
func bytesOKdbi(v []byte, _ dBytesIntoState) []byte {
return v
}
func bytesOKs(bs []byte, _ dBytesAttachState) []byte {
return bs
}

File diff suppressed because it is too large

View file

@ -12,7 +12,7 @@ Supported Serialization formats are:
- binc: http://github.com/ugorji/binc
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- json: http://json.org http://tools.ietf.org/html/rfc7159
- simple:
- simple: (unpublished)
This package will carefully use 'package unsafe' for performance reasons in specific places.
You can build without unsafe use by passing the safe or appengine tag
@ -78,6 +78,32 @@ Rich Feature Set includes:
msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
# Supported build tags
We gain performance by code-generating fast-paths for slices and maps of built-in types,
and monomorphizing generic code explicitly so we gain inlining and de-virtualization benefits.
The results are 20-40% performance improvements.
Building and running is configured using build tags as below.
At runtime:
- codec.safe: run in safe mode (not using unsafe optimizations)
- codec.notmono: use generics code (bypassing performance-boosting monomorphized code)
- codec.notfastpath: skip fast path code for slices and maps of built-in types (number, bool, string, bytes)
Each of these "runtime" tags have a convenience synonym i.e. safe, notmono, notfastpath.
Pls use these mostly during development - use codec.XXX in your go files.
Build only:
- codec.build: used to generate fastpath and monomorphization code
Test only:
- codec.notmammoth: skip the mammoth generated tests
# Extension Support
Users can register a function to handle the encoding or decoding of
@ -203,6 +229,10 @@ You can run the tag 'codec.safe' to run tests or build in safe mode. e.g.
go test -tags codec.safe -run Json
go test -tags "alltests codec.safe" -run Suite
You can run the tag 'codec.notmono' to build bypassing the monomorphized code e.g.
go test -tags codec.notmono -run Json
Running Benchmarks
cd bench
@ -225,3 +255,87 @@ Embedded fields are encoded as if they exist in the top-level struct,
with some caveats. See Encode documentation.
*/
package codec
/*
Generics
Generics are used across the board to reduce boilerplate, and hopefully
improve performance by
- reducing need for interface calls (de-virtualization)
- resultant inlining of those calls
encoder/decoder --> Driver (json/cbor/...) --> input/output (bytes or io abstraction)
There are 2 * 5 * 2 (20) combinations of monomorphized values.
Key rules
- do not use top-level generic functions.
Due to type inference, monomorphizing them proves challenging
- only use generic methods.
Monomorphizing is done at the type once, and method names need not change
- do not have methods take a parameter of encWriter or decReader type.
All those calls are handled directly by the driver.
- Include a helper type for each parameterized thing, and add all generic functions to them e.g.
helperEncWriter[T encWriter]
helperEncReader[T decReader]
helperEncDriver[T encDriver]
helperDecDriver[T decDriver]
- Always use T as the generic type name (when needed)
- No inline types
- No closures taking parameters of generic types
*/
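A toy illustration of the "generic methods on a helper type" rule above (the names below are stand-ins, not the actual codec types): the type parameter lives on a helper struct, so each driver instantiation is monomorphized once, with no top-level generic functions:

```go
package main

import "fmt"

type encDriver interface{ writeStr(s string) }

type jsonDriver struct{}

func (jsonDriver) writeStr(s string) { fmt.Printf("%q", s) }

// The helper type carries the type parameter; all generic logic hangs off it as methods.
type helperEncDriver[T encDriver] struct{}

func (helperEncDriver[T]) encodeStrings(d T, vs []string) {
	for _, s := range vs {
		d.writeStr(s) // direct (de-virtualized) call after monomorphization
	}
}

func main() {
	var h helperEncDriver[jsonDriver]
	h.encodeStrings(jsonDriver{}, []string{"a", "b"})
	fmt.Println()
}
```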
/*
Naming convention:
Currently, as generic and non-generic types/functions/vars are put in the same files,
we suffer because:
- build takes longer as non-generic code is built when a build tag wants only monomorphised code
- files have many lines which are not used at runtime (due to type parameters)
- code coverage is inaccurate on a single run
To resolve this, we are streamlining our file naming strategy.
Basically, we will have the following nomenclature for filenames:
- fastpath (tag:notfastpath): *.notfastpath.*.go vs *.fastpath.*.go
- typed parameters (tag:notmono): *.notmono.*.go vs *.mono.*.go
- safe (tag:safe): *.safe.*.go vs *.unsafe.go
- generated files: *.generated.go
- all others (tags:N/A): *.go without safe/mono/fastpath/generated in the name
The following files will be affected and split/renamed accordingly
Base files:
- binc.go
- cbor.go
- json.go
- msgpack.go
- simple.go
- decode.go
- encode.go
For each base file, split into __file__.go (containing type parameters) and __file__.base.go.
__file__.go will only build with notmono.
Other files:
- fastpath.generated.go -> base.fastpath.generated.go and base.fastpath.notmono.generated.go
- fastpath.not.go -> base.notfastpath.go
- init.go -> init.notmono.go
Appropriate build tags will be included in the files, and the right ones only used for
monomorphization.
*/
/*
Caching Handle options for fast runtime use
If using cached values from Handle options, then
- re-cache them at each reset() call
- reset is always called at the start of each (Must)(En|De)code
- which calls (en|de)coder.reset([]byte|io.Reader|String)
- which calls (en|de)cDriver.reset()
- at reset, (en|de)c(oder|Driver) can re-cache Handle options before each run
Some examples:
- json: e.rawext,di,d,ks,is / d.rawext
- decode: (decoderBase) d.jsms,mtr,str,
*/

461
vendor/github.com/ugorji/go/codec/encode.base.go generated vendored Normal file
View file

@ -0,0 +1,461 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"cmp"
"errors"
"io"
"reflect"
"slices"
"sync"
"time"
)
var errEncoderNotInitialized = errors.New("encoder not initialized")
var encBuiltinRtids []uintptr
func init() {
for _, v := range []interface{}{
(string)(""),
(bool)(false),
(int)(0),
(int8)(0),
(int16)(0),
(int32)(0),
(int64)(0),
(uint)(0),
(uint8)(0),
(uint16)(0),
(uint32)(0),
(uint64)(0),
(uintptr)(0),
(float32)(0),
(float64)(0),
(complex64)(0),
(complex128)(0),
(time.Time{}),
([]byte)(nil),
(Raw{}),
// (interface{})(nil),
} {
t := reflect.TypeOf(v)
encBuiltinRtids = append(encBuiltinRtids, rt2id(t), rt2id(reflect.PointerTo(t)))
}
slices.Sort(encBuiltinRtids)
}
// encDriver abstracts the actual codec (binc vs msgpack, etc)
type encDriverI interface {
EncodeNil()
EncodeInt(i int64)
EncodeUint(i uint64)
EncodeBool(b bool)
EncodeFloat32(f float32)
EncodeFloat64(f float64)
// re is never nil
EncodeRawExt(re *RawExt)
// ext is never nil
EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext)
// EncodeString using cUTF8, honor'ing StringToRaw flag
EncodeString(v string)
EncodeStringNoEscape4Json(v string)
// encode a non-nil []byte
EncodeStringBytesRaw(v []byte)
// encode a []byte as nil, empty or encoded sequence of bytes depending on context
EncodeBytes(v []byte)
EncodeTime(time.Time)
WriteArrayStart(length int)
WriteArrayEnd()
WriteMapStart(length int)
WriteMapEnd()
// these write a zero-len map or array into the stream
WriteMapEmpty()
WriteArrayEmpty()
writeNilMap()
writeNilArray()
writeNilBytes()
// these are no-op except for json
encDriverContainerTracker
// reset will reset current encoding runtime state, and cached information from the handle
reset()
atEndOfEncode()
writerEnd()
writeBytesAsis(b []byte)
// writeStringAsisDblQuoted(v string)
resetOutBytes(out *[]byte)
resetOutIO(out io.Writer)
init(h Handle, shared *encoderBase, enc encoderI) (fp interface{})
// driverStateManager
}
type encInit2er struct{}
func (encInit2er) init2(enc encoderI) {}
type encDriverContainerTracker interface {
WriteArrayElem(firstTime bool)
WriteMapElemKey(firstTime bool)
WriteMapElemValue()
}
type encDriverNoState struct{}
// func (encDriverNoState) captureState() interface{} { return nil }
// func (encDriverNoState) resetState() {}
// func (encDriverNoState) restoreState(v interface{}) {}
func (encDriverNoState) reset() {}
type encDriverNoopContainerWriter struct{}
func (encDriverNoopContainerWriter) WriteArrayStart(length int) {}
func (encDriverNoopContainerWriter) WriteArrayEnd() {}
func (encDriverNoopContainerWriter) WriteMapStart(length int) {}
func (encDriverNoopContainerWriter) WriteMapEnd() {}
func (encDriverNoopContainerWriter) atEndOfEncode() {}
// encStructFieldObj[Slice] is used for sorting when there are missing fields and canonical flag is set
type encStructFieldObj struct {
key string
rv reflect.Value
intf interface{}
isRv bool
noEsc4json bool
builtin bool
}
type encStructFieldObjSlice []encStructFieldObj
func (p encStructFieldObjSlice) Len() int { return len(p) }
func (p encStructFieldObjSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p encStructFieldObjSlice) Less(i, j int) bool {
return p[uint(i)].key < p[uint(j)].key
}
// ----
type orderedRv[T cmp.Ordered] struct {
v T
r reflect.Value
}
func cmpOrderedRv[T cmp.Ordered](v1, v2 orderedRv[T]) int {
return cmp.Compare(v1.v, v2.v)
}
// ----
type encFnInfo struct {
ti *typeInfo
xfFn Ext
xfTag uint64
addrE bool
// addrEf bool // force: if addrE, then encode function MUST take a ptr
}
// ----
// EncodeOptions captures configuration options during encode.
type EncodeOptions struct {
// WriterBufferSize is the size of the buffer used when writing.
//
// if > 0, we use a smart buffer internally for performance purposes.
WriterBufferSize int
// ChanRecvTimeout is the timeout used when selecting from a chan.
//
// Configuring this controls how we receive from a chan during the encoding process.
// - If ==0, we only consume the elements currently available in the chan.
// - if <0, we consume until the chan is closed.
// - If >0, we consume until this timeout.
ChanRecvTimeout time.Duration
// StructToArray specifies to encode a struct as an array, and not as a map
StructToArray bool
// Canonical representation means that encoding a value will always result in the same
// sequence of bytes.
//
// This only affects maps, as the iteration order for maps is random.
//
// The implementation MAY use the natural sort order for the map keys if possible:
//
// - If there is a natural sort order (ie for number, bool, string or []byte keys),
// then the map keys are first sorted in natural order and then written
// with corresponding map values to the stream.
// - If there is no natural sort order, then the map keys will first be
// encoded into []byte, and then sorted,
// before writing the sorted keys and the corresponding map values to the stream.
//
Canonical bool
// CheckCircularRef controls whether we check for circular references
// and error fast during an encode.
//
// If enabled, an error is received if a pointer to a struct
// references itself either directly or through one of its fields (iteratively).
//
// This is opt-in, as there may be a performance hit to checking circular references.
CheckCircularRef bool
// RecursiveEmptyCheck controls how we determine whether a value is empty.
//
// If true, we descend into interfaces and pointers to recursively check if value is empty.
//
// We *might* check struct fields one by one to see if empty
// (if we cannot directly check if a struct value is equal to its zero value).
// If so, we honor IsZero, Comparable, IsCodecEmpty(), etc.
// Note: This *may* make OmitEmpty more expensive due to the large number of reflect calls.
//
// If false, we check if the value is equal to its zero value (newly allocated state).
RecursiveEmptyCheck bool
// Raw controls whether we encode Raw values.
// This is a "dangerous" option and must be explicitly set.
// If set, we blindly encode Raw values as-is, without checking
// if they are a correct representation of a value in that format.
// If unset, we error out.
Raw bool
// StringToRaw controls how strings are encoded.
//
// As a go string is just an (immutable) sequence of bytes,
// it can be encoded either as raw bytes or as a UTF string.
//
// By default, strings are encoded as UTF-8,
// but can be treated as []byte during an encode.
//
// Note that things which we know (by definition) to be UTF-8
// are ALWAYS encoded as UTF-8 strings.
// These include encoding.TextMarshaler, time.Format calls, struct field names, etc.
StringToRaw bool
// OptimumSize controls whether we optimize for the smallest size.
//
// Some formats will use this flag to determine whether to encode
// in the smallest size possible, even if it takes slightly longer.
//
// For example, some formats that support half-floats might check if it is possible
// to store a float64 as a half float. Doing this check has a small performance cost,
// but the benefit is that the encoded message will be smaller.
OptimumSize bool
// NoAddressableReadonly controls whether we try to force a non-addressable value
// to be addressable so we can call a pointer method on it e.g. for types
// that support Selfer, json.Marshaler, etc.
//
// Use it in the very rare occurrence that your types modify a pointer value when calling
// an encode callback function e.g. JsonMarshal, TextMarshal, BinaryMarshal or CodecEncodeSelf.
NoAddressableReadonly bool
// NilCollectionToZeroLength controls whether we encode nil collections (map, slice, chan)
// as nil (e.g. null if using JSON) or as zero length collections (e.g. [] or {} if using JSON).
//
// This is useful in many scenarios e.g.
// - encoding in go, but decoding the encoded stream in python
// where context of the type is missing but needed
//
// Note: this flag ignores the MapBySlice tag, and will encode nil slices, maps and chan
// in their natural zero-length formats e.g. a slice in json encoded as []
// (and not nil or {} if MapBySlice tag).
NilCollectionToZeroLength bool
}
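As with DecodeOptions, these fields are set directly on a concrete handle. A minimal sketch (not part of this diff) using Canonical for deterministic output:

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.CborHandle
	h.Canonical = true   // deterministic map-key ordering
	h.OptimumSize = true // allow the format to pick smaller encodings where it can

	var out []byte
	m := map[string]int{"b": 2, "a": 1}
	if err := codec.NewEncoderBytes(&out, &h).Encode(m); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", out) // same bytes on every run, since Canonical is set
}
```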
// ---------------------------------------------
// encoderBase is shared as a field between Encoder and its encDrivers.
// This way, encDrivers need not hold a reference to the Encoder itself.
type encoderBase struct {
perType encPerType
h *BasicHandle
// MARKER: these fields below should belong directly in Encoder.
// There should not be any pointers here - just values.
// we pack them here for space efficiency and cache-line optimization.
rtidFn, rtidFnNoExt *atomicRtidFnSlice
// se encoderI
err error
blist bytesFreeList
// js bool // is json encoder?
// be bool // is binary encoder?
bytes bool
c containerState
calls uint16
seq uint16 // sequencer (e.g. used by binc for symbols, etc)
// ---- cpu cache line boundary
hh Handle
// ---- cpu cache line boundary
// ---- writable fields during execution --- *try* to keep in sep cache line
ci circularRefChecker
slist sfiRvFreeList
}
func (e *encoderBase) HandleName() string {
return e.hh.Name()
}
// Release is a no-op.
//
// Deprecated: Pooled resources are not used with an Encoder.
// This method is kept for compatibility reasons only.
func (e *encoderBase) Release() {
}
func (e *encoderBase) setContainerState(cs containerState) {
if cs != 0 {
e.c = cs
}
}
func (e *encoderBase) haltOnMbsOddLen(length int) {
if length&1 != 0 { // similar to &1==1 or %2 == 1
halt.errorInt("mapBySlice requires even slice length, but got ", int64(length))
}
}
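For context, a hedged sketch of the mapBySlice feature this guards (assuming the package's public MapBySlice interface): a slice type implementing it is encoded as a map of alternating key/value pairs, hence the even-length requirement:

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// pairs implements codec.MapBySlice, so it encodes as a map of alternating key/value pairs.
type pairs []interface{}

func (pairs) MapBySlice() {}

func main() {
	var h codec.JsonHandle
	var out []byte
	// 4 elements -> 2 key/value pairs; an odd length would trigger haltOnMbsOddLen.
	if err := codec.NewEncoderBytes(&out, &h).Encode(pairs{"a", 1, "b", 2}); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"a":1,"b":2}
}
```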
// addrRV returns an addressable value given that rv is not addressable
func (e *encoderBase) addrRV(rv reflect.Value, typ, ptrType reflect.Type) (rva reflect.Value) {
// if rv.CanAddr() {
// return rvAddr(rv, ptrType)
// }
if e.h.NoAddressableReadonly {
rva = reflect.New(typ)
rvSetDirect(rva.Elem(), rv)
return
}
return rvAddr(e.perType.AddressableRO(rv), ptrType)
}
func (e *encoderBase) wrapErr(v error, err *error) {
*err = wrapCodecErr(v, e.hh.Name(), 0, true)
}
func (e *encoderBase) kErr(_ *encFnInfo, rv reflect.Value) {
halt.errorf("unsupported encoding kind: %s, for %#v", rv.Kind(), any(rv))
}
func chanToSlice(rv reflect.Value, rtslice reflect.Type, timeout time.Duration) (rvcs reflect.Value) {
rvcs = rvZeroK(rtslice, reflect.Slice)
if timeout < 0 { // consume until close
for {
recv, recvOk := rv.Recv()
if !recvOk {
break
}
rvcs = reflect.Append(rvcs, recv)
}
} else {
cases := make([]reflect.SelectCase, 2)
cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv}
if timeout == 0 {
cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault}
} else {
tt := time.NewTimer(timeout)
cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)}
}
for {
chosen, recv, recvOk := reflect.Select(cases)
if chosen == 1 || !recvOk {
break
}
rvcs = reflect.Append(rvcs, recv)
}
}
return
}
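Usage sketch for the channel-draining behavior above, driven by ChanRecvTimeout (illustrative, not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle
	h.ChanRecvTimeout = -1 // consume until the channel is closed

	ch := make(chan int, 3)
	ch <- 1
	ch <- 2
	ch <- 3
	close(ch)

	var out []byte
	if err := codec.NewEncoderBytes(&out, &h).Encode(ch); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // [1,2,3]
}
```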
type encoderI interface {
Encode(v interface{}) error
MustEncode(v interface{})
Release()
Reset(w io.Writer)
ResetBytes(out *[]byte)
wrapErr(v error, err *error)
atEndOfEncode()
writerEnd()
encodeI(v interface{})
encodeR(v reflect.Value)
encodeAs(v interface{}, t reflect.Type, ext bool)
setContainerState(cs containerState) // needed for canonical encoding via side encoder
}
var errEncNoResetBytesWithWriter = errors.New("cannot reset an Encoder which outputs to []byte with an io.Writer")
var errEncNoResetWriterWithBytes = errors.New("cannot reset an Encoder which outputs to an io.Writer with a []byte")
type encDriverContainerNoTrackerT struct{}
func (encDriverContainerNoTrackerT) WriteArrayElem(firstTime bool) {}
func (encDriverContainerNoTrackerT) WriteMapElemKey(firstTime bool) {}
func (encDriverContainerNoTrackerT) WriteMapElemValue() {}
type Encoder struct {
encoderI
}
// NewEncoder returns an Encoder for encoding into an io.Writer.
//
// For efficiency, users are encouraged to configure WriterBufferSize on the handle
// OR pass in a memory buffered writer (eg bufio.Writer, bytes.Buffer).
func NewEncoder(w io.Writer, h Handle) *Encoder {
return &Encoder{h.newEncoder(w)}
}
// NewEncoderBytes returns an encoder for encoding directly and efficiently
// into a byte slice, using zero-copying to temporary slices.
//
// It will potentially replace the output byte slice pointed to.
// After encoding, the out parameter contains the encoded contents.
func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
return &Encoder{h.newEncoderBytes(out)}
}
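Round-trip sketch for the two constructors above (ordinary public API; not part of the diff):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.MsgpackHandle

	// Encode to an io.Writer.
	var buf bytes.Buffer
	if err := codec.NewEncoder(&buf, &h).Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}

	// Encode into a caller-owned []byte; the slice pointed to may be grown and replaced.
	var out []byte
	if err := codec.NewEncoderBytes(&out, &h).Encode([]string{"x", "y"}); err != nil {
		panic(err)
	}

	fmt.Println(buf.Len(), len(out))
}
```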
// ----
func sideEncode(h Handle, p *sync.Pool, fn func(encoderI)) {
var s encoderI
if usePoolForSideEncode {
s = p.Get().(encoderI)
defer p.Put(s)
} else {
// initialization cycle error
// s = NewEncoderBytes(nil, h).encoderI
s = p.New().(encoderI)
}
fn(s)
}
func oneOffEncode(se encoderI, v interface{}, out *[]byte, basetype reflect.Type, ext bool) {
se.ResetBytes(out)
se.encodeAs(v, basetype, ext)
se.atEndOfEncode()
se.writerEnd()
// e.sideEncoder(&bs)
// e.sideEncode(v, basetype, 0)
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,555 +0,0 @@
// +build !notfastpath
// +build !codec.notfastpath
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from fast-path.go.tmpl - DO NOT EDIT.
package codec
// Fast path functions try to create a fast path encode or decode implementation
// for common maps and slices.
//
// We define the functions and register them in this single file
// so as not to pollute the encode.go and decode.go, and create a dependency in there.
// This file can be omitted without causing a build failure.
//
// The advantage of fast paths is:
// - Many calls bypass reflection altogether
//
// Currently support
// - slice of all builtin types (numeric, bool, string, []byte)
// - maps of builtin types to builtin or interface{} type, EXCEPT FOR
// keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{}
// AND values of type int8/16/32, uint16/32
// This should provide adequate "typical" implementations.
//
// Note that fast track decode functions must handle values for which an address cannot be obtained.
// For example:
// m2 := map[string]int{}
// p2 := []interface{}{m2}
// // decoding into p2 will bomb if fast track functions do not treat like unaddressable.
//
{{/*
fastpathEncMapStringUint64R (called by fastpath...switch)
EncMapStringUint64V (called by codecgen)
fastpathEncSliceBoolR: (called by fastpath...switch) (checks f.ti.mbs and calls one of them below)
EncSliceBoolV (also called by codecgen)
EncAsMapSliceBoolV (delegate when mapbyslice=true)
fastpathDecSliceIntfR (called by fastpath...switch) (calls Y or N below depending on if it can be updated)
DecSliceIntfX (called by codecgen) (calls Y below)
DecSliceIntfY (delegate when slice CAN be updated)
DecSliceIntfN (delegate when slice CANNOT be updated e.g. from array or non-addressable slice)
fastpathDecMap...R (called by fastpath...switch) (calls L or X? below)
DecMap...X (called by codecgen)
DecMap...L (delegated to by both above)
*/ -}}
import (
"reflect"
"sort"
)
const fastpathEnabled = true
{{/*
const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v"
*/ -}}
type fastpathT struct {}
var fastpathTV fastpathT
type fastpathE struct {
{{/* rtid uintptr */ -}}
rt reflect.Type
encfn func(*Encoder, *codecFnInfo, reflect.Value)
decfn func(*Decoder, *codecFnInfo, reflect.Value)
}
type fastpathA [{{ .FastpathLen }}]fastpathE
type fastpathARtid [{{ .FastpathLen }}]uintptr
var fastpathAv fastpathA
var fastpathAvRtid fastpathARtid
type fastpathAslice struct{}
func (fastpathAslice) Len() int { return {{ .FastpathLen }} }
func (fastpathAslice) Less(i, j int) bool {
return fastpathAvRtid[uint(i)] < fastpathAvRtid[uint(j)]
}
func (fastpathAslice) Swap(i, j int) {
fastpathAvRtid[uint(i)], fastpathAvRtid[uint(j)] = fastpathAvRtid[uint(j)], fastpathAvRtid[uint(i)]
fastpathAv[uint(i)], fastpathAv[uint(j)] = fastpathAv[uint(j)], fastpathAv[uint(i)]
}
func fastpathAvIndex(rtid uintptr) int {
// use binary search to grab the index (adapted from sort/search.go)
// Note: we use goto (instead of for loop) so this can be inlined.
// h, i, j := 0, 0, {{ .FastpathLen }}
var h, i uint
var j uint = {{ .FastpathLen }}
LOOP:
if i < j {
h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
if fastpathAvRtid[h] < rtid {
i = h + 1
} else {
j = h
}
goto LOOP
}
if i < {{ .FastpathLen }} && fastpathAvRtid[i] == rtid {
return int(i)
}
return -1
}
// due to possible initialization loop error, make fastpath in an init()
func init() {
var i uint = 0
fn := func(v interface{},
fe func(*Encoder, *codecFnInfo, reflect.Value),
fd func(*Decoder, *codecFnInfo, reflect.Value)) {
xrt := reflect.TypeOf(v)
xptr := rt2id(xrt)
fastpathAvRtid[i] = xptr
fastpathAv[i] = fastpathE{xrt, fe, fd}
i++
}
{{/* do not register []byte in fast-path */}}
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R)
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R)
{{end}}{{end}}{{end}}
sort.Sort(fastpathAslice{})
}
// -- encode
// -- -- fast path type switch
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
case []{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
case *[]{{ .Elem }}:
if *v == nil {
e.e.EncodeNil()
} else {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
}
{{end}}{{end}}{{end -}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
case map[{{ .MapKey }}]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
case *map[{{ .MapKey }}]{{ .Elem }}:
if *v == nil {
e.e.EncodeNil()
} else {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
}
{{end}}{{end}}{{end -}}
default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
return false
}
return true
}
// -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
var v []{{ .Elem }}
if rv.Kind() == reflect.Array {
rvGetSlice4Array(rv, &v)
} else {
v = rv2i(rv).([]{{ .Elem }})
}
if f.ti.mbs {
fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(v, e)
} else {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
}
}
func (fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) {
{{/* if v == nil { e.e.EncodeNil(); return } */ -}}
{{ if eq .Elem "uint8" "byte" -}}
e.e.EncodeStringBytesRaw(v)
{{ else -}}
e.arrayStart(len(v))
for j := range v {
e.arrayElem()
{{ encmd .Elem "v[j]"}}
}
e.arrayEnd()
{{ end -}}
}
func (fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) {
{{/* if v == nil { e.e.EncodeNil() } else */ -}}
e.haltOnMbsOddLen(len(v))
{{/*
if len(v)&1 != 0 { // similar to &1==1 or %2 == 1
e.errorf(fastpathMapBySliceErrMsg, len(v))
}
*/ -}}
e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
for j := range v {
if j&1 == 0 { // if j%2 == 0 {
e.mapElemKey()
} else {
e.mapElemValue()
}
{{ encmd .Elem "v[j]"}}
}
e.mapEnd()
}
{{end}}{{end}}{{end -}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e)
}
func (fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) {
{{/* if v == nil { e.e.EncodeNil(); return } */ -}}
e.mapStart(len(v))
if e.h.Canonical { {{/* need to figure out .NoCanonical */}}
{{if eq .MapKey "interface{}"}}{{/* out of band */ -}}
var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
e2 := NewEncoderBytes(&mksv, e.hh)
v2 := make([]bytesIntf, len(v))
var i, l uint {{/* put loop variables outside. seems currently needed for better perf */}}
var vp *bytesIntf
for k2 := range v {
l = uint(len(mksv))
e2.MustEncode(k2)
vp = &v2[i]
vp.v = mksv[l:]
vp.i = k2
i++
}
sort.Sort(bytesIntfSlice(v2))
for j := range v2 {
e.mapElemKey()
e.asis(v2[j].v)
e.mapElemValue()
e.encode(v[v2[j].i])
} {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
var i uint
for k := range v {
v2[i] = {{if eq $x .MapKey}}k{{else}}{{ $x }}(k){{end}}
i++
}
sort.Sort({{ sorttype .MapKey false}}(v2))
for _, k2 := range v2 {
e.mapElemKey()
{{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ $y := printf "%s(k2)" .MapKey }}{{if eq $x .MapKey }}{{ $y = "k2" }}{{end}}{{ encmd .MapKey $y }}{{end}}
e.mapElemValue()
{{ $y := printf "v[%s(k2)]" .MapKey }}{{if eq $x .MapKey }}{{ $y = "v[k2]" }}{{end}}{{ encmd .Elem $y }}
} {{end}}
} else {
for k2, v2 := range v {
e.mapElemKey()
{{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ encmd .MapKey "k2"}}{{end}}
e.mapElemValue()
{{ encmd .Elem "v2"}}
}
}
e.mapEnd()
}
{{end}}{{end}}{{end -}}
// -- decode
// -- -- fast path type switch
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
var changed bool
var containerLen int
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
case []{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Dec" false }}N(v, d)
case *[]{{ .Elem }}:
var v2 []{{ .Elem }}
if v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}Y(*v, d); changed {
*v = v2
}
{{end}}{{end}}{{end -}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/*
// maps only change if nil, and in that case, there's no point copying
*/ -}}
case map[{{ .MapKey }}]{{ .Elem }}:
containerLen = d.mapStart(d.d.ReadMapStart())
if containerLen != containerLenNil {
if containerLen != 0 {
fastpathTV.{{ .MethodNamePfx "Dec" false }}L(v, containerLen, d)
}
d.mapEnd()
}
case *map[{{ .MapKey }}]{{ .Elem }}:
{{/*
containerLen = d.mapStart(d.d.ReadMapStart())
if containerLen == 0 {
d.mapEnd()
} else if containerLen == containerLenNil {
*v = nil
} else {
if *v == nil {
*v = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}))
}
fastpathTV.{{ .MethodNamePfx "Dec" false }}L(*v, containerLen, d)
}
// consider delegating fully to X - encoding *map is uncommon, so ok to pay small function call cost
*/ -}}
fastpathTV.{{ .MethodNamePfx "Dec" false }}X(v, d)
{{end}}{{end}}{{end -}}
default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
return false
}
return true
}
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
case *[]{{ .Elem }}:
*v = nil
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
case *map[{{ .MapKey }}]{{ .Elem }}:
*v = nil
{{end}}{{end}}{{end}}
default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
return false
}
return true
}
// -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
{{/*
Slices can change if they
- did not come from an array
- are addressable (from a ptr)
- are settable (e.g. contained in an interface{})
*/}}
func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
{{/*
// seqTypeArray=true means that we are not getting a pointer, so no need to check that.
if f.seq != seqTypeArray && rv.Kind() == reflect.Ptr {
*/ -}}
var v []{{ .Elem }}
switch rv.Kind() {
case reflect.Ptr:
vp := rv2i(rv).(*[]{{ .Elem }})
var changed bool
if v, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}Y(*vp, d); changed {
*vp = v
}
case reflect.Array:
rvGetSlice4Array(rv, &v)
fastpathTV.{{ .MethodNamePfx "Dec" false }}N(v, d)
default:
fastpathTV.{{ .MethodNamePfx "Dec" false }}N(rv2i(rv).([]{{ .Elem }}), d)
}
}
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) {
if v, changed := f.{{ .MethodNamePfx "Dec" false }}Y(*vp, d); changed { *vp = v }
}
func (fastpathT) {{ .MethodNamePfx "Dec" false }}Y(v []{{ .Elem }}, d *Decoder) (v2 []{{ .Elem }}, changed bool) {
{{ if eq .Elem "uint8" "byte" -}}
switch d.d.ContainerType() {
case valueTypeNil, valueTypeMap:
break
default:
v2 = d.decodeBytesInto(v[:len(v):len(v)])
changed = !(len(v2) > 0 && len(v2) == len(v) && &v2[0] == &v[0]) // not same slice
return
}
{{ end -}}
slh, containerLenS := d.decSliceHelperStart()
if slh.IsNil {
if v == nil { return }
return nil, true
}
if containerLenS == 0 {
if v == nil { v = []{{ .Elem }}{} } else if len(v) != 0 { v = v[:0] }
slh.End()
return v, true
}
hasLen := containerLenS > 0
var xlen int
if hasLen {
if containerLenS > cap(v) {
xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
if xlen <= cap(v) {
v = v[:uint(xlen)]
} else {
v = make([]{{ .Elem }}, uint(xlen))
}
changed = true
} else if containerLenS != len(v) {
v = v[:containerLenS]
changed = true
}
}
var j int
for j = 0; d.containerNext(j, containerLenS, hasLen); j++ {
if j == 0 && len(v) == 0 { // means hasLen == false
xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) {{/* xlen = decDefSliceCap */}}
v = make([]{{ .Elem }}, uint(xlen))
changed = true
}
{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
if j >= len(v) {
v = append(v, {{ zerocmd .Elem }})
changed = true
}
slh.ElemContainerState(j)
{{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem false }}{{ end }}
}
if j < len(v) {
v = v[:uint(j)]
changed = true
} else if j == 0 && v == nil {
v = []{{ .Elem }}{}
changed = true
}
slh.End()
return v, changed
}
func (fastpathT) {{ .MethodNamePfx "Dec" false }}N(v []{{ .Elem }}, d *Decoder) {
{{ if eq .Elem "uint8" "byte" -}}
switch d.d.ContainerType() {
case valueTypeNil, valueTypeMap:
break
default:
v2 := d.decodeBytesInto(v[:len(v):len(v)])
if !(len(v2) > 0 && len(v2) == len(v) && &v2[0] == &v[0]) { // not same slice
copy(v, v2)
}
return
}
{{ end -}}
slh, containerLenS := d.decSliceHelperStart()
if slh.IsNil {
return
}
if containerLenS == 0 {
slh.End()
return
}
hasLen := containerLenS > 0
for j := 0; d.containerNext(j, containerLenS, hasLen); j++ {
{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
if j >= len(v) {
slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
return
}
slh.ElemContainerState(j)
{{ if eq .Elem "interface{}" -}}
d.decode(&v[uint(j)])
{{- else -}}
v[uint(j)] = {{ decmd .Elem false }}
{{- end }}
}
slh.End()
}
{{end}}{{end}}{{end -}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
{{/*
Maps can change if they are
- addressable (from a ptr)
- settable (e.g. contained in an interface{})
Also, these methods are called by decodeValue directly, after handling a TryNil.
Consequently, there's no need to check for containerLenNil here.
*/ -}}
func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
containerLen := d.mapStart(d.d.ReadMapStart())
{{/*
if containerLen == containerLenNil {
if rv.Kind() == reflect.Ptr {
*(rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})) = nil
}
return
}
*/ -}}
if rv.Kind() == reflect.Ptr {
vp, _ := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
if *vp == nil {
*vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}))
}
if containerLen != 0 {
fastpathTV.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d)
}
} else if containerLen != 0 {
fastpathTV.{{ .MethodNamePfx "Dec" false }}L(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), containerLen, d)
}
d.mapEnd()
}
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) {
containerLen := d.mapStart(d.d.ReadMapStart())
if containerLen == containerLenNil {
*vp = nil
} else {
if *vp == nil {
*vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}))
}
if containerLen != 0 {
f.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d)
}
d.mapEnd()
}
}
func (fastpathT) {{ .MethodNamePfx "Dec" false }}L(v map[{{ .MapKey }}]{{ .Elem }}, containerLen int, d *Decoder) {
{{/* No need to check if containerLen == containerLenNil, as that is checked by R and L above */ -}}
if v == nil {
d.errorf("cannot decode into nil map[{{ .MapKey }}]{{ .Elem }} given stream length: %v", containerLen)
{{/* d.swallowMapContents(containerLen) */ -}}
return
}
{{if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
{{else if eq .Elem "bytes" "[]byte" }}mapGet := v != nil && !d.h.MapValueReset
{{end -}}
var mk {{ .MapKey }}
var mv {{ .Elem }}
hasLen := containerLen > 0
for j := 0; d.containerNext(j, containerLen, hasLen); j++ {
d.mapElemKey()
{{ if eq .MapKey "interface{}" }}mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.stringZC(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
}{{ else }}mk = {{ decmd .MapKey true }}{{ end }}
d.mapElemValue()
{{ if eq .Elem "interface{}" "[]byte" "bytes" -}}
if mapGet { mv = v[mk] } else { mv = nil }
{{ end -}}
{{ if eq .Elem "interface{}" -}}
d.decode(&mv)
{{ else if eq .Elem "[]byte" "bytes" -}}
mv = d.decodeBytesInto(mv)
{{ else -}}
mv = {{ decmd .Elem false }}
{{ end -}}
v[mk] = mv
}
}
{{end}}{{end}}{{end}}

134
vendor/github.com/ugorji/go/codec/fastpath.go.tmpl generated vendored Normal file
View file

@ -0,0 +1,134 @@
//go:build !notfastpath && !codec.notfastpath
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from fastpath.go.tmpl - DO NOT EDIT.
package codec
// Fast path functions try to create a fast path encode or decode implementation
// for common maps and slices.
//
// We define the functions and register them in this single file
// so as not to pollute the encode.go and decode.go, and create a dependency in there.
// This file can be omitted without causing a build failure.
//
// The advantage of fast paths is:
// - Many calls bypass reflection altogether
//
// Currently support
// - slice of all builtin types (numeric, bool, string, []byte)
// - maps of builtin types to builtin or interface{} type, EXCEPT FOR
// keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{}
// AND values of type int8/16/32, uint16/32
// This should provide adequate "typical" implementations.
//
// Note that fast track decode functions must handle values for which an address cannot be obtained.
// For example:
// m2 := map[string]int{}
// p2 := []interface{}{m2}
// // decoding into p2 will bomb if fast track functions do not treat like unaddressable.
//
{{/*
// ----------------
fastpathEncMap<KV>R func (mapped to type id), routes to:
- ft.EncMap<KV>V
fastpathEncSlice<E>R func (mapped to type id), delegates to one of:
- ft.EncSlice<E>V
- ft.EncAsMapSlice<E>V (when mapbyslice ie f.ti.mbs=true)
// ----------------
fastpathDecSlice<E>R func (mapped to type id), delegates to:
- ft.DecSliceIntfY (when slice CAN be updated)
- ft.DecSliceIntfN (when slice CANNOT be updated e.g. from array or non-addressable slice)
fastpathDecMap<KV>R func (mapped to type id), routes to
- ft.DecMap<KV>L (handles ptr which is changeable, and non-pointer which cannot be made if nil)
// ----------------
NOTE:
- fastpath typeswitch directly calls the secondary methods for builtin maps/slices with appropriate nil handling:
- except EncAsMapSlice<E>V which only applies to wrapper types not those in the switch
- fastpathEncXXX functions mapped to type ID MUST do nil-checks during encode
- they are only called by decodeValue/encodeValue or other code (same way kMap et al are called)
*/ -}}
import (
"reflect"
"sort"
"slices"
)
const fastpathEnabled = true
{{/*
const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v"
*/ -}}
type fastpathARtid [{{ .FastpathLen }}]uintptr
type fastpathRtRtid struct {
rtid uintptr
rt reflect.Type
}
type fastpathARtRtid [{{ .FastpathLen }}]fastpathRtRtid
var (
fastpathAvRtidArr fastpathARtid
fastpathAvRtRtidArr fastpathARtRtid
fastpathAvRtid = fastpathAvRtidArr[:]
fastpathAvRtRtid = fastpathAvRtRtidArr[:]
)
func fastpathAvIndex(rtid uintptr) (i uint, ok bool) {
return searchRtids(fastpathAvRtid, rtid)
}
func init() {
var i uint = 0
fn := func(v interface{}) {
xrt := reflect.TypeOf(v)
xrtid := rt2id(xrt)
xptrtid := rt2id(reflect.PointerTo(xrt))
{{- /* only the base slice/map rtid is put in fastpathAvIndex, since we only handle slices/map/array */}}
fastpathAvRtid[i] = xrtid
fastpathAvRtRtid[i] = fastpathRtRtid{ rtid: xrtid, rt: xrt }
{{- /* fastpath type switches however handle slices/map/array, and pointers to them */}}
encBuiltinRtids = append(encBuiltinRtids, xrtid, xptrtid)
decBuiltinRtids = append(decBuiltinRtids, xrtid, xptrtid)
i++
}
{{/* do not register []byte in fastpath */}}
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
fn([]{{ .Elem }}(nil))
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
fn(map[{{ .MapKey }}]{{ .Elem }}(nil))
{{end}}{{end}}{{end}}
sort.Slice(fastpathAvRtid, func(i, j int) bool { return fastpathAvRtid[i] < fastpathAvRtid[j] })
sort.Slice(fastpathAvRtRtid, func(i, j int) bool { return fastpathAvRtRtid[i].rtid < fastpathAvRtRtid[j].rtid })
slices.Sort(encBuiltinRtids)
slices.Sort(decBuiltinRtids)
}
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
case *[]{{ .Elem }}:
*v = nil
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
case *map[{{ .MapKey }}]{{ .Elem }}:
*v = nil
{{end}}{{end}}{{end}}
default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
return false
}
return true
}

View file

@ -0,0 +1,525 @@
//go:build !notfastpath && !codec.notfastpath && (notmono || codec.notmono)
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from fastpath.notmono.go.tmpl - DO NOT EDIT.
package codec
import (
"reflect"
"sort"
"slices"
)
type fastpathE[T encDriver] struct {
rtid uintptr
rt reflect.Type
encfn func(*encoder[T], *encFnInfo, reflect.Value)
}
type fastpathD[T decDriver] struct {
rtid uintptr
rt reflect.Type
decfn func(*decoder[T], *decFnInfo, reflect.Value)
}
type fastpathEs[T encDriver] [{{ .FastpathLen }}]fastpathE[T]
type fastpathDs[T decDriver] [{{ .FastpathLen }}]fastpathD[T]
type fastpathET[T encDriver] struct{}
type fastpathDT[T decDriver] struct{}
func (helperEncDriver[T]) fastpathEList() *fastpathEs[T] {
var i uint = 0
var s fastpathEs[T]
fn := func(v interface{}, fe func(*encoder[T], *encFnInfo, reflect.Value)) {
xrt := reflect.TypeOf(v)
s[i] = fastpathE[T]{rt2id(xrt), xrt, fe}
i++
}
{{/* do not register []byte in fastpath */}}
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
fn([]{{ .Elem }}(nil), (*encoder[T]).{{ .MethodNamePfx "fastpathEnc" false }}R)
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encoder[T]).{{ .MethodNamePfx "fastpathEnc" false }}R)
{{end}}{{end}}{{end}}
sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid })
return &s
}
func (helperDecDriver[T]) fastpathDList() *fastpathDs[T] {
var i uint = 0
var s fastpathDs[T]
fn := func(v interface{}, fd func(*decoder[T], *decFnInfo, reflect.Value)) {
xrt := reflect.TypeOf(v)
s[i] = fastpathD[T]{rt2id(xrt), xrt, fd}
i++
}
{{/* do not register []byte in fastpath */}}
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
fn([]{{ .Elem }}(nil), (*decoder[T]).{{ .MethodNamePfx "fastpathDec" false }}R)
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*decoder[T]).{{ .MethodNamePfx "fastpathDec" false }}R)
{{end}}{{end}}{{end}}
sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid })
return &s
}
// -- encode
// -- -- fast path type switch
func (helperEncDriver[T]) fastpathEncodeTypeSwitch(iv interface{}, e *encoder[T]) bool {
var ft fastpathET[T]
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
case []{{ .Elem }}:
if v == nil { e.e.writeNilArray() } else { ft.{{ .MethodNamePfx "Enc" false }}V(v, e) }
{{end}}{{end}}{{end -}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
case map[{{ .MapKey }}]{{ .Elem }}:
if v == nil { e.e.writeNilMap() } else { ft.{{ .MethodNamePfx "Enc" false }}V(v, e) }
{{end}}{{end}}{{end -}}
default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
return false
}
return true
}
// -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
func (e *encoder[T]) {{ .MethodNamePfx "fastpathEnc" false }}R(f *encFnInfo, rv reflect.Value) {
var ft fastpathET[T]
var v []{{ .Elem }}
if rv.Kind() == reflect.Array {
rvGetSlice4Array(rv, &v)
} else {
v = rv2i(rv).([]{{ .Elem }})
}
if f.ti.mbs {
ft.{{ .MethodNamePfx "EncAsMap" false }}V(v, e)
return
}
ft.{{ .MethodNamePfx "Enc" false }}V(v, e)
}
func (fastpathET[T]) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *encoder[T]) {
{{ if eq .Elem "uint8" "byte" -}}
e.e.EncodeStringBytesRaw(v)
{{ else -}}
if len(v) == 0 {
e.c = 0; e.e.WriteArrayEmpty()
return
}
e.arrayStart(len(v))
for j := range v {
e.c = containerArrayElem; e.e.WriteArrayElem(j == 0)
{{ encmd .Elem "v[j]"}}
}
e.c = 0; e.e.WriteArrayEnd()
{{ end -}}
}
func (fastpathET[T]) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *encoder[T]) {
if len(v) == 0 {
e.c = 0; e.e.WriteMapEmpty()
return
}
e.haltOnMbsOddLen(len(v))
e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
for j := range v {
if j&1 == 0 { // if j%2 == 0 {
e.c = containerMapKey; e.e.WriteMapElemKey(j == 0)
} else {
e.mapElemValue()
}
{{ encmd .Elem "v[j]"}}
}
e.c = 0; e.e.WriteMapEnd()
}
{{end}}{{end}}{{end -}}
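The EncAsMap path above handles MapBySlice ("mbs") values: an even-length slice is written as len(v)/2 key/value pairs (even indexes are keys, odd indexes the following values), and an odd length halts encoding. A standalone sketch of that pairing rule, assuming the standard errors package (not the library's API):

```
// Sketch of the MapBySlice pairing rule used by the EncAsMap path.
func pairsFromSlice(v []string) (map[string]string, error) {
	if len(v)&1 != 0 {
		return nil, errors.New("MapBySlice requires an even-length slice")
	}
	m := make(map[string]string, len(v)>>1)
	for i := 0; i < len(v); i += 2 {
		m[v[i]] = v[i+1]
	}
	return m, nil
}
```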
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
func (e *encoder[T]) {{ .MethodNamePfx "fastpathEnc" false }}R(f *encFnInfo, rv reflect.Value) {
{{/* var ft fastpathET[T]
ft.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e) */ -}}
fastpathET[T]{}.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e)
}
func (fastpathET[T]) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *encoder[T]) {
{{/* if v == nil { e.e.EncodeNil(); return } */ -}}
if len(v) == 0 {
e.e.WriteMapEmpty()
return
}
var i uint
e.mapStart(len(v))
if e.h.Canonical { {{/* need to figure out .NoCanonical */}}
{{if eq .MapKey "interface{}"}}{{/* out of band */ -}}
var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
e2 := NewEncoderBytes(&mksv, e.hh)
v2 := make([]bytesIntf, len(v))
var l uint {{/* put loop variables outside. seems currently needed for better perf */}}
var vp *bytesIntf
for k2 := range v {
l = uint(len(mksv))
e2.MustEncode(k2)
vp = &v2[i]
vp.v = mksv[l:]
vp.i = k2
i++
}
slices.SortFunc(v2, cmpBytesIntf)
for j := range v2 {
e.c = containerMapKey; e.e.WriteMapElemKey(j == 0)
e.asis(v2[j].v)
e.mapElemValue()
e.encode(v[v2[j].i])
} {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
for k := range v {
v2[i] = {{if eq $x .MapKey}}k{{else}}{{ $x }}(k){{end}}
i++
}
slices.Sort(v2)
{{/* // sort.Sort({{ sorttype .MapKey false}}(v2)) */ -}}
for i, k2 := range v2 {
e.c = containerMapKey; e.e.WriteMapElemKey(i == 0)
{{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ $y := printf "%s(k2)" .MapKey }}{{if eq $x .MapKey }}{{ $y = "k2" }}{{end}}{{ encmd .MapKey $y }}{{end}}
e.mapElemValue()
{{ $y := printf "v[%s(k2)]" .MapKey }}{{if eq $x .MapKey }}{{ $y = "v[k2]" }}{{end}}{{ encmd .Elem $y }}
} {{end}}
} else {
i = 0
for k2, v2 := range v {
e.c = containerMapKey; e.e.WriteMapElemKey(i == 0)
{{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ encmd .MapKey "k2"}}{{end}}
e.mapElemValue()
{{ encmd .Elem "v2"}}
i++
}
}
e.c = 0; e.e.WriteMapEnd()
}
{{end}}{{end}}{{end -}}
// -- decode
// -- -- fast path type switch
func (helperDecDriver[T]) fastpathDecodeTypeSwitch(iv interface{}, d *decoder[T]) bool {
var ft fastpathDT[T]
var changed bool
var containerLen int
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
case []{{ .Elem }}:
ft.{{ .MethodNamePfx "Dec" false }}N(v, d)
case *[]{{ .Elem }}:
var v2 []{{ .Elem }}
if v2, changed = ft.{{ .MethodNamePfx "Dec" false }}Y(*v, d); changed {
*v = v2
}
{{end}}{{end}}{{end -}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/*
// maps only change if nil, and in that case, there's no point copying
*/ -}}
case map[{{ .MapKey }}]{{ .Elem }}:
if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil {
if containerLen != 0 {
ft.{{ .MethodNamePfx "Dec" false }}L(v, containerLen, d)
}
d.mapEnd()
}
case *map[{{ .MapKey }}]{{ .Elem }}:
if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil {
*v = nil
} else {
if *v == nil {
*v = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.maxInitLen(), {{ .Size }}))
}
if containerLen != 0 {
ft.{{ .MethodNamePfx "Dec" false }}L(*v, containerLen, d)
}
d.mapEnd()
}
{{end}}{{end}}{{end -}}
default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
return false
}
return true
}
// -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
{{/*
Slices can change if they
- did not come from an array
- are addressable (from a ptr)
- are settable (e.g. contained in an interface{})
*/}}
func (d *decoder[T]) {{ .MethodNamePfx "fastpathDec" false }}R(f *decFnInfo, rv reflect.Value) {
var ft fastpathDT[T]
{{/*
// seqTypeArray=true means that we are not getting a pointer, so no need to check that.
if f.seq != seqTypeArray && rv.Kind() == reflect.Ptr {
*/ -}}
switch rv.Kind() {
case reflect.Ptr: {{- /* this block is called for types that wrap a fastpath type e.g. wrapSliceUint64 */}}
v := rv2i(rv).(*[]{{ .Elem }})
if vv, changed := ft.{{ .MethodNamePfx "Dec" false }}Y(*v, d); changed {
*v = vv
}
case reflect.Array:
var v []{{ .Elem }}
rvGetSlice4Array(rv, &v)
ft.{{ .MethodNamePfx "Dec" false }}N(v, d)
default:
ft.{{ .MethodNamePfx "Dec" false }}N(rv2i(rv).([]{{ .Elem }}), d)
}
}
func (fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}Y(v []{{ .Elem }}, d *decoder[T]) (v2 []{{ .Elem }}, changed bool) {
ctyp := d.d.ContainerType()
if ctyp == valueTypeNil {
return nil, v != nil
}
{{ if eq .Elem "uint8" "byte" -}}
if ctyp != valueTypeMap {
var dbi dBytesIntoState
v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false)
return v2, dbi != dBytesIntoParamOut
}
containerLenS := d.mapStart(d.d.ReadMapStart()) * 2
{{ else -}}
var containerLenS int
isArray := ctyp == valueTypeArray
if isArray {
containerLenS = d.arrayStart(d.d.ReadArrayStart())
} else if ctyp == valueTypeMap {
containerLenS = d.mapStart(d.d.ReadMapStart()) * 2
} else {
halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String())
}
{{ end -}}
hasLen := containerLenS >= 0
var j int
fnv := func(dst []{{ .Elem }}) { v, changed = dst, true }
for ; d.containerNext(j, containerLenS, hasLen); j++ {
if j == 0 {
if containerLenS == len(v) {
} else if containerLenS < 0 || containerLenS > cap(v) {
if xlen := int(decInferLen(containerLenS, d.maxInitLen(), {{ .Size }})); xlen <= cap(v) {
fnv(v[:uint(xlen)])
} else {
v2 = make([]{{ .Elem }}, uint(xlen))
copy(v2, v)
fnv(v2)
}
} else {
fnv(v[:containerLenS])
}
}
{{ if eq .Elem "uint8" "byte" }}{{ else -}}
if isArray { d.arrayElem(j == 0) } else
{{ end -}}
if j&1 == 0 { d.mapElemKey(j == 0) } else { d.mapElemValue() }
if j >= len(v) { {{- /* // if indefinite, json, etc, then expand the slice (if necessary) */}}
fnv(append(v, {{ zerocmd .Elem }}))
}
{{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem false }}{{ end }}
}
if j < len(v) {
fnv(v[:uint(j)])
} else if j == 0 && v == nil {
fnv([]{{ .Elem }}{})
}
{{ if eq .Elem "uint8" "byte" -}}
d.mapEnd()
{{ else -}}
if isArray { d.arrayEnd() } else { d.mapEnd() }
{{ end -}}
return v, changed
}
func (fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}N(v []{{ .Elem }}, d *decoder[T]) {
ctyp := d.d.ContainerType()
if ctyp == valueTypeNil {
return
}
{{ if eq .Elem "uint8" "byte" -}}
if ctyp != valueTypeMap {
d.decodeBytesInto(v[:len(v):len(v)], true)
return
}
containerLenS := d.mapStart(d.d.ReadMapStart()) * 2
{{ else -}}
var containerLenS int
isArray := ctyp == valueTypeArray
if isArray {
containerLenS = d.arrayStart(d.d.ReadArrayStart())
} else if ctyp == valueTypeMap {
containerLenS = d.mapStart(d.d.ReadMapStart()) * 2
} else {
halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String())
}
{{ end -}}
hasLen := containerLenS >= 0
for j := 0; d.containerNext(j, containerLenS, hasLen); j++ {
{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
{{ if not (eq .Elem "uint8" "byte") -}}
if isArray { d.arrayElem(j == 0) } else
{{ end -}}
if j&1 == 0 { d.mapElemKey(j == 0) } else { d.mapElemValue() }
if j < len(v) {
{{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem false }}{{ end }}
} else {
d.arrayCannotExpand(len(v), j+1)
d.swallow()
}
}
{{ if eq .Elem "uint8" "byte" -}}
d.mapEnd()
{{ else -}}
if isArray { d.arrayEnd() } else { d.mapEnd() }
{{ end -}}
}
{{end}}{{end}}{{end -}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
{{/*
Maps can change if they are
- addressable (from a ptr)
- settable (e.g. contained in an interface{})
*/ -}}
func (d *decoder[T]) {{ .MethodNamePfx "fastpathDec" false }}R(f *decFnInfo, rv reflect.Value) {
var ft fastpathDT[T]
containerLen := d.mapStart(d.d.ReadMapStart())
if rv.Kind() == reflect.Ptr { {{- /* this block is called for types that wrap a fastpath type e.g. wrapMapStringUint64 */}}
vp, _ := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
if *vp == nil {
*vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.maxInitLen(), {{ .Size }}))
}
if containerLen != 0 {
ft.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d)
}
} else if containerLen != 0 {
ft.{{ .MethodNamePfx "Dec" false }}L(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), containerLen, d)
}
d.mapEnd()
}
func (fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}L(v map[{{ .MapKey }}]{{ .Elem }}, containerLen int, d *decoder[T]) {
if v == nil {
halt.errorInt("cannot decode into nil map[{{ .MapKey }}]{{ .Elem }} given stream length: ", int64(containerLen))
{{/* d.swallowMapContents(containerLen); return */ -}}
}
{{if eq .MapKey "interface{}" -}}
var mk {{ .MapKey }}
{{end -}}
{{ if eq .Elem "interface{}" "[]byte" "bytes" -}}
var mv {{ .Elem }}
mapGet := !d.h.MapValueReset
{{- if eq .Elem "interface{}" -}}
&& !d.h.InterfaceReset
{{- end}}
{{end -}}
hasLen := containerLen >= 0
for j := 0; d.containerNext(j, containerLen, hasLen); j++ {
d.mapElemKey(j == 0)
{{ if eq .MapKey "interface{}" -}}
mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.detach2Str(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
}{{ else }}mk := {{ decmd .MapKey true }}{{ end }}
d.mapElemValue()
{{ if eq .Elem "interface{}" "[]byte" "bytes" -}}
if mapGet { mv = v[mk] } else { mv = nil }
{{ end -}}
{{ if eq .Elem "interface{}" -}}
d.decode(&mv)
v[mk] = mv
{{ else if eq .Elem "[]byte" "bytes" -}}
v[mk], _ = d.decodeBytesInto(mv, false)
{{ else -}}
v[mk] = {{ decmd .Elem false }}
{{ end -}}
}
}
{{end}}{{end}}{{end}}
{{- /*
// -- -- fast path type switch
func (helperEncDriver[T]) fastpathEncodeTypeSwitch(iv interface{}, e *encoder[T]) bool {
var ft fastpathET[T]
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
case []{{ .Elem }}:
if v != nil {
ft.{{ .MethodNamePfx "Enc" false }}V(v, e)
} else if e.h.NilCollectionToZeroLength {
e.e.WriteArrayEmpty()
} else {
e.e.EncodeNil()
}
case *[]{{ .Elem }}:
if *v != nil {
ft.{{ .MethodNamePfx "Enc" false }}V(*v, e)
} else if e.h.NilCollectionToZeroLength {
e.e.WriteArrayEmpty()
} else {
e.e.EncodeNil()
}
{{end}}{{end}}{{end -}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
case map[{{ .MapKey }}]{{ .Elem }}:
if v != nil {
ft.{{ .MethodNamePfx "Enc" false }}V(v, e)
} else if e.h.NilCollectionToZeroLength {
e.e.WriteMapEmpty()
} else {
e.e.EncodeNil()
}
case *map[{{ .MapKey }}]{{ .Elem }}:
if *v != nil {
ft.{{ .MethodNamePfx "Enc" false }}V(*v, e)
} else if e.h.NilCollectionToZeroLength {
e.e.WriteMapEmpty()
} else {
e.e.EncodeNil()
}
{{end}}{{end}}{{end -}}
default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
return false
}
return true
}
// used within codecgen, which is no longer supported
func (f fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *decoder[T]) {
if v, changed := f.{{ .MethodNamePfx "Dec" false }}Y(*vp, d); changed { *vp = v }
}
func (f fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *decoder[T]) {
containerLen := d.mapStart(d.d.ReadMapStart())
if containerLen == containerLenNil {
*vp = nil
return
}
if *vp == nil {
*vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.maxInitLen(), {{ .Size }}))
}
if containerLen != 0 {
f.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d)
}
d.mapEnd()
}
*/ -}}
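Note the protocol the Dec...Y helpers above follow: they return the (possibly re-allocated or re-sliced) slice together with a changed flag, so callers holding a pointer write back only when the backing storage actually moved or was resized, while the Dec...N variants decode in place and never grow. A minimal sketch of that protocol, with a hypothetical next() function standing in for the decoder's stream reads:

```
// Sketch of the "slice + changed" protocol; next() is a stand-in for
// the real decoder, not part of the library.
func decUint64sSketch(v []uint64, next func() (uint64, bool)) (v2 []uint64, changed bool) {
	v2 = v
	j := 0
	for {
		n, ok := next()
		if !ok {
			break
		}
		if j >= len(v2) { // expand on demand, as Dec...Y does via append
			v2, changed = append(v2, 0), true
		}
		v2[j] = n
		j++
	}
	if j < len(v2) { // shrink if the stream was shorter than the input
		v2, changed = v2[:j], true
	}
	return
}

// A caller holding *[]uint64 then writes back only on change:
//	if vv, ch := decUint64sSketch(*p, next); ch { *p = vv }
```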


@ -1,90 +0,0 @@
{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}
{{if not isArray -}}
var {{var "c"}} bool {{/* // changed */}}
_ = {{var "c"}}
if {{var "h"}}.IsNil {
if {{var "v"}} != nil {
{{var "v"}} = nil
{{var "c"}} = true
}
} else {{end -}}
if {{var "l"}} == 0 {
{{if isSlice -}}
if {{var "v"}} == nil {
{{var "v"}} = []{{ .Typ }}{}
{{var "c"}} = true
} else if len({{var "v"}}) != 0 {
{{var "v"}} = {{var "v"}}[:0]
{{var "c"}} = true
} {{else if isChan }}if {{var "v"}} == nil {
{{var "v"}} = make({{ .CTyp }}, 0)
{{var "c"}} = true
}
{{end -}}
} else {
{{var "hl"}} := {{var "l"}} > 0
var {{var "rl"}} int
_ = {{var "rl"}}
{{if isSlice }} if {{var "hl"}} {
if {{var "l"}} > cap({{var "v"}}) {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
if {{var "rl"}} <= cap({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
} else {
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
}
{{var "c"}} = true
} else if {{var "l"}} != len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "l"}}]
{{var "c"}} = true
}
}
{{end -}}
var {{var "j"}} int
{{/* // var {{var "dn"}} bool */ -}}
for {{var "j"}} = 0; z.DecContainerNext({{var "j"}}, {{var "l"}}, {{var "hl"}}); {{var "j"}}++ {
{{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
if {{var "hl"}} {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
} else {
{{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
}
{{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
{{var "c"}} = true
}
{{end -}}
{{var "h"}}.ElemContainerState({{var "j"}})
{{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */ -}}
{{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
{{ decLineVar $x -}}
{{var "v"}} <- {{ $x }}
{{else}}{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
var {{var "db"}} bool
if {{var "j"}} >= len({{var "v"}}) {
{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
{{var "c"}} = true
{{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
{{end -}}
}
if {{var "db"}} {
z.DecSwallow()
} else {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x -}}
}
{{end -}}
}
{{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "j"}}]
{{var "c"}} = true
} else if {{var "j"}} == 0 && {{var "v"}} == nil {
{{var "v"}} = []{{ .Typ }}{}
{{var "c"}} = true
}
{{end -}}
}
{{var "h"}}.End()
{{if not isArray }}if {{var "c"}} {
*{{ .Varname }} = {{var "v"}}
}
{{end -}}


@ -1,58 +0,0 @@
{{var "v"}} := *{{ .Varname }}
{{var "l"}} := z.DecReadMapStart()
if {{var "l"}} == codecSelferDecContainerLenNil{{xs}} {
*{{ .Varname }} = nil
} else {
if {{var "v"}} == nil {
{{var "rl"}} := z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
*{{ .Varname }} = {{var "v"}}
}
{{ $mk := var "mk" -}}
var {{ $mk }} {{ .KTyp }}
var {{var "mv"}} {{ .Typ }}
var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
if z.DecBasicHandle().MapValueReset {
{{if decElemKindPtr}}{{var "mg"}} = true
{{else if decElemKindIntf}}if !z.DecBasicHandle().InterfaceReset { {{var "mg"}} = true }
{{else if not decElemKindImmutable}}{{var "mg"}} = true
{{end}} }
if {{var "l"}} != 0 {
{{var "hl"}} := {{var "l"}} > 0
for {{var "j"}} := 0; z.DecContainerNext({{var "j"}}, {{var "l"}}, {{var "hl"}}); {{var "j"}}++ {
z.DecReadMapElemKey()
{{ if eq .KTyp "string" -}}
{{ decLineVarK $mk -}}{{- /* decLineVarKStrZC $mk */ -}}
{{ else -}}
{{ decLineVarK $mk -}}
{{ end -}}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */ -}}
if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = z.DecStringZC({{var "bv"}})
}
{{ end -}}
{{if decElemKindPtr -}}
{{var "ms"}} = true
{{end -}}
if {{var "mg"}} {
{{if decElemKindPtr -}}
{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{ $mk }}]
if {{var "mok"}} {
{{var "ms"}} = false
}
{{else -}}
{{var "mv"}} = {{var "v"}}[{{ $mk }}]
{{end -}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
z.DecReadMapElemValue()
{{var "mdn"}} = false
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y -}}
if {{var "mdn"}} {
{{var "v"}}[{{ $mk }}] = {{decElemZero}}
} else {{if decElemKindPtr}} if {{var "ms"}} {{end}} {
{{var "v"}}[{{ $mk }}] = {{var "mv"}}
}
}
} // else len==0: leave as-is (do not clear map entries)
z.DecReadMapEnd()
}


@ -1,27 +0,0 @@
{{.Label}}:
switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; {
case timeout{{.Sfx}} == 0: // only consume available
for {
select {
case b{{.Sfx}} := <-{{.Chan}}:
{{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
default:
break {{.Label}}
}
}
case timeout{{.Sfx}} > 0: // consume until timeout
tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
for {
select {
case b{{.Sfx}} := <-{{.Chan}}:
{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
case <-tt{{.Sfx}}.C:
// close(tt.C)
break {{.Label}}
}
}
default: // consume until close
for b{{.Sfx}} := range {{.Chan}} {
{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
}
}
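The removed template above drains a channel into a slice before encoding, keyed off ChanRecvTimeout: zero consumes only what is immediately available, a positive value consumes until a timer fires, and any other value consumes until the channel is closed. The same control flow as a standalone sketch, assuming a plain int channel and the standard time package (not the generated code):

```
// Sketch of the three drain policies the template implements.
func drainChan(ch <-chan int, timeout time.Duration) (out []int) {
L:
	switch {
	case timeout == 0: // only consume what is already available
		for {
			select {
			case b := <-ch:
				out = append(out, b)
			default:
				break L
			}
		}
	case timeout > 0: // consume until the timer fires
		tt := time.NewTimer(timeout)
		for {
			select {
			case b := <-ch:
				out = append(out, b)
			case <-tt.C:
				break L
			}
		}
	default: // negative timeout: consume until the channel is closed
		for b := range ch {
			out = append(out, b)
		}
	}
	return
}
```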


@ -1,294 +0,0 @@
// comment this out // + build ignore
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
package codec
import (
"encoding"
"reflect"
)
// GenVersion is the current version of codecgen.
const GenVersion = 28
// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continuously and without notice.
// GenHelperEncoder is exported so that it can be used externally by codecgen.
//
// Library users: DO NOT USE IT DIRECTLY or INDIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
func GenHelper() (g genHelper) { return }
type genHelper struct{}
func (genHelper) Encoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
ge = genHelperEncoder{e: e}
ee = genHelperEncDriver{encDriver: e.e}
return
}
func (genHelper) Decoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
gd = genHelperDecoder{d: d}
dd = genHelperDecDriver{decDriver: d.d}
return
}
type genHelperEncDriver struct {
encDriver
}
type genHelperDecDriver struct {
decDriver
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct {
M mustHdl
F fastpathT
e *Encoder
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperDecoder struct {
C checkOverflow
F fastpathT
d *Decoder
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
return f.e.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWr() *encWr {
return f.e.w()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool {
return f.e.be // f.e.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) IsJSONHandle() bool {
return f.e.js
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
// f.e.encodeI(iv, false, false)
f.e.encodeValue(reflect.ValueOf(iv), nil)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
bs, fnerr := iv.MarshalText()
f.e.marshalUtf8(bs, fnerr)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
bs, fnerr := iv.MarshalJSON()
f.e.marshalAsis(bs, fnerr)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
bs, fnerr := iv.MarshalBinary()
f.e.marshalRaw(bs, fnerr)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) Extension(v interface{}) (xfn *extTypeTagFn) {
return f.e.h.getExtForI(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
f.e.e.EncodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapStart(length int) { f.e.mapStart(length) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapEnd() { f.e.mapEnd() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteArrayStart(length int) { f.e.arrayStart(length) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteArrayEnd() { f.e.arrayEnd() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteArrayElem() { f.e.arrayElem() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapElemKey() { f.e.mapElemKey() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapElemValue() { f.e.mapElemValue() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeComplex64(v complex64) { f.e.encodeComplex64(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeComplex128(v complex128) { f.e.encodeComplex128(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncode(v interface{}) { f.e.encode(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFnGivenAddr(v interface{}) *codecFn {
return f.e.h.fn(reflect.TypeOf(v).Elem())
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeNumBoolStrKindGivenAddr(v interface{}, encFn *codecFn) {
f.e.encodeValueNonNil(reflect.ValueOf(v).Elem(), encFn)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeMapNonNil(v interface{}) {
if skipFastpathTypeSwitchInDirectCall || !fastpathEncodeTypeSwitch(v, f.e) {
f.e.encodeValueNonNil(reflect.ValueOf(v), nil)
}
}
// ---------------- DECODER FOLLOWS -----------------
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
return f.d.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinary() bool {
return f.d.be // f.d.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() { f.d.swallow() }
// // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// func (f genHelperDecoder) DecScratchBuffer() []byte {
// return f.d.b[:]
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
return &f.d.b
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
rv := reflect.ValueOf(iv)
if chkPtr {
if x, _ := isDecodeable(rv); !x {
f.d.haltAsNotDecodeable(rv)
}
}
f.d.decodeValue(rv, nil)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
return f.d.decSliceHelperStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
f.d.structFieldNotFound(index, name)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
f.d.arrayCannotExpand(sliceLen, streamLen)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
halt.onerror(tm.UnmarshalText(f.d.d.DecodeStringAsBytes()))
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
f.d.jsonUnmarshalV(tm)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
halt.onerror(bm.UnmarshalBinary(f.d.d.DecodeBytes(nil)))
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) IsJSONHandle() bool {
return f.d.js
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) Extension(v interface{}) (xfn *extTypeTagFn) {
return f.d.h.getExtForI(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
f.d.d.DecodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
return decInferLen(clen, maxlen, unit)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapStart() int { return f.d.mapStart(f.d.d.ReadMapStart()) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapEnd() { f.d.mapEnd() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadArrayStart() int { return f.d.arrayStart(f.d.d.ReadArrayStart()) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadArrayEnd() { f.d.arrayEnd() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadArrayElem() { f.d.arrayElem() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapElemKey() { f.d.mapElemKey() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapElemValue() { f.d.mapElemValue() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecDecodeFloat32() float32 { return f.d.decodeFloat32() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStringZC(v []byte) string { return f.d.stringZC(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecodeBytesInto(v []byte) []byte { return f.d.decodeBytesInto(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerNext(j, containerLen int, hasLen bool) bool {
// return f.d.containerNext(j, containerLen, hasLen)
// rewriting so it can be inlined
if hasLen {
return j < containerLen
}
return !f.d.checkBreak()
}


@ -1,273 +0,0 @@
// comment this out // + build ignore
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
package codec
import (
"encoding"
"reflect"
)
// GenVersion is the current version of codecgen.
const GenVersion = {{ .Version }}
// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continuously and without notice.
{{/*
// To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat).
//
// When static codecs are created for types, they will use this value
// to perform encoding or decoding of primitives or known slice or map types.
*/ -}}
// GenHelperEncoder is exported so that it can be used externally by codecgen.
//
// Library users: DO NOT USE IT DIRECTLY or INDIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
func GenHelper() (g genHelper) { return }
type genHelper struct {}
func (genHelper) Encoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
ge = genHelperEncoder{e: e}
ee = genHelperEncDriver{encDriver: e.e}
return
}
func (genHelper) Decoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
gd = genHelperDecoder{d: d}
dd = genHelperDecDriver{decDriver: d.d}
return
}
type genHelperEncDriver struct {
encDriver
}
type genHelperDecDriver struct {
decDriver
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct {
M mustHdl
F fastpathT
e *Encoder
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperDecoder struct {
C checkOverflow
F fastpathT
d *Decoder
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
return f.e.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWr() *encWr {
return f.e.w()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool {
return f.e.be // f.e.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) IsJSONHandle() bool {
return f.e.js
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
// f.e.encodeI(iv, false, false)
f.e.encodeValue(reflect.ValueOf(iv), nil)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
bs, fnerr := iv.MarshalText()
f.e.marshalUtf8(bs, fnerr)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
bs, fnerr := iv.MarshalJSON()
f.e.marshalAsis(bs, fnerr)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
bs, fnerr := iv.MarshalBinary()
f.e.marshalRaw(bs, fnerr)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) Extension(v interface{}) (xfn *extTypeTagFn) {
return f.e.h.getExtForI(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
f.e.e.EncodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapStart(length int) { f.e.mapStart(length) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapEnd() { f.e.mapEnd() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteArrayStart(length int) { f.e.arrayStart(length) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteArrayEnd() { f.e.arrayEnd() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteArrayElem() { f.e.arrayElem() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapElemKey() { f.e.mapElemKey() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapElemValue() { f.e.mapElemValue() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeComplex64(v complex64) { f.e.encodeComplex64(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeComplex128(v complex128) { f.e.encodeComplex128(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncode(v interface{}) { f.e.encode(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFnGivenAddr(v interface{}) *codecFn { return f.e.h.fn(reflect.TypeOf(v).Elem()) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeNumBoolStrKindGivenAddr(v interface{}, encFn *codecFn) {
f.e.encodeValueNonNil(reflect.ValueOf(v).Elem(), encFn)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeMapNonNil(v interface{}) {
if skipFastpathTypeSwitchInDirectCall || !fastpathEncodeTypeSwitch(v, f.e) {
f.e.encodeValueNonNil(reflect.ValueOf(v), nil)
}
}
// ---------------- DECODER FOLLOWS -----------------
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
return f.d.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinary() bool {
return f.d.be // f.d.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() { f.d.swallow() }
// // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// func (f genHelperDecoder) DecScratchBuffer() []byte {
// return f.d.b[:]
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
return &f.d.b
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
rv := reflect.ValueOf(iv)
if chkPtr {
if x, _ := isDecodeable(rv); !x {
f.d.haltAsNotDecodeable(rv)
}
}
f.d.decodeValue(rv, nil)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
return f.d.decSliceHelperStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
f.d.structFieldNotFound(index, name)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
f.d.arrayCannotExpand(sliceLen, streamLen)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
halt.onerror(tm.UnmarshalText(f.d.d.DecodeStringAsBytes()))
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
f.d.jsonUnmarshalV(tm)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
halt.onerror(bm.UnmarshalBinary(f.d.d.DecodeBytes(nil)))
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) IsJSONHandle() bool {
return f.d.js
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) Extension(v interface{}) (xfn *extTypeTagFn) {
return f.d.h.getExtForI(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
f.d.d.DecodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
return decInferLen(clen, maxlen, unit)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapStart() int { return f.d.mapStart(f.d.d.ReadMapStart()) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapEnd() { f.d.mapEnd() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadArrayStart() int { return f.d.arrayStart(f.d.d.ReadArrayStart()) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadArrayEnd() { f.d.arrayEnd() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadArrayElem() { f.d.arrayElem() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapElemKey() { f.d.mapElemKey() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapElemValue() { f.d.mapElemValue() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecDecodeFloat32() float32 { return f.d.decodeFloat32() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStringZC(v []byte) string { return f.d.stringZC(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecodeBytesInto(v []byte) []byte { return f.d.decodeBytesInto(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerNext(j, containerLen int, hasLen bool) bool {
// return f.d.containerNext(j, containerLen, hasLen)
// rewriting so it can be inlined
if hasLen {
return j < containerLen
}
return !f.d.checkBreak()
}
{{/*
// MARKER: remove WriteStr, as it cannot be inlined as of 20230201.
// Instead, generated code calls (*encWr).WriteStr directly.
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// func (f genHelperEncoder) WriteStr(s string) {
// f.e.encWr.writestr(s)
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) I2Rtid(v interface{}) uintptr {
return i2rtid(v)
}
*/ -}}


@ -1,192 +0,0 @@
// +build codecgen.exec
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
const genDecMapTmpl = `
{{var "v"}} := *{{ .Varname }}
{{var "l"}} := z.DecReadMapStart()
if {{var "l"}} == codecSelferDecContainerLenNil{{xs}} {
*{{ .Varname }} = nil
} else {
if {{var "v"}} == nil {
{{var "rl"}} := z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
*{{ .Varname }} = {{var "v"}}
}
{{ $mk := var "mk" -}}
var {{ $mk }} {{ .KTyp }}
var {{var "mv"}} {{ .Typ }}
var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
if z.DecBasicHandle().MapValueReset {
{{if decElemKindPtr}}{{var "mg"}} = true
{{else if decElemKindIntf}}if !z.DecBasicHandle().InterfaceReset { {{var "mg"}} = true }
{{else if not decElemKindImmutable}}{{var "mg"}} = true
{{end}} }
if {{var "l"}} != 0 {
{{var "hl"}} := {{var "l"}} > 0
for {{var "j"}} := 0; z.DecContainerNext({{var "j"}}, {{var "l"}}, {{var "hl"}}); {{var "j"}}++ {
z.DecReadMapElemKey()
{{ if eq .KTyp "string" -}}
{{ decLineVarK $mk -}}{{- /* decLineVarKStrZC $mk */ -}}
{{ else -}}
{{ decLineVarK $mk -}}
{{ end -}}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */ -}}
if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = z.DecStringZC({{var "bv"}})
}
{{ end -}}
{{if decElemKindPtr -}}
{{var "ms"}} = true
{{end -}}
if {{var "mg"}} {
{{if decElemKindPtr -}}
{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{ $mk }}]
if {{var "mok"}} {
{{var "ms"}} = false
}
{{else -}}
{{var "mv"}} = {{var "v"}}[{{ $mk }}]
{{end -}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
z.DecReadMapElemValue()
{{var "mdn"}} = false
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y -}}
if {{var "mdn"}} {
{{var "v"}}[{{ $mk }}] = {{decElemZero}}
} else {{if decElemKindPtr}} if {{var "ms"}} {{end}} {
{{var "v"}}[{{ $mk }}] = {{var "mv"}}
}
}
} // else len==0: leave as-is (do not clear map entries)
z.DecReadMapEnd()
}
`
const genDecListTmpl = `
{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}
{{if not isArray -}}
var {{var "c"}} bool {{/* // changed */}}
_ = {{var "c"}}
if {{var "h"}}.IsNil {
if {{var "v"}} != nil {
{{var "v"}} = nil
{{var "c"}} = true
}
} else {{end -}}
if {{var "l"}} == 0 {
{{if isSlice -}}
if {{var "v"}} == nil {
{{var "v"}} = []{{ .Typ }}{}
{{var "c"}} = true
} else if len({{var "v"}}) != 0 {
{{var "v"}} = {{var "v"}}[:0]
{{var "c"}} = true
} {{else if isChan }}if {{var "v"}} == nil {
{{var "v"}} = make({{ .CTyp }}, 0)
{{var "c"}} = true
}
{{end -}}
} else {
{{var "hl"}} := {{var "l"}} > 0
var {{var "rl"}} int
_ = {{var "rl"}}
{{if isSlice }} if {{var "hl"}} {
if {{var "l"}} > cap({{var "v"}}) {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
if {{var "rl"}} <= cap({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
} else {
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
}
{{var "c"}} = true
} else if {{var "l"}} != len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "l"}}]
{{var "c"}} = true
}
}
{{end -}}
var {{var "j"}} int
{{/* // var {{var "dn"}} bool */ -}}
for {{var "j"}} = 0; z.DecContainerNext({{var "j"}}, {{var "l"}}, {{var "hl"}}); {{var "j"}}++ {
{{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
if {{var "hl"}} {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
} else {
{{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
}
{{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
{{var "c"}} = true
}
{{end -}}
{{var "h"}}.ElemContainerState({{var "j"}})
{{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */ -}}
{{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
{{ decLineVar $x -}}
{{var "v"}} <- {{ $x }}
{{else}}{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
var {{var "db"}} bool
if {{var "j"}} >= len({{var "v"}}) {
{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
{{var "c"}} = true
{{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
{{end -}}
}
if {{var "db"}} {
z.DecSwallow()
} else {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x -}}
}
{{end -}}
}
{{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "j"}}]
{{var "c"}} = true
} else if {{var "j"}} == 0 && {{var "v"}} == nil {
{{var "v"}} = []{{ .Typ }}{}
{{var "c"}} = true
}
{{end -}}
}
{{var "h"}}.End()
{{if not isArray }}if {{var "c"}} {
*{{ .Varname }} = {{var "v"}}
}
{{end -}}
`
const genEncChanTmpl = `
{{.Label}}:
switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; {
case timeout{{.Sfx}} == 0: // only consume available
for {
select {
case b{{.Sfx}} := <-{{.Chan}}:
{{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
default:
break {{.Label}}
}
}
case timeout{{.Sfx}} > 0: // consume until timeout
tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
for {
select {
case b{{.Sfx}} := <-{{.Chan}}:
{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
case <-tt{{.Sfx}}.C:
// close(tt.C)
break {{.Label}}
}
}
default: // consume until close
for b{{.Sfx}} := range {{.Chan}} {
{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
}
}
`

File diff suppressed because it is too large

586 vendor/github.com/ugorji/go/codec/gen_mono.go generated vendored Normal file

@ -0,0 +1,586 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build codec.build
package codec
import (
"go/ast"
"go/format"
"go/parser"
"go/token"
"os"
"slices"
"strings"
)
// This tool will monomorphize types scoped to a specific format.
//
// This tool only monomorphized the type Name, and not a function Name.
// Explicitly, generic functions are not supported, as they cannot be monomorphized
// to a specific format without a corresponding name change.
//
// However, for types constrained to encWriter or decReader,
// which are shared across formats, there's no place to put them without duplication.
const genMonoParserMode = parser.AllErrors | parser.SkipObjectResolution
var genMonoSpecialFieldTypes = []string{"helperDecReader"}
// These functions should take the address of first param when monomorphized
var genMonoSpecialFunc4Addr = []string{} // {"decByteSlice"}
var genMonoImportsToSkip = []string{`"errors"`, `"fmt"`, `"net/rpc"`}
var genMonoRefImportsVia_ = [][2]string{
// {"errors", "New"},
}
var genMonoCallsToSkip = []string{"callMake"}
type genMonoFieldState uint
const (
genMonoFieldRecv genMonoFieldState = iota << 1
genMonoFieldParamsResult
genMonoFieldStruct
)
type genMonoImports struct {
set map[string]struct{}
specs []*ast.ImportSpec
}
type genMono struct {
files map[string][]byte
typParam map[string]*ast.Field
typParamTransient map[string]*ast.Field
}
func (x *genMono) init() {
x.files = make(map[string][]byte)
x.typParam = make(map[string]*ast.Field)
x.typParamTransient = make(map[string]*ast.Field)
}
func (x *genMono) reset() {
clear(x.typParam)
clear(x.typParamTransient)
}
func (m *genMono) hdl(hname string) {
m.reset()
m.do(hname, []string{"encode.go", "decode.go", hname + ".go"}, []string{"base.notfastpath.go", "base.notfastpath.notmono.go"}, "", "")
m.do(hname, []string{"base.notfastpath.notmono.go"}, nil, ".notfastpath", ` && (notfastpath || codec.notfastpath)`)
m.do(hname, []string{"base.fastpath.notmono.generated.go"}, []string{"base.fastpath.generated.go"}, ".fastpath", ` && !notfastpath && !codec.notfastpath`)
}
func (m *genMono) do(hname string, fnames, tnames []string, fnameInfx string, buildTagsSfx string) {
// keep m.typParams across whole call, as all others use it
const fnameSfx = ".mono.generated.go"
fname := hname + fnameInfx + fnameSfx
var imports = genMonoImports{set: make(map[string]struct{})}
r1, fset := m.merge(fnames, tnames, &imports)
m.trFile(r1, hname, true)
r2, fset := m.merge(fnames, tnames, &imports)
m.trFile(r2, hname, false)
r0 := genMonoOutInit(imports.specs, fname)
r0.Decls = append(r0.Decls, r1.Decls...)
r0.Decls = append(r0.Decls, r2.Decls...)
// output r1 to a file
f, err := os.Create(fname)
halt.onerror(err)
defer f.Close()
var s genMonoStrBuilder
s.s(`//go:build !notmono && !codec.notmono `).s(buildTagsSfx).s(`
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
`)
_, err = f.Write(s.v)
halt.onerror(err)
err = format.Node(f, fset, r0)
halt.onerror(err)
}
func (x *genMono) file(fname string) (b []byte) {
b = x.files[fname]
if b == nil {
var err error
b, err = os.ReadFile(fname)
halt.onerror(err)
x.files[fname] = b
}
return
}
func (x *genMono) merge(fNames, tNames []string, imports *genMonoImports) (dst *ast.File, fset *token.FileSet) {
// typParams used in fnLoadTyps
var typParams map[string]*ast.Field
var loadTyps bool
fnLoadTyps := func(node ast.Node) bool {
var ok bool
switch n := node.(type) {
case *ast.GenDecl:
if n.Tok == token.TYPE {
for _, v := range n.Specs {
nn := v.(*ast.TypeSpec)
ok = genMonoTypeParamsOk(nn.TypeParams)
if ok {
// each decl will have only 1 var/type
typParams[nn.Name.Name] = nn.TypeParams.List[0]
if loadTyps {
dst.Decls = append(dst.Decls, &ast.GenDecl{Tok: n.Tok, Specs: []ast.Spec{v}})
}
}
}
}
return false
}
return true
}
// we only merge top-level methods and types
fnIdX := func(n *ast.FuncDecl, n2 *ast.IndexExpr) (ok bool) {
n9, ok9 := n2.Index.(*ast.Ident)
n3, ok := n2.X.(*ast.Ident) // n3 = type name
ok = ok && ok9 && n9.Name == "T"
if ok {
_, ok = x.typParam[n3.Name]
}
return
}
fnLoadMethodsAndImports := func(node ast.Node) bool {
var ok bool
switch n := node.(type) {
case *ast.FuncDecl:
// TypeParams is nil for methods, as it is defined at the type node
// instead, look at the name, and
// if IndexExpr.Index=T, and IndexExpr.X matches a type name seen already
// then ok = true
if n.Recv == nil || len(n.Recv.List) != 1 {
return false
}
ok = false
switch nn := n.Recv.List[0].Type.(type) {
case *ast.IndexExpr:
ok = fnIdX(n, nn)
case *ast.StarExpr:
switch nn2 := nn.X.(type) {
case *ast.IndexExpr:
ok = fnIdX(n, nn2)
}
}
if ok {
dst.Decls = append(dst.Decls, n)
}
return false
case *ast.GenDecl:
if n.Tok == token.IMPORT {
for _, v := range n.Specs {
nn := v.(*ast.ImportSpec)
if slices.Contains(genMonoImportsToSkip, nn.Path.Value) {
continue
}
if _, ok = imports.set[nn.Path.Value]; !ok {
imports.specs = append(imports.specs, nn)
imports.set[nn.Path.Value] = struct{}{}
}
}
}
return false
}
return true
}
fset = token.NewFileSet()
fnLoadAsts := func(names []string) (asts []*ast.File) {
for _, fname := range names {
fsrc := x.file(fname)
f, err := parser.ParseFile(fset, fname, fsrc, genMonoParserMode)
halt.onerror(err)
asts = append(asts, f)
}
return
}
clear(x.typParamTransient)
dst = &ast.File{
Name: &ast.Ident{Name: "codec"},
}
fs := fnLoadAsts(fNames)
ts := fnLoadAsts(tNames)
loadTyps = true
typParams = x.typParam
for _, v := range fs {
ast.Inspect(v, fnLoadTyps)
}
loadTyps = false
typParams = x.typParamTransient
for _, v := range ts {
ast.Inspect(v, fnLoadTyps)
}
typParams = nil
for _, v := range fs {
ast.Inspect(v, fnLoadMethodsAndImports)
}
return
}
func (x *genMono) trFile(r *ast.File, hname string, isbytes bool) {
fn := func(node ast.Node) bool {
switch n := node.(type) {
case *ast.TypeSpec:
// type x[T encDriver] struct { ... }
if !genMonoTypeParamsOk(n.TypeParams) {
return false
}
x.trType(n, hname, isbytes)
return false
case *ast.FuncDecl:
if n.Recv == nil || len(n.Recv.List) != 1 {
return false
}
if _, ok := n.Recv.List[0].Type.(*ast.Ident); ok {
return false
}
tp := x.trMethodSign(n, hname, isbytes) // receiver, params, results
// handle the body
x.trMethodBody(n.Body, tp, hname, isbytes)
return false
}
return true
}
ast.Inspect(r, fn)
// set type params to nil, and Pos to NoPos
fn = func(node ast.Node) bool {
switch n := node.(type) {
case *ast.FuncType:
if genMonoTypeParamsOk(n.TypeParams) {
n.TypeParams = nil
}
case *ast.TypeSpec: // for type ...
if genMonoTypeParamsOk(n.TypeParams) {
n.TypeParams = nil
}
}
return true
}
ast.Inspect(r, fn)
}
func (x *genMono) trType(n *ast.TypeSpec, hname string, isbytes bool) {
sfx, _, _, hnameUp := genMonoIsBytesVals(hname, isbytes)
tp := n.TypeParams.List[0]
switch tp.Type.(*ast.Ident).Name {
case "encDriver", "decDriver":
n.Name.Name += hnameUp + sfx
case "encWriter", "decReader":
n.Name.Name += sfx
}
// handle the Struct and Array types
switch nn := n.Type.(type) {
case *ast.StructType:
x.trStruct(nn, tp, hname, isbytes)
case *ast.ArrayType:
x.trArray(nn, tp, hname, isbytes)
}
}
func (x *genMono) trMethodSign(n *ast.FuncDecl, hname string, isbytes bool) (tp *ast.Field) {
// check if recv type is not parameterized
tp = x.trField(n.Recv.List[0], nil, hname, isbytes, genMonoFieldRecv)
// handle params and results
x.trMethodSignNonRecv(n.Type.Params, tp, hname, isbytes)
x.trMethodSignNonRecv(n.Type.Results, tp, hname, isbytes)
return
}
func (x *genMono) trMethodSignNonRecv(r *ast.FieldList, tp *ast.Field, hname string, isbytes bool) {
if r == nil || len(r.List) == 0 {
return
}
for _, v := range r.List {
x.trField(v, tp, hname, isbytes, genMonoFieldParamsResult)
}
}
func (x *genMono) trStruct(r *ast.StructType, tp *ast.Field, hname string, isbytes bool) {
// search for fields, and update accordingly
// type x[T encDriver] struct { w T }
// var x *A[T]
// A[T]
if r == nil || r.Fields == nil || len(r.Fields.List) == 0 {
return
}
for _, v := range r.Fields.List {
x.trField(v, tp, hname, isbytes, genMonoFieldStruct)
}
}
func (x *genMono) trArray(n *ast.ArrayType, tp *ast.Field, hname string, isbytes bool) {
sfx, _, _, hnameUp := genMonoIsBytesVals(hname, isbytes)
// type fastpathEs[T encDriver] [56]fastpathE[T]
// p := tp.Names[0].Name
switch elt := n.Elt.(type) {
// case *ast.InterfaceType:
case *ast.IndexExpr:
if elt.Index.(*ast.Ident).Name == "T" { // generic
n.Elt = ast.NewIdent(elt.X.(*ast.Ident).Name + hnameUp + sfx)
}
}
}
func (x *genMono) trMethodBody(r *ast.BlockStmt, tp *ast.Field, hname string, isbytes bool) {
// find the parent node for an indexExpr, or a T/*T, and set the value back in there
fn := func(pnode ast.Node) bool {
var pn *ast.Ident
fnUp := func() {
x.updateIdentForT(pn, hname, tp, isbytes, false)
}
switch n := pnode.(type) {
// case *ast.SelectorExpr:
// case *ast.TypeAssertExpr:
// case *ast.IndexExpr:
case *ast.StarExpr:
if genMonoUpdateIndexExprT(&pn, n.X) {
n.X = pn
fnUp()
}
case *ast.CallExpr:
for i4, n4 := range n.Args {
if genMonoUpdateIndexExprT(&pn, n4) {
n.Args[i4] = pn
fnUp()
}
}
if n4, ok4 := n.Fun.(*ast.Ident); ok4 && slices.Contains(genMonoSpecialFunc4Addr, n4.Name) {
n.Args[0] = &ast.UnaryExpr{Op: token.AND, X: n.Args[0].(*ast.SelectorExpr)}
}
case *ast.CompositeLit:
if genMonoUpdateIndexExprT(&pn, n.Type) {
n.Type = pn
fnUp()
}
case *ast.ArrayType:
if genMonoUpdateIndexExprT(&pn, n.Elt) {
n.Elt = pn
fnUp()
}
case *ast.ValueSpec:
for i2, n2 := range n.Values {
if genMonoUpdateIndexExprT(&pn, n2) {
n.Values[i2] = pn
fnUp()
}
}
if genMonoUpdateIndexExprT(&pn, n.Type) {
n.Type = pn
fnUp()
}
case *ast.BinaryExpr:
// early return here, since the 2 things can apply
if genMonoUpdateIndexExprT(&pn, n.X) {
n.X = pn
fnUp()
}
if genMonoUpdateIndexExprT(&pn, n.Y) {
n.Y = pn
fnUp()
}
return true
}
return true
}
ast.Inspect(r, fn)
}
func (x *genMono) trField(f *ast.Field, tpt *ast.Field, hname string, isbytes bool, state genMonoFieldState) (tp *ast.Field) {
var pn *ast.Ident
switch nn := f.Type.(type) {
case *ast.IndexExpr:
if genMonoUpdateIndexExprT(&pn, nn) {
f.Type = pn
}
case *ast.StarExpr:
if genMonoUpdateIndexExprT(&pn, nn.X) {
nn.X = pn
}
case *ast.FuncType:
x.trMethodSignNonRecv(nn.Params, tpt, hname, isbytes)
x.trMethodSignNonRecv(nn.Results, tpt, hname, isbytes)
return
case *ast.ArrayType:
x.trArray(nn, tpt, hname, isbytes)
return
case *ast.Ident:
if state == genMonoFieldRecv || nn.Name != "T" {
return
}
pn = nn // "T"
if state == genMonoFieldParamsResult {
f.Type = &ast.StarExpr{X: pn}
}
}
if pn == nil {
return
}
tp = x.updateIdentForT(pn, hname, tpt, isbytes, true)
return
}
func (x *genMono) updateIdentForT(pn *ast.Ident, hname string, tp *ast.Field,
isbytes bool, lookupTP bool) (tp2 *ast.Field) {
sfx, writer, reader, hnameUp := genMonoIsBytesVals(hname, isbytes)
// handle special ones e.g. helperDecReader et al
if slices.Contains(genMonoSpecialFieldTypes, pn.Name) {
pn.Name += sfx
return
}
if pn.Name != "T" && lookupTP {
tp = x.typParam[pn.Name]
if tp == nil {
tp = x.typParamTransient[pn.Name]
}
}
paramtyp := tp.Type.(*ast.Ident).Name
if pn.Name == "T" {
switch paramtyp {
case "encDriver", "decDriver":
pn.Name = hname + genMonoTitleCase(paramtyp) + sfx
case "encWriter":
pn.Name = writer
case "decReader":
pn.Name = reader
}
} else {
switch paramtyp {
case "encDriver", "decDriver":
pn.Name += hnameUp + sfx
case "encWriter", "decReader":
pn.Name += sfx
}
}
return tp
}
func genMonoUpdateIndexExprT(pn **ast.Ident, node ast.Node) (pnok bool) {
*pn = nil
if n2, ok := node.(*ast.IndexExpr); ok {
n9, ok9 := n2.Index.(*ast.Ident)
n3, ok := n2.X.(*ast.Ident)
if ok && ok9 && n9.Name == "T" {
*pn, pnok = ast.NewIdent(n3.Name), true
}
}
return
}
func genMonoTitleCase(s string) string {
return strings.ToUpper(s[:1]) + s[1:]
}
func genMonoIsBytesVals(hName string, isbytes bool) (suffix, writer, reader, hNameUp string) {
hNameUp = genMonoTitleCase(hName)
if isbytes {
return "Bytes", "bytesEncAppender", "bytesDecReader", hNameUp
}
return "IO", "bufioEncWriter", "ioDecReader", hNameUp
}
func genMonoTypeParamsOk(v *ast.FieldList) (ok bool) {
if v == nil || v.List == nil || len(v.List) != 1 {
return false
}
pn := v.List[0]
if len(pn.Names) != 1 {
return false
}
pnName := pn.Names[0].Name
if pnName != "T" {
return false
}
// ignore any nodes which are not idents e.g. cmp.orderedRv
vv, ok := pn.Type.(*ast.Ident)
if !ok {
return false
}
switch vv.Name {
case "encDriver", "decDriver", "encWriter", "decReader":
return true
}
return false
}
func genMonoCopy(src *ast.File) (dst *ast.File) {
dst = &ast.File{
Name: &ast.Ident{Name: "codec"},
}
dst.Decls = append(dst.Decls, src.Decls...)
return
}
type genMonoStrBuilder struct {
v []byte
}
func (x *genMonoStrBuilder) s(v string) *genMonoStrBuilder {
x.v = append(x.v, v...)
return x
}
func genMonoOutInit(importSpecs []*ast.ImportSpec, fname string) (f *ast.File) {
// ParseFile seems to skip the //go:build stanza
// it should be written directly into the file
var s genMonoStrBuilder
s.s(`
package codec
import (
`)
for _, v := range importSpecs {
s.s("\t").s(v.Path.Value).s("\n")
}
s.s(")\n")
for _, v := range genMonoRefImportsVia_ {
s.s("var _ = ").s(v[0]).s(".").s(v[1]).s("\n")
}
f, err := parser.ParseFile(token.NewFileSet(), fname, s.v, genMonoParserMode)
halt.onerror(err)
return
}
func genMonoAll() {
// hdls := []Handle{
// (*SimpleHandle)(nil),
// (*JsonHandle)(nil),
// (*CborHandle)(nil),
// (*BincHandle)(nil),
// (*MsgpackHandle)(nil),
// }
hdls := []string{"simple", "json", "cbor", "binc", "msgpack"}
var m genMono
m.init()
for _, v := range hdls {
m.hdl(v)
}
}
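// The translation above is, in essence, an AST rewrite: generic instantiations
// X[T] are replaced by concrete per-handle identifiers (e.g. fastpathE[T] ->
// fastpathEJsonBytes for the json handle, bytes variant). Below is a minimal,
// standalone sketch of that element rewrite (not part of this package; the type
// names in src and the Json/Bytes suffixes are illustrative assumptions only).
package main

import (
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"os"
)

func main() {
	src := "package codec\ntype fastpathEs[T encDriver] [56]fastpathE[T]\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "src.go", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		arr, ok := n.(*ast.ArrayType)
		if !ok {
			return true
		}
		// mirror trArray above: if the array element is a generic instantiation
		// X[T], replace it with a plain identifier carrying the handle suffix
		if ix, ok := arr.Elt.(*ast.IndexExpr); ok {
			if id, ok := ix.Index.(*ast.Ident); ok && id.Name == "T" {
				arr.Elt = ast.NewIdent(ix.X.(*ast.Ident).Name + "Json" + "Bytes")
			}
		}
		return true
	})
	printer.Fprint(os.Stdout, fset, f) // element type is now fastpathEJsonBytes
}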


@ -1,15 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.5
// +build go1.5
package codec
import "reflect"
const reflectArrayOfSupported = true
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
return reflect.ArrayOf(count, elem)
}


@ -1,20 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.5
// +build !go1.5
package codec
import (
"errors"
"reflect"
)
const reflectArrayOfSupported = false
var errNoReflectArrayOf = errors.New("codec: reflect.ArrayOf unsupported by this go version")
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
panic(errNoReflectArrayOf)
}


@ -0,0 +1,20 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.21
package codec
import "errors"
// Moving forward, this codec package will support at least the last 4 major Go releases.
//
// As of early summer 2025, codec will support go 1.21, 1.22, 1.23, 1.24 releases of go.
// This allows use of the following:
// - stabilized generics
// - min/max/clear
// - slice->array conversion
func init() {
panic(errors.New("codec: supports go 1.21 and above only"))
}


@ -1,13 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.5
// +build go1.5
package codec
import "time"
func fmtTime(t time.Time, fmt string, b []byte) []byte {
return t.AppendFormat(b, fmt)
}


@ -1,16 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.5
// +build !go1.5
package codec
import "time"
func fmtTime(t time.Time, fmt string, b []byte) []byte {
s := t.Format(fmt)
b = b[:len(s)]
copy(b, s)
return b
}


@ -1,28 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.20 && !safe && !codec.safe && !appengine
// +build go1.20,!safe,!codec.safe,!appengine
package codec
import (
_ "reflect" // needed for go linkname(s)
"unsafe"
)
func growslice(typ unsafe.Pointer, old unsafeSlice, num int) (s unsafeSlice) {
// culled from GOROOT/runtime/slice.go
num -= old.Cap - old.Len
s = rtgrowslice(old.Data, old.Cap+num, old.Cap, num, typ)
s.Len = old.Len
return
}
//go:linkname rtgrowslice runtime.growslice
//go:noescape
func rtgrowslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, typ unsafe.Pointer) unsafeSlice
// //go:linkname growslice reflect.growslice
// //go:noescape
// func growslice(typ unsafe.Pointer, old unsafeSlice, cap int) unsafeSlice


@ -1,16 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.9 && !go1.20 && !safe && !codec.safe && !appengine
// +build go1.9,!go1.20,!safe,!codec.safe,!appengine
package codec
import (
_ "runtime" // needed for go linkname(s)
"unsafe"
)
//go:linkname growslice runtime.growslice
//go:noescape
func growslice(typ unsafe.Pointer, old unsafeSlice, num int) unsafeSlice


@ -1,13 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.10
// +build !go1.10
package codec
import "reflect"
func makeMapReflect(t reflect.Type, size int) reflect.Value {
return reflect.MakeMap(t)
}


@ -1,14 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.10 && (safe || codec.safe || appengine)
// +build go1.10
// +build safe codec.safe appengine
package codec
import "reflect"
func makeMapReflect(t reflect.Type, size int) reflect.Value {
return reflect.MakeMapWithSize(t, size)
}


@ -1,25 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.10 && !safe && !codec.safe && !appengine
// +build go1.10,!safe,!codec.safe,!appengine
package codec
import (
"reflect"
"unsafe"
)
func makeMapReflect(typ reflect.Type, size int) (rv reflect.Value) {
t := (*unsafeIntf)(unsafe.Pointer(&typ)).ptr
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
urv.typ = t
urv.flag = uintptr(reflect.Map)
urv.ptr = makemap(t, size, nil)
return
}
//go:linkname makemap runtime.makemap
//go:noescape
func makemap(typ unsafe.Pointer, size int, h unsafe.Pointer) unsafe.Pointer


@ -1,41 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.12 && (safe || codec.safe || appengine)
// +build go1.12
// +build safe codec.safe appengine
package codec
import "reflect"
type mapIter struct {
t *reflect.MapIter
m reflect.Value
values bool
}
func (t *mapIter) Next() (r bool) {
return t.t.Next()
}
func (t *mapIter) Key() reflect.Value {
return t.t.Key()
}
func (t *mapIter) Value() (r reflect.Value) {
if t.values {
return t.t.Value()
}
return
}
func (t *mapIter) Done() {}
func mapRange(t *mapIter, m, k, v reflect.Value, values bool) {
*t = mapIter{
m: m,
t: m.MapRange(),
values: values,
}
}


@ -1,45 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.7 && !go1.12 && (safe || codec.safe || appengine)
// +build go1.7
// +build !go1.12
// +build safe codec.safe appengine
package codec
import "reflect"
type mapIter struct {
m reflect.Value
keys []reflect.Value
j int
values bool
}
func (t *mapIter) Next() (r bool) {
t.j++
return t.j < len(t.keys)
}
func (t *mapIter) Key() reflect.Value {
return t.keys[t.j]
}
func (t *mapIter) Value() (r reflect.Value) {
if t.values {
return t.m.MapIndex(t.keys[t.j])
}
return
}
func (t *mapIter) Done() {}
func mapRange(t *mapIter, m, k, v reflect.Value, values bool) {
*t = mapIter{
m: m,
keys: m.MapKeys(),
values: values,
j: -1,
}
}


@ -0,0 +1,16 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !safe && !codec.safe && !appengine && !go1.24
package codec
import "unsafe"
// retrofitted from the hIter struct
type unsafeMapIterPadding struct {
_ [6]unsafe.Pointer // padding: *maptype, *hmap, buckets, *bmap, overflow, oldoverflow,
_ [4]uintptr // padding: uintptr, uint8, bool fields
_ uintptr // padding: wasted (try to fill cache-line at multiple of 4)
}


@ -0,0 +1,15 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !safe && !codec.safe && !appengine && go1.24
package codec
import "unsafe"
// retrofitted from the linknameIter struct (compatibility layer for swissmaps)
type unsafeMapIterPadding struct {
_ [2]unsafe.Pointer // padding: *abi.SwissMapType, *maps.Iter
_ uintptr // padding: wasted (try to fill cache-line at multiple of 4)
}


@ -1,9 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.10
// +build go1.10
package codec
const allowSetUnexportedEmbeddedPtr = false


@ -1,9 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.10
// +build !go1.10
package codec
const allowSetUnexportedEmbeddedPtr = true


@ -1,22 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.4
// +build !go1.4
package codec
import "errors"
// This codec package will only work for go1.4 and above.
// This is for the following reasons:
// - go 1.4 was released in 2014
// - go runtime is written fully in go
// - interface only holds pointers
// - reflect.Value is stabilized as 3 words
var errCodecSupportedOnlyFromGo14 = errors.New("codec: go 1.3 and below are not supported")
func init() {
panic(errCodecSupportedOnlyFromGo14)
}


@ -1,11 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.5 && !go1.6
// +build go1.5,!go1.6
package codec
import "os"
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"


@ -1,11 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.6 && !go1.7
// +build go1.6,!go1.7
package codec
import "os"
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"


@ -1,9 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.7
// +build go1.7
package codec
const genCheckVendor = true


@ -1,9 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.5
// +build !go1.5
package codec
var genCheckVendor = false

File diff suppressed because it is too large.


@ -1,147 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
// maxArrayLen is the size of uint, which determines
// the maximum length of any array.
const maxArrayLen = 1<<((32<<(^uint(0)>>63))-1) - 1
// All non-std package dependencies live in this file,
// so porting to different environment is easy (just update functions).
func pruneSignExt(v []byte, pos bool) (n int) {
if len(v) < 2 {
} else if pos && v[0] == 0 {
for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
}
} else if !pos && v[0] == 0xff {
for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
}
}
return
}
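// Worked examples for pruneSignExt (illustrative; values follow the loop above):
//   pruneSignExt([]byte{0x00, 0x00, 0x7f}, true)  == 2 // two redundant leading 0x00 bytes
//   pruneSignExt([]byte{0xff, 0xff, 0x80}, false) == 2 // two redundant leading 0xff sign bytes
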
func halfFloatToFloatBits(h uint16) (f uint32) {
// retrofitted from:
// - OGRE (Object-Oriented Graphics Rendering Engine)
// function: halfToFloatI https://www.ogre3d.org/docs/api/1.9/_ogre_bitwise_8h_source.html
s := uint32(h >> 15)
m := uint32(h & 0x03ff)
e := int32((h >> 10) & 0x1f)
if e == 0 {
if m == 0 { // plus or minus 0
return s << 31
}
// Denormalized number -- renormalize it
for (m & 0x0400) == 0 {
m <<= 1
e -= 1
}
e += 1
m &= ^uint32(0x0400)
} else if e == 31 {
if m == 0 { // Inf
return (s << 31) | 0x7f800000
}
return (s << 31) | 0x7f800000 | (m << 13) // NaN
}
e = e + (127 - 15)
m = m << 13
return (s << 31) | (uint32(e) << 23) | m
}
func floatToHalfFloatBits(i uint32) (h uint16) {
// retrofitted from:
// - OGRE (Object-Oriented Graphics Rendering Engine)
// function: halfToFloatI https://www.ogre3d.org/docs/api/1.9/_ogre_bitwise_8h_source.html
// - http://www.java2s.com/example/java-utility-method/float-to/floattohalf-float-f-fae00.html
s := (i >> 16) & 0x8000
e := int32(((i >> 23) & 0xff) - (127 - 15))
m := i & 0x7fffff
var h32 uint32
if e <= 0 {
if e < -10 { // zero
h32 = s // track -0 vs +0
} else {
m = (m | 0x800000) >> uint32(1-e)
h32 = s | (m >> 13)
}
} else if e == 0xff-(127-15) {
if m == 0 { // Inf
h32 = s | 0x7c00
} else { // NAN
m >>= 13
var me uint32
if m == 0 {
me = 1
}
h32 = s | 0x7c00 | m | me
}
} else {
if e > 30 { // Overflow
h32 = s | 0x7c00
} else {
h32 = s | (uint32(e) << 10) | (m >> 13)
}
}
h = uint16(h32)
return
}
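// Worked bit patterns (illustrative; derived from the two functions above):
//   halfFloatToFloatBits(0x3c00) == 0x3f800000 // half +1.0 -> float32 +1.0
//   halfFloatToFloatBits(0xc000) == 0xc0000000 // half -2.0 -> float32 -2.0
//   floatToHalfFloatBits(0x3f800000) == 0x3c00 // float32 +1.0 round-trips to half +1.0
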
// growCap will return a new capacity for a slice, given the following:
// - oldCap: current capacity
// - unit: in-memory size of an element
// - num: number of elements to add
func growCap(oldCap, unit, num uint) (newCap uint) {
// appendslice logic (if cap < 1024, *2, else *1.25):
// leads to many copy calls, especially when copying bytes.
// bytes.Buffer model (2*cap + n): much better for bytes.
// smarter way is to take the byte-size of the appended element(type) into account
	// maintain 1 threshold:
// t1: if cap <= t1, newcap = 2x
// else newcap = 1.5x
//
// t1 is always >= 1024.
// This means that, if unit size >= 16, then always do 2x or 1.5x (ie t1, t2, t3 are all same)
//
	// With this, appending for bytes increases by:
// 100% up to 4K
// 50% beyond that
// unit can be 0 e.g. for struct{}{}; handle that appropriately
maxCap := num + (oldCap * 3 / 2)
if unit == 0 || maxCap > maxArrayLen || maxCap < oldCap { // handle wraparound, etc
return maxArrayLen
}
var t1 uint = 1024 // default thresholds for large values
if unit <= 4 {
t1 = 8 * 1024
} else if unit <= 16 {
t1 = 2 * 1024
}
newCap = 2 + num
if oldCap > 0 {
if oldCap <= t1 { // [0,t1]
newCap = num + (oldCap * 2)
} else { // (t1,infinity]
newCap = maxCap
}
}
// ensure newCap takes multiples of a cache line (size is a multiple of 64)
t1 = newCap * unit
if t2 := t1 % 64; t2 != 0 {
t1 += 64 - t2
newCap = t1 / unit
}
return
}


@ -2,7 +2,6 @@
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.9 || safe || codec.safe || appengine
// +build !go1.9 safe codec.safe appengine
package codec
@ -19,8 +18,11 @@ import (
const safeMode = true
const transientSizeMax = 0
const transientValueHasStringSlice = true
func isTransientType4Size(size uint32) bool { return true }
type mapReqParams struct{}
func getMapReqParams(ti *typeInfo) (r mapReqParams) { return }
func byteAt(b []byte, index uint) byte {
return b[index]
@ -30,14 +32,6 @@ func setByteAt(b []byte, index uint, val byte) {
b[index] = val
}
func byteSliceOf(b []byte, start, end uint) []byte {
return b[start:end]
}
// func byteSliceWithLen(b []byte, length uint) []byte {
// return b[:length]
// }
func stringView(v []byte) string {
return string(v)
}
@ -50,34 +44,26 @@ func byteSliceSameData(v1 []byte, v2 []byte) bool {
return cap(v1) != 0 && cap(v2) != 0 && &(v1[:1][0]) == &(v2[:1][0])
}
func okBytes2(b []byte) (v [2]byte) {
copy(v[:], b)
return
}
func okBytes3(b []byte) (v [3]byte) {
copy(v[:], b)
return
}
func okBytes4(b []byte) (v [4]byte) {
copy(v[:], b)
return
}
func okBytes8(b []byte) (v [8]byte) {
copy(v[:], b)
return
}
func isNil(v interface{}) (rv reflect.Value, isnil bool) {
func isNil(v interface{}, checkPtr bool) (rv reflect.Value, b bool) {
b = v == nil
if b || !checkPtr {
return
}
rv = reflect.ValueOf(v)
if isnilBitset.isset(byte(rv.Kind())) {
isnil = rv.IsNil()
if rv.Kind() == reflect.Ptr {
b = rv.IsNil()
}
return
}
func ptrToLowLevel(v interface{}) interface{} {
return v
}
func lowLevelToPtr[T any](v interface{}) *T {
return v.(*T)
}
func eq4i(i0, i1 interface{}) bool {
return i0 == i1
}
@ -85,17 +71,21 @@ func eq4i(i0, i1 interface{}) bool {
func rv4iptr(i interface{}) reflect.Value { return reflect.ValueOf(i) }
func rv4istr(i interface{}) reflect.Value { return reflect.ValueOf(i) }
// func rv4i(i interface{}) reflect.Value { return reflect.ValueOf(i) }
// func rv4iK(i interface{}, kind byte, isref bool) reflect.Value { return reflect.ValueOf(i) }
func rv2i(rv reflect.Value) interface{} {
return rv.Interface()
if rv.IsValid() {
return rv.Interface()
}
return nil
}
func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value {
return rv.Addr()
}
func rvPtrIsNil(rv reflect.Value) bool {
return rv.IsNil()
}
func rvIsNil(rv reflect.Value) bool {
return rv.IsNil()
}
@ -131,6 +121,30 @@ func i2rtid(i interface{}) uintptr {
// --------------------------
// is this an empty interface/ptr/struct/map/slice/chan/array
func isEmptyContainerValue(v reflect.Value, tinfos *TypeInfos, recursive bool) (empty bool) {
switch v.Kind() {
case reflect.Array:
for i, vlen := 0, v.Len(); i < vlen; i++ {
if !isEmptyValue(v.Index(i), tinfos, false) {
return false
}
}
return true
case reflect.Map, reflect.Slice, reflect.Chan:
return v.IsNil() || v.Len() == 0
case reflect.Interface, reflect.Ptr:
empty = v.IsNil()
if recursive && !empty {
return isEmptyValue(v.Elem(), tinfos, recursive)
}
return empty
case reflect.Struct:
return isEmptyStruct(v, tinfos, recursive)
}
return false
}
func isEmptyValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
switch v.Kind() {
case reflect.Invalid:
@ -215,7 +229,7 @@ func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
// We only care about what we can encode/decode,
// so that is what we use to check omitEmpty.
for _, si := range ti.sfi.source() {
sfv := si.path.field(v)
sfv := si.fieldNoAlloc(v, true)
if sfv.IsValid() && !isEmptyValue(sfv, tinfos, recursive) {
return false
}
@ -223,6 +237,10 @@ func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
return true
}
func makeMapReflect(t reflect.Type, size int) reflect.Value {
return reflect.MakeMapWithSize(t, size)
}
// --------------------------
type perTypeElem struct {
@ -247,13 +265,9 @@ type perType struct {
v []perTypeElem
}
type decPerType struct {
perType
}
type decPerType = perType
type encPerType struct {
perType
}
type encPerType = perType
func (x *perType) elem(t reflect.Type) *perTypeElem {
rtid := rt2id(t)
@ -295,10 +309,44 @@ func (x *perType) AddressableRO(v reflect.Value) (rv reflect.Value) {
return
}
// --------------------------
type mapIter struct {
t *reflect.MapIter
m reflect.Value
values bool
}
func (t *mapIter) Next() (r bool) {
return t.t.Next()
}
func (t *mapIter) Key() reflect.Value {
return t.t.Key()
}
func (t *mapIter) Value() (r reflect.Value) {
if t.values {
return t.t.Value()
}
return
}
func (t *mapIter) Done() {}
func mapRange(t *mapIter, m, k, v reflect.Value, values bool) {
*t = mapIter{
m: m,
t: m.MapRange(),
values: values,
}
}
// --------------------------
type structFieldInfos struct {
c []*structFieldInfo
s []*structFieldInfo
t uint8To32TrieNode
// byName map[string]*structFieldInfo // find sfi given a name
}
func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
@ -306,55 +354,24 @@ func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
x.s = sorted
}
func (x *structFieldInfos) sorted() (v []*structFieldInfo) { return x.s }
// func (x *structFieldInfos) count() int { return len(x.c) }
func (x *structFieldInfos) source() (v []*structFieldInfo) { return x.c }
type atomicClsErr struct {
v atomic.Value
}
func (x *atomicClsErr) load() (e clsErr) {
if i := x.v.Load(); i != nil {
e = i.(clsErr)
}
return
}
func (x *atomicClsErr) store(p clsErr) {
x.v.Store(p)
}
func (x *structFieldInfos) sorted() (v []*structFieldInfo) { return x.s }
// --------------------------
type atomicTypeInfoSlice struct {
v atomic.Value
type uint8To32TrieNodeNoKids struct {
key uint8
valid bool // the value marks the end of a full stored string
_ [2]byte // padding
value uint32
}
func (x *atomicTypeInfoSlice) load() (e []rtid2ti) {
if i := x.v.Load(); i != nil {
e = i.([]rtid2ti)
}
return
}
type uint8To32TrieNodeKids = []uint8To32TrieNode
func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
x.v.Store(p)
}
// --------------------------
type atomicRtidFnSlice struct {
v atomic.Value
}
func (x *atomicRtidFnSlice) load() (e []codecRtidFn) {
if i := x.v.Load(); i != nil {
e = i.([]codecRtidFn)
}
return
}
func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
x.v.Store(p)
}
func (x *uint8To32TrieNode) setKids(kids []uint8To32TrieNode) { x.kids = kids }
func (x *uint8To32TrieNode) getKids() []uint8To32TrieNode { return x.kids }
func (x *uint8To32TrieNode) truncKids() { x.kids = x.kids[:0] } // set len to 0
// --------------------------
func (n *fauxUnion) ru() reflect.Value {
@ -501,13 +518,13 @@ func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value
// ----------------
func rvSliceIndex(rv reflect.Value, i int, ti *typeInfo) reflect.Value {
func rvArrayIndex(rv reflect.Value, i int, _ *typeInfo, _ bool) reflect.Value {
return rv.Index(i)
}
func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) reflect.Value {
return rv.Index(i)
}
// func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) reflect.Value {
// return rv.Index(i)
// }
func rvSliceZeroCap(t reflect.Type) (v reflect.Value) {
return reflect.MakeSlice(t, 0, 0)
@ -523,7 +540,7 @@ func rvCapSlice(rv reflect.Value) int {
func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
l := rv.Len()
if scratch == nil || rv.CanAddr() {
if scratch == nil && rv.CanAddr() {
return rv.Slice(0, l).Bytes()
}
@ -537,7 +554,7 @@ func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
}
func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
v = rvZeroAddrK(reflectArrayOf(rvLenSlice(rv), rv.Type().Elem()), reflect.Array)
v = rvZeroAddrK(reflect.ArrayOf(rvLenSlice(rv), rv.Type().Elem()), reflect.Array)
reflect.Copy(v, rv)
return
}
@ -647,60 +664,43 @@ func rvLenMap(rv reflect.Value) int {
return rv.Len()
}
// func copybytes(to, from []byte) int {
// return copy(to, from)
// }
// func copybytestr(to []byte, from string) int {
// return copy(to, from)
// }
// func rvLenArray(rv reflect.Value) int { return rv.Len() }
// ------------ map range and map indexing ----------
func mapStoresElemIndirect(elemsize uintptr) bool { return false }
func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, _, _ bool) {
func mapSet(m, k, v reflect.Value, _ mapReqParams) {
m.SetMapIndex(k, v)
}
func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, _, _ bool) (vv reflect.Value) {
func mapGet(m, k, v reflect.Value, _ mapReqParams) (vv reflect.Value) {
return m.MapIndex(k)
}
// func mapDelete(m, k reflect.Value) {
// m.SetMapIndex(k, reflect.Value{})
// }
func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (r reflect.Value) {
return // reflect.New(t).Elem()
}
// ---------- ENCODER optimized ---------------
func (e *Encoder) jsondriver() *jsonEncDriver {
return e.e.(*jsonEncDriver)
}
// ---------- DECODER optimized ---------------
func (d *Decoder) jsondriver() *jsonDecDriver {
return d.d.(*jsonDecDriver)
}
func (d *Decoder) stringZC(v []byte) (s string) {
return d.string(v)
}
func (d *Decoder) mapKeyString(callFnRvk *bool, kstrbs, kstr2bs *[]byte) string {
return d.string(*kstr2bs)
func (d *decoderBase) bytes2Str(in []byte, att dBytesAttachState) (s string, mutable bool) {
return d.detach2Str(in, att), false
}
// ---------- structFieldInfo optimized ---------------
func (n *structFieldInfoPathNode) rvField(v reflect.Value) reflect.Value {
func (n *structFieldInfoNode) rvField(v reflect.Value) reflect.Value {
return v.Field(int(n.index))
}
// ---------- others ---------------
// --------------------------
type atomicRtidFnSlice struct {
v atomic.Value
}
func (x *atomicRtidFnSlice) load() interface{} {
return x.v.Load()
}
func (x *atomicRtidFnSlice) store(p interface{}) {
x.v.Store(p)
}


@ -1,21 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.9 || safe || codec.safe || appengine || !gc
// +build !go1.9 safe codec.safe appengine !gc
package codec
// import "reflect"
// This files contains safe versions of the code where the unsafe versions are not supported
// in either gccgo or gollvm.
//
// - rvType:
// reflect.toType is not supported in gccgo, gollvm.
// func rvType(rv reflect.Value) reflect.Type {
// return rv.Type()
// }
var _ = 0


@ -0,0 +1,59 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build safe || codec.safe || !gc
package codec
// growCap will return a new capacity for a slice, given the following:
// - oldCap: current capacity
// - unit: in-memory size of an element
// - num: number of elements to add
func growCap(oldCap, unit, num uint) (newCap uint) {
// appendslice logic (if cap < 1024, *2, else *1.25):
// leads to many copy calls, especially when copying bytes.
// bytes.Buffer model (2*cap + n): much better for bytes.
// smarter way is to take the byte-size of the appended element(type) into account
	// maintain 1 threshold:
// t1: if cap <= t1, newcap = 2x
// else newcap = 1.5x
//
// t1 is always >= 1024.
// This means that, if unit size >= 16, then always do 2x or 1.5x (ie t1, t2, t3 are all same)
//
	// With this, appending for bytes increases by:
// 100% up to 4K
// 50% beyond that
// unit can be 0 e.g. for struct{}{}; handle that appropriately
maxCap := num + (oldCap * 3 / 2)
if unit == 0 || maxCap > maxArrayLen || maxCap < oldCap { // handle wraparound, etc
return maxArrayLen
}
var t1 uint = 1024 // default thresholds for large values
if unit <= 4 {
t1 = 8 * 1024
} else if unit <= 16 {
t1 = 2 * 1024
}
newCap = 2 + num
if oldCap > 0 {
if oldCap <= t1 { // [0,t1]
newCap = num + (oldCap * 2)
} else { // (t1,infinity]
newCap = maxCap
}
}
// ensure newCap takes multiples of a cache line (size is a multiple of 64)
t1 = newCap * unit
if t2 := t1 % 64; t2 != 0 {
t1 += 64 - t2
newCap = t1 / unit
}
return
}
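// Worked examples (illustrative; the arithmetic follows the code above):
//   growCap(100, 1, 50)  == 256  // 100 <= t1(8192): 50 + 2*100 = 250, rounded up to a 64-byte multiple
//   growCap(1024, 8, 1)  == 2056 // 1024 <= t1(2048): 1 + 2*1024 = 2049, then 2049*8 rounded up to 16448 bytes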


@ -1,12 +1,15 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !safe && !codec.safe && !appengine && go1.9
// +build !safe,!codec.safe,!appengine,go1.9
//go:build !safe && !codec.safe && !appengine && go1.21
// minimum of go 1.9 is needed, as that is the minimum for all features and linked functions we need
// - typedmemclr was introduced in go 1.8
// - mapassign_fastXXX was introduced in go 1.9
// minimum of go 1.21 is needed, as that is the minimum for all features and linked functions we need
// - typedmemclr : go1.8
// - mapassign_fastXXX: go1.9
// - clear was added in go1.21
// - unsafe.String(Data): go1.20
// - unsafe.Add: go1.17
// - generics/any: go1.18
// etc
package codec
@ -21,7 +24,7 @@ import (
// This file has unsafe variants of some helper functions.
// MARKER: See helper_unsafe.go for the usage documentation.
//
// There are a number of helper_*unsafe*.go files.
//
// - helper_unsafe
@ -41,19 +44,32 @@ import (
// As of March 2021, we cannot differentiate whether running with gccgo or gollvm
// using a build constraint, as both satisfy 'gccgo' build tag.
// Consequently, we must use the lowest common denominator to support both.
//
// For reflect.Value code, we decided to do the following:
// - if we know the kind, we can elide conditional checks for
// - SetXXX (Int, Uint, String, Bool, etc)
// - SetLen
//
// We can also optimize
// - IsNil
// We can also optimize many others, incl IsNil, etc
//
// MARKER: Some functions here will not be hit during code coverage runs due to optimizations, e.g.
// - rvCopySlice: called by decode if rvGrowSlice did not set new slice into pointer to orig slice.
// however, helper_unsafe sets it, so no need to call rvCopySlice later
// - rvSlice: same as above
//
// MARKER: Handling flagIndir ----
//
// flagIndir means that the reflect.Value holds a pointer to the data itself.
//
// flagIndir can be set for:
// - references
// Here, type.IfaceIndir() --> false
// flagIndir is usually false (except when the value is addressable, in which case flagIndir may be true)
// - everything else (numbers, bools, string, slice, struct, etc).
// Here, type.IfaceIndir() --> true
// flagIndir is always true
//
// This knowledge is used across this file, e.g. in rv2i and rvRefPtr
const safeMode = false
@ -88,7 +104,9 @@ const (
const transientSizeMax = 64
// should struct/array support internal strings and slices?
const transientValueHasStringSlice = false
// const transientValueHasStringSlice = false
func isTransientType4Size(size uint32) bool { return size <= transientSizeMax }
type unsafeString struct {
Data unsafe.Pointer
@ -144,7 +162,8 @@ func (x *unsafePerTypeElem) addrFor(k reflect.Kind) unsafe.Pointer {
x.slice = unsafeSlice{} // memclr
return unsafe.Pointer(&x.slice)
}
x.arr = [transientSizeMax]byte{} // memclr
clear(x.arr[:])
// x.arr = [transientSizeMax]byte{} // memclr
return unsafe.Pointer(&x.arr)
}
@ -152,9 +171,7 @@ type perType struct {
elems [2]unsafePerTypeElem
}
type decPerType struct {
perType
}
type decPerType = perType
type encPerType struct{}
@ -183,19 +200,6 @@ func byteAt(b []byte, index uint) byte {
return *(*byte)(unsafe.Pointer(uintptr((*unsafeSlice)(unsafe.Pointer(&b)).Data) + uintptr(index)))
}
func byteSliceOf(b []byte, start, end uint) []byte {
s := (*unsafeSlice)(unsafe.Pointer(&b))
s.Data = unsafe.Pointer(uintptr(s.Data) + uintptr(start))
s.Len = int(end - start)
s.Cap -= int(start)
return b
}
// func byteSliceWithLen(b []byte, length uint) []byte {
// (*unsafeSlice)(unsafe.Pointer(&b)).Len = int(length)
// return b
// }
func setByteAt(b []byte, index uint, val byte) {
// b[index] = val
*(*byte)(unsafe.Pointer(uintptr((*unsafeSlice)(unsafe.Pointer(&b)).Data) + uintptr(index))) = val
@ -222,49 +226,26 @@ func byteSliceSameData(v1 []byte, v2 []byte) bool {
return (*unsafeSlice)(unsafe.Pointer(&v1)).Data == (*unsafeSlice)(unsafe.Pointer(&v2)).Data
}
// MARKER: okBytesN functions will copy N bytes into the top slots of the return array.
// These functions expect that the bounds check has already occurred and the inputs are valid.
// copy(...) does a number of checks which are unnecessary when we already know we are in bounds.
func okBytes2(b []byte) [2]byte {
return *((*[2]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}
func okBytes3(b []byte) [3]byte {
return *((*[3]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}
func okBytes4(b []byte) [4]byte {
return *((*[4]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}
func okBytes8(b []byte) [8]byte {
return *((*[8]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}
// isNil says whether the value v is nil.
// This applies to references like map/ptr/unsafepointer/chan/func,
// and non-reference values like interface/slice.
func isNil(v interface{}) (rv reflect.Value, isnil bool) {
var ui = (*unsafeIntf)(unsafe.Pointer(&v))
isnil = ui.ptr == nil
if !isnil {
rv, isnil = unsafeIsNilIntfOrSlice(ui, v)
}
return
}
func unsafeIsNilIntfOrSlice(ui *unsafeIntf, v interface{}) (rv reflect.Value, isnil bool) {
rv = reflect.ValueOf(v) // reflect.ValueOf is currently not inline'able - so call it directly
tk := rv.Kind()
isnil = (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.ptr) == nil
return
}
// return the pointer for a reference (map/chan/func/pointer/unsafe.Pointer).
// true references (map, func, chan, ptr - NOT slice) may be double-referenced? as flagIndir
// isNil checks - without much effort - if an interface is nil.
//
// Assumes that v is a reference (map/func/chan/ptr/unsafe.Pointer)
// returned rv is not guaranteed to be valid (e.g. if v == nil).
//
// Note that this will handle all pointer-sized types e.g.
// pointer, map, chan, func, etc.
func isNil(v interface{}, checkPtr bool) (rv reflect.Value, b bool) {
b = ((*unsafeIntf)(unsafe.Pointer(&v))).ptr == nil
return
}
func ptrToLowLevel[T any](ptr *T) unsafe.Pointer {
return unsafe.Pointer(ptr)
}
func lowLevelToPtr[T any](v unsafe.Pointer) *T {
return (*T)(v)
}
// Given that v is a reference (map/func/chan/ptr/unsafepointer) kind, return the pointer
func rvRefPtr(v *unsafeReflectValue) unsafe.Pointer {
if v.flag&unsafeFlagIndir != 0 {
return *(*unsafe.Pointer)(v.ptr)
@ -295,13 +276,6 @@ func rv4istr(i interface{}) (v reflect.Value) {
}
func rv2i(rv reflect.Value) (i interface{}) {
	// We tap into implementation details from
	// the source go stdlib reflect/value.go, and trim the implementation.
//
// e.g.
// - a map/ptr is a reference, thus flagIndir is not set on it
// - an int/slice is not a reference, thus flagIndir is set on it
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
if refBitset.isset(byte(rv.Kind())) && urv.flag&unsafeFlagIndir != 0 {
urv.ptr = *(*unsafe.Pointer)(urv.ptr)
@ -316,12 +290,22 @@ func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value {
return rv
}
// return true if this rv - got from a pointer kind - is nil.
// For now, only use for struct fields of pointer types, as we're guaranteed
// that flagIndir will never be set.
func rvPtrIsNil(rv reflect.Value) bool {
return rvIsNil(rv)
}
// checks if a nil'able value is nil
func rvIsNil(rv reflect.Value) bool {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
if urv.flag&unsafeFlagIndir != 0 {
return *(*unsafe.Pointer)(urv.ptr) == nil
if urv.flag&unsafeFlagIndir == 0 {
return urv.ptr == nil
}
return urv.ptr == nil
// flagIndir is set for a reference (ptr/map/func/unsafepointer/chan)
// OR kind is slice/interface
return *(*unsafe.Pointer)(urv.ptr) == nil
}
func rvSetSliceLen(rv reflect.Value, length int) {
@ -499,29 +483,62 @@ func isEmptyValueFallbackRecur(urv *unsafeReflectValue, v reflect.Value, tinfos
return false
}
// is this an empty interface/ptr/struct/map/slice/chan/array
func isEmptyContainerValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
switch v.Kind() {
case reflect.Slice:
return (*unsafeSlice)(urv.ptr).Len == 0
case reflect.Struct:
if tinfos == nil {
tinfos = defTypeInfos
}
ti := tinfos.find(uintptr(urv.typ))
if ti == nil {
ti = tinfos.load(v.Type())
}
return unsafeCmpZero(urv.ptr, int(ti.size))
case reflect.Interface, reflect.Ptr:
// isnil := urv.ptr == nil // (not sufficient, as a pointer value encodes the type)
isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
if recursive && !isnil {
return isEmptyValue(v.Elem(), tinfos, recursive)
}
return isnil
case reflect.Chan:
return urv.ptr == nil || len_chan(rvRefPtr(urv)) == 0
case reflect.Map:
return urv.ptr == nil || len_map(rvRefPtr(urv)) == 0
case reflect.Array:
return v.Len() == 0 ||
urv.ptr == nil ||
urv.typ == nil ||
rtsize2(urv.typ) == 0 ||
unsafeCmpZero(urv.ptr, int(rtsize2(urv.typ)))
}
return false
}
// --------------------------
type structFieldInfos struct {
c unsafe.Pointer // source
s unsafe.Pointer // sorted
c unsafe.Pointer // source
s unsafe.Pointer // sorted
t uint8To32TrieNode
length int
// byName map[string]*structFieldInfo // find sfi given a name
}
// func (x *structFieldInfos) load(source, sorted []*structFieldInfo, sourceNames, sortedNames []string) {
func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
s := (*unsafeSlice)(unsafe.Pointer(&sorted))
x.s = s.Data
x.length = s.Len
var s *unsafeSlice
s = (*unsafeSlice)(unsafe.Pointer(&source))
x.c = s.Data
}
func (x *structFieldInfos) sorted() (v []*structFieldInfo) {
*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.s, x.length, x.length}
// s := (*unsafeSlice)(unsafe.Pointer(&v))
// s.Data = x.sorted0
// s.Len = x.length
// s.Cap = s.Len
return
x.length = s.Len
s = (*unsafeSlice)(unsafe.Pointer(&sorted))
x.s = s.Data
}
func (x *structFieldInfos) source() (v []*structFieldInfo) {
@ -529,66 +546,48 @@ func (x *structFieldInfos) source() (v []*structFieldInfo) {
return
}
// atomicXXX is expected to be 2 words (for symmetry with atomic.Value)
//
// Note that we do not atomically load/store length and data pointer separately,
// as this could lead to some races. Instead, we atomically load/store cappedSlice.
//
// Note: with atomic.(Load|Store)Pointer, we MUST work with an unsafe.Pointer directly.
// ----------------------
type atomicTypeInfoSlice struct {
v unsafe.Pointer // *[]rtid2ti
}
func (x *atomicTypeInfoSlice) load() (s []rtid2ti) {
x2 := atomic.LoadPointer(&x.v)
if x2 != nil {
s = *(*[]rtid2ti)(x2)
}
func (x *structFieldInfos) sorted() (v []*structFieldInfo) {
*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.s, x.length, x.length}
return
}
func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
atomic.StorePointer(&x.v, unsafe.Pointer(&p))
// --------------------------
type uint8To32TrieNodeNoKids struct {
key uint8
valid bool // the value marks the end of a full stored string
numkids uint8
_ byte // padding
value uint32
}
// MARKER: in safe mode, atomicXXX are atomic.Value, which contains an interface{}.
// This is 2 words.
// consider padding atomicXXX here with a uintptr, so they fit into 2 words also.
type uint8To32TrieNodeKids = *uint8To32TrieNode
func (x *uint8To32TrieNode) setKids(kids []uint8To32TrieNode) {
x.numkids = uint8(len(kids))
x.kids = &kids[0]
}
func (x *uint8To32TrieNode) getKids() (v []uint8To32TrieNode) {
*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{unsafe.Pointer(x.kids), int(x.numkids), int(x.numkids)}
return
}
func (x *uint8To32TrieNode) truncKids() { x.numkids = 0 }
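// Intended round trip (illustrative; assumes kids is a non-empty []uint8To32TrieNode):
//   x.setKids(kids)     // stores &kids[0] and uint8(len(kids))
//   v := x.getKids()    // rebuilds a slice header over the same backing array
//   len(v) == len(kids) // true; a later truncKids() only resets numkids to 0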
// --------------------------
// Note that we do not atomically load/store length and data pointer separately,
// as this could lead to some races. Instead, we atomically load/store cappedSlice.
type atomicRtidFnSlice struct {
v unsafe.Pointer // *[]codecRtidFn
}
func (x *atomicRtidFnSlice) load() (s []codecRtidFn) {
x2 := atomic.LoadPointer(&x.v)
if x2 != nil {
s = *(*[]codecRtidFn)(x2)
}
return
func (x *atomicRtidFnSlice) load() (s unsafe.Pointer) {
return atomic.LoadPointer(&x.v)
}
func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}
// --------------------------
type atomicClsErr struct {
v unsafe.Pointer // *clsErr
}
func (x *atomicClsErr) load() (e clsErr) {
x2 := (*clsErr)(atomic.LoadPointer(&x.v))
if x2 != nil {
e = *x2
}
return
}
func (x *atomicClsErr) store(p clsErr) {
atomic.StorePointer(&x.v, unsafe.Pointer(&p))
func (x *atomicRtidFnSlice) store(p unsafe.Pointer) {
atomic.StorePointer(&x.v, p)
}
// --------------------------
@ -660,98 +659,79 @@ func (n *fauxUnion) rb() (v reflect.Value) {
// --------------------------
func rvSetBytes(rv reflect.Value, v []byte) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*[]byte)(urv.ptr) = v
*(*[]byte)(rvPtr(rv)) = v
}
func rvSetString(rv reflect.Value, v string) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*string)(urv.ptr) = v
*(*string)(rvPtr(rv)) = v
}
func rvSetBool(rv reflect.Value, v bool) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*bool)(urv.ptr) = v
*(*bool)(rvPtr(rv)) = v
}
func rvSetTime(rv reflect.Value, v time.Time) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*time.Time)(urv.ptr) = v
*(*time.Time)(rvPtr(rv)) = v
}
func rvSetFloat32(rv reflect.Value, v float32) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*float32)(urv.ptr) = v
*(*float32)(rvPtr(rv)) = v
}
func rvSetFloat64(rv reflect.Value, v float64) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*float64)(urv.ptr) = v
*(*float64)(rvPtr(rv)) = v
}
func rvSetComplex64(rv reflect.Value, v complex64) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*complex64)(urv.ptr) = v
*(*complex64)(rvPtr(rv)) = v
}
func rvSetComplex128(rv reflect.Value, v complex128) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*complex128)(urv.ptr) = v
*(*complex128)(rvPtr(rv)) = v
}
func rvSetInt(rv reflect.Value, v int) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int)(urv.ptr) = v
*(*int)(rvPtr(rv)) = v
}
func rvSetInt8(rv reflect.Value, v int8) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int8)(urv.ptr) = v
*(*int8)(rvPtr(rv)) = v
}
func rvSetInt16(rv reflect.Value, v int16) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int16)(urv.ptr) = v
*(*int16)(rvPtr(rv)) = v
}
func rvSetInt32(rv reflect.Value, v int32) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int32)(urv.ptr) = v
*(*int32)(rvPtr(rv)) = v
}
func rvSetInt64(rv reflect.Value, v int64) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int64)(urv.ptr) = v
*(*int64)(rvPtr(rv)) = v
}
func rvSetUint(rv reflect.Value, v uint) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint)(urv.ptr) = v
*(*uint)(rvPtr(rv)) = v
}
func rvSetUintptr(rv reflect.Value, v uintptr) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uintptr)(urv.ptr) = v
*(*uintptr)(rvPtr(rv)) = v
}
func rvSetUint8(rv reflect.Value, v uint8) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint8)(urv.ptr) = v
*(*uint8)(rvPtr(rv)) = v
}
func rvSetUint16(rv reflect.Value, v uint16) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint16)(urv.ptr) = v
*(*uint16)(rvPtr(rv)) = v
}
func rvSetUint32(rv reflect.Value, v uint32) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint32)(urv.ptr) = v
*(*uint32)(rvPtr(rv)) = v
}
func rvSetUint64(rv reflect.Value, v uint64) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint64)(urv.ptr) = v
*(*uint64)(rvPtr(rv)) = v
}
// ----------------
@ -775,12 +755,10 @@ func rvSetDirect(rv reflect.Value, v reflect.Value) {
uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
if uv.flag&unsafeFlagIndir == 0 {
*(*unsafe.Pointer)(urv.ptr) = uv.ptr
} else if uv.ptr == unsafeZeroAddr {
if urv.ptr != unsafeZeroAddr {
typedmemclr(urv.typ, urv.ptr)
}
} else {
} else if uv.ptr != unsafeZeroAddr {
typedmemmove(urv.typ, urv.ptr, uv.ptr)
} else if urv.ptr != unsafeZeroAddr {
typedmemclr(urv.typ, urv.ptr)
}
}
@ -812,11 +790,9 @@ func rvMakeSlice(rv reflect.Value, ti *typeInfo, xlen, xcap int) (_ reflect.Valu
// It is typically called when we know that SetLen(...) cannot be done.
func rvSlice(rv reflect.Value, length int) reflect.Value {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
var x []struct{}
ux := (*unsafeSlice)(unsafe.Pointer(&x))
*ux = *(*unsafeSlice)(urv.ptr)
ux := *(*unsafeSlice)(urv.ptr) // copy slice header
ux.Len = length
urv.ptr = unsafe.Pointer(ux)
urv.ptr = unsafe.Pointer(&ux)
return rv
}
@ -834,10 +810,16 @@ func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value
// ------------
func rvSliceIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo, isSlice bool) (v reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
uv.ptr = unsafe.Pointer(uintptr(((*unsafeSlice)(urv.ptr)).Data) + uintptr(int(ti.elemsize)*i))
if isSlice {
uv.ptr = unsafe.Pointer(uintptr(((*unsafeSlice)(urv.ptr)).Data))
} else {
uv.ptr = unsafe.Pointer(uintptr(urv.ptr))
}
uv.ptr = unsafe.Add(uv.ptr, ti.elemsize*uint32(i))
// uv.ptr = unsafe.Pointer(ptr + uintptr(int(ti.elemsize)*i))
uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
return
@ -861,19 +843,11 @@ func rvCapSlice(rv reflect.Value) int {
return (*unsafeSlice)(urv.ptr).Cap
}
func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
uv.ptr = unsafe.Pointer(uintptr(urv.ptr) + uintptr(int(ti.elemsize)*i))
uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
return
}
// if scratch is nil, then return a writable view (assuming canAddr=true)
func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
func rvGetArrayBytes(rv reflect.Value, _ []byte) (bs []byte) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
bx := (*unsafeSlice)(unsafe.Pointer(&bs))
// bx.Data, bx.Len, bx.Cap = urv.ptr, rv.Len(), bx.Len
bx.Data = urv.ptr
bx.Len = rv.Len()
bx.Cap = bx.Len
@ -889,7 +863,7 @@ func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
//
// Consequently, we use rvLenSlice, not rvCapSlice.
t := reflectArrayOf(rvLenSlice(rv), rv.Type().Elem())
t := reflect.ArrayOf(rvLenSlice(rv), rv.Type().Elem())
// v = rvZeroAddrK(t, reflect.Array)
uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
@ -921,99 +895,84 @@ func rvCopySlice(dest, src reflect.Value, elemType reflect.Type) {
// ------------
func rvPtr(rv reflect.Value) unsafe.Pointer {
return (*unsafeReflectValue)(unsafe.Pointer(&rv)).ptr
}
func rvGetBool(rv reflect.Value) bool {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*bool)(v.ptr)
return *(*bool)(rvPtr(rv))
}
func rvGetBytes(rv reflect.Value) []byte {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*[]byte)(v.ptr)
return *(*[]byte)(rvPtr(rv))
}
func rvGetTime(rv reflect.Value) time.Time {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*time.Time)(v.ptr)
return *(*time.Time)(rvPtr(rv))
}
func rvGetString(rv reflect.Value) string {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*string)(v.ptr)
return *(*string)(rvPtr(rv))
}
func rvGetFloat64(rv reflect.Value) float64 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*float64)(v.ptr)
return *(*float64)(rvPtr(rv))
}
func rvGetFloat32(rv reflect.Value) float32 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*float32)(v.ptr)
return *(*float32)(rvPtr(rv))
}
func rvGetComplex64(rv reflect.Value) complex64 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*complex64)(v.ptr)
return *(*complex64)(rvPtr(rv))
}
func rvGetComplex128(rv reflect.Value) complex128 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*complex128)(v.ptr)
return *(*complex128)(rvPtr(rv))
}
func rvGetInt(rv reflect.Value) int {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*int)(v.ptr)
return *(*int)(rvPtr(rv))
}
func rvGetInt8(rv reflect.Value) int8 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*int8)(v.ptr)
return *(*int8)(rvPtr(rv))
}
func rvGetInt16(rv reflect.Value) int16 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*int16)(v.ptr)
return *(*int16)(rvPtr(rv))
}
func rvGetInt32(rv reflect.Value) int32 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*int32)(v.ptr)
return *(*int32)(rvPtr(rv))
}
func rvGetInt64(rv reflect.Value) int64 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*int64)(v.ptr)
return *(*int64)(rvPtr(rv))
}
func rvGetUint(rv reflect.Value) uint {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*uint)(v.ptr)
return *(*uint)(rvPtr(rv))
}
func rvGetUint8(rv reflect.Value) uint8 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*uint8)(v.ptr)
return *(*uint8)(rvPtr(rv))
}
func rvGetUint16(rv reflect.Value) uint16 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*uint16)(v.ptr)
return *(*uint16)(rvPtr(rv))
}
func rvGetUint32(rv reflect.Value) uint32 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*uint32)(v.ptr)
return *(*uint32)(rvPtr(rv))
}
func rvGetUint64(rv reflect.Value) uint64 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*uint64)(v.ptr)
return *(*uint64)(rvPtr(rv))
}
func rvGetUintptr(rv reflect.Value) uintptr {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*uintptr)(v.ptr)
return *(*uintptr)(rvPtr(rv))
}
func rvLenMap(rv reflect.Value) int {
@ -1027,32 +986,6 @@ func rvLenMap(rv reflect.Value) int {
return len_map(rvRefPtr((*unsafeReflectValue)(unsafe.Pointer(&rv))))
}
// copy is an intrinsic, which may use asm if length is small,
// or make a runtime call to runtime.memmove if length is large.
// Performance suffers when you always call runtime.memmove function.
//
// Consequently, there's no value in a copybytes call - just call copy() directly
// func copybytes(to, from []byte) (n int) {
// n = (*unsafeSlice)(unsafe.Pointer(&from)).Len
// memmove(
// (*unsafeSlice)(unsafe.Pointer(&to)).Data,
// (*unsafeSlice)(unsafe.Pointer(&from)).Data,
// uintptr(n),
// )
// return
// }
// func copybytestr(to []byte, from string) (n int) {
// n = (*unsafeSlice)(unsafe.Pointer(&from)).Len
// memmove(
// (*unsafeSlice)(unsafe.Pointer(&to)).Data,
// (*unsafeSlice)(unsafe.Pointer(&from)).Data,
// uintptr(n),
// )
// return
// }
// Note: it is hard to find len(...) of an array type,
// as that is a field in the arrayType representing the array, and hard to introspect.
//
@ -1065,24 +998,26 @@ func rvLenMap(rv reflect.Value) int {
//
// It is more performant to provide a value that the map entry is set into,
// and that elides the allocation.
// go 1.4+ has runtime/hashmap.go or runtime/map.go which has a
// hIter struct with the first 2 values being key and value
// of the current iteration.
//
// go 1.4 through go 1.23 (in runtime/hashmap.go or runtime/map.go) has a hIter struct
// with the first 2 values being pointers for key and value of the current iteration.
// The next 6 values are pointers, followed by numeric types (uintptr, uint8, bool, etc).
// This *hIter is passed to mapiterinit, mapiternext, mapiterkey, mapiterelem.
// We bypass the reflect wrapper functions and just use the *hIter directly.
//
// Though *hIter has many fields, we only care about the first 2.
// In go 1.24, swissmap was introduced, and it provides a compatibility layer
// for hIter (called linknameIter). This has only 2 pointer fields after the key and value pointers.
//
// We directly embed this in unsafeMapIter below
// Note: We bypass the reflect wrapper functions and just use the *hIter directly.
//
// hiter is typically about 12 words, but we just fill up unsafeMapIter to 32 words,
// so it fills multiple cache lines and can give some extra space to accommodate small growth.
// When 'faking' these types with our own, we MUST ensure that the GC sees the pointers
// appropriately. These are reflected in goversion_(no)swissmap_unsafe.go files.
// In these files, we pad the extra spaces appropriately.
//
// Note: the faux hIter/linknameIter is directly embedded in unsafeMapIter below
type unsafeMapIter struct {
mtyp, mptr unsafe.Pointer
k, v reflect.Value
k, v unsafeReflectValue
kisref bool
visref bool
mapvalues bool
@ -1092,7 +1027,7 @@ type unsafeMapIter struct {
it struct {
key unsafe.Pointer
value unsafe.Pointer
_ [20]uintptr // padding for other fields (to make up 32 words for enclosing struct)
_ unsafeMapIterPadding
}
}
@ -1112,18 +1047,16 @@ func (t *unsafeMapIter) Next() (r bool) {
}
if helperUnsafeDirectAssignMapEntry || t.kisref {
(*unsafeReflectValue)(unsafe.Pointer(&t.k)).ptr = t.it.key
t.k.ptr = t.it.key
} else {
k := (*unsafeReflectValue)(unsafe.Pointer(&t.k))
typedmemmove(k.typ, k.ptr, t.it.key)
typedmemmove(t.k.typ, t.k.ptr, t.it.key)
}
if t.mapvalues {
if helperUnsafeDirectAssignMapEntry || t.visref {
(*unsafeReflectValue)(unsafe.Pointer(&t.v)).ptr = t.it.value
t.v.ptr = t.it.value
} else {
v := (*unsafeReflectValue)(unsafe.Pointer(&t.v))
typedmemmove(v.typ, v.ptr, t.it.value)
typedmemmove(t.v.typ, t.v.ptr, t.it.value)
}
}
@ -1131,11 +1064,11 @@ func (t *unsafeMapIter) Next() (r bool) {
}
func (t *unsafeMapIter) Key() (r reflect.Value) {
return t.k
return *(*reflect.Value)(unsafe.Pointer(&t.k))
}
func (t *unsafeMapIter) Value() (r reflect.Value) {
return t.v
return *(*reflect.Value)(unsafe.Pointer(&t.v))
}
func (t *unsafeMapIter) Done() {}
@ -1162,14 +1095,14 @@ func mapRange(t *mapIter, m, k, v reflect.Value, mapvalues bool) {
// t.it = (*unsafeMapHashIter)(reflect_mapiterinit(t.mtyp, t.mptr))
mapiterinit(t.mtyp, t.mptr, unsafe.Pointer(&t.it))
t.k = k
t.k = *(*unsafeReflectValue)(unsafe.Pointer(&k))
t.kisref = refBitset.isset(byte(k.Kind()))
if mapvalues {
t.v = v
t.v = *(*unsafeReflectValue)(unsafe.Pointer(&v))
t.visref = refBitset.isset(byte(v.Kind()))
} else {
t.v = reflect.Value{}
t.v = unsafeReflectValue{}
}
}
@ -1182,13 +1115,6 @@ func unsafeMapKVPtr(urv *unsafeReflectValue) unsafe.Pointer {
return urv.ptr
}
// func mapDelete(m, k reflect.Value) {
// var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
// var kptr = unsafeMapKVPtr(urv)
// urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
// mapdelete(urv.typ, rv2ptr(urv), kptr)
// }
// return an addressable reflect value that can be used in mapRange and mapGet operations.
//
// all calls to mapGet or mapRange will call here to get an addressable reflect.Value.
@ -1205,53 +1131,39 @@ func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
return
}
// ---------- ENCODER optimized ---------------
func (e *Encoder) jsondriver() *jsonEncDriver {
return (*jsonEncDriver)((*unsafeIntf)(unsafe.Pointer(&e.e)).ptr)
func makeMapReflect(typ reflect.Type, size int) (rv reflect.Value) {
t := (*unsafeIntf)(unsafe.Pointer(&typ)).ptr
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
urv.typ = t
urv.flag = uintptr(reflect.Map)
urv.ptr = makemap(t, size, nil)
return
}
func (d *Decoder) zerocopystate() bool {
return d.decByteState == decByteStateZerocopy && d.h.ZeroCopy
}
func (d *Decoder) stringZC(v []byte) (s string) {
// MARKER: inline zerocopystate directly so genHelper forwarding function fits within inlining cost
// if d.zerocopystate() {
if d.decByteState == decByteStateZerocopy && d.h.ZeroCopy {
return stringView(v)
}
return d.string(v)
}
func (d *Decoder) mapKeyString(callFnRvk *bool, kstrbs, kstr2bs *[]byte) string {
if !d.zerocopystate() {
*callFnRvk = true
if d.decByteState == decByteStateReuseBuf {
*kstrbs = append((*kstrbs)[:0], (*kstr2bs)...)
*kstr2bs = *kstrbs
}
}
return stringView(*kstr2bs)
}
// ---------- DECODER optimized ---------------
func (d *Decoder) jsondriver() *jsonDecDriver {
return (*jsonDecDriver)((*unsafeIntf)(unsafe.Pointer(&d.d)).ptr)
func (d *decoderBase) bytes2Str(in []byte, state dBytesAttachState) (s string, mutable bool) {
return stringView(in), state <= dBytesAttachBuffer
}
// ---------- structFieldInfo optimized ---------------
func (n *structFieldInfoPathNode) rvField(v reflect.Value) (rv reflect.Value) {
func (n *structFieldInfoNode) rvField(v reflect.Value) (rv reflect.Value) {
// we already know this is exported, and maybe embedded (based on what si says)
uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// clear flagEmbedRO if necessary, and inherit permission bits from v
urv.flag = uv.flag&(unsafeFlagStickyRO|unsafeFlagIndir|unsafeFlagAddr) | uintptr(n.kind)
urv.typ = ((*unsafeIntf)(unsafe.Pointer(&n.typ))).ptr
urv.ptr = unsafe.Pointer(uintptr(uv.ptr) + uintptr(n.offset))
// *(*unsafeReflectValue)(unsafe.Pointer(&rv)) = unsafeReflectValue{
// unsafeIntf: unsafeIntf{
// typ: ((*unsafeIntf)(unsafe.Pointer(&n.typ))).ptr,
// ptr: unsafe.Pointer(uintptr(uv.ptr) + uintptr(n.offset)),
// },
// flag: uv.flag&(unsafeFlagStickyRO|unsafeFlagIndir|unsafeFlagAddr) | uintptr(n.kind),
// }
return
}
@ -1299,10 +1211,6 @@ func unsafeNew(typ unsafe.Pointer) unsafe.Pointer {
// failing with "error: undefined reference" error.
// however, runtime.{mallocgc, newarray} are supported, so use that instead.
//go:linkname memmove runtime.memmove
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)
//go:linkname mallocgc runtime.mallocgc
//go:noescape
func mallocgc(size uintptr, typ unsafe.Pointer, needzero bool) unsafe.Pointer
@ -1319,10 +1227,6 @@ func mapiterinit(typ unsafe.Pointer, m unsafe.Pointer, it unsafe.Pointer)
//go:noescape
func mapiternext(it unsafe.Pointer) (key unsafe.Pointer)
//go:linkname mapdelete runtime.mapdelete
//go:noescape
func mapdelete(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer)
//go:linkname mapassign runtime.mapassign
//go:noescape
func mapassign(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
@ -1331,6 +1235,10 @@ func mapassign(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.
//go:noescape
func mapaccess2(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer, ok bool)
//go:linkname makemap runtime.makemap
//go:noescape
func makemap(typ unsafe.Pointer, size int, h unsafe.Pointer) unsafe.Pointer
// reflect.typed{memmove, memclr, slicecopy} will handle checking if the type has pointers or not,
// and if a writeBarrier is needed, before delegating to the right method in the runtime.
//


@ -2,7 +2,6 @@
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !safe && !codec.safe && !appengine && go1.9 && gc
// +build !safe,!codec.safe,!appengine,go1.9,gc
package codec
@ -24,8 +23,67 @@ const (
mapMaxElemSize = 128
)
func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (v unsafeSlice) {
return growslice(typ, old, cap+incr)
type mapKeyFastKind uint8
const (
mapKeyFastKindAny = iota + 1
mapKeyFastKind32
mapKeyFastKind32ptr
mapKeyFastKind64
mapKeyFastKind64ptr
mapKeyFastKindStr
)
var mapKeyFastKindVals [32]mapKeyFastKind
type mapReqParams struct {
kfast mapKeyFastKind
ref bool
indirect bool
}
func getMapReqParams(ti *typeInfo) (r mapReqParams) {
r.indirect = mapStoresElemIndirect(uintptr(ti.elemsize))
r.ref = refBitset.isset(ti.elemkind)
r.kfast = mapKeyFastKindFor(reflect.Kind(ti.keykind))
return
}
func init() {
xx := func(f mapKeyFastKind, k ...reflect.Kind) {
for _, v := range k {
mapKeyFastKindVals[byte(v)&31] = f // 'v % 32' equal to 'v & 31'
}
}
var f mapKeyFastKind
f = mapKeyFastKind64
if wordSizeBits == 32 {
f = mapKeyFastKind32
}
xx(f, reflect.Int, reflect.Uint, reflect.Uintptr)
f = mapKeyFastKind64ptr
if wordSizeBits == 32 {
f = mapKeyFastKind32ptr
}
xx(f, reflect.Ptr)
xx(mapKeyFastKindStr, reflect.String)
xx(mapKeyFastKind32, reflect.Uint32, reflect.Int32, reflect.Float32)
xx(mapKeyFastKind64, reflect.Uint64, reflect.Int64, reflect.Float64)
}
func mapKeyFastKindFor(k reflect.Kind) mapKeyFastKind {
return mapKeyFastKindVals[k&31]
}
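For illustration (standalone, not part of the vendored file): the fixed [32]mapKeyFastKind table works because every reflect.Kind value is below 32, so indexing with 'byte(k) & 31' (the cheap equivalent of '% 32') never collides. A minimal sketch of the index used by mapKeyFastKindFor:

```
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// reflect.Kind values are all small (UnsafePointer, the largest, is 26),
	// so a fixed [32] lookup table indexed by 'byte(k) & 31' never collides.
	fmt.Println(reflect.UnsafePointer, uint(reflect.UnsafePointer)) // unsafe.Pointer 26
	for _, k := range []reflect.Kind{reflect.Int, reflect.Ptr, reflect.String, reflect.Float64} {
		fmt.Println(k, byte(k)&31) // the index mapKeyFastKindFor would use
	}
}
```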
func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (s unsafeSlice) {
// culled from GOROOT/runtime/slice.go
s = rtgrowslice(old.Data, old.Cap+incr, old.Cap, incr, typ)
s.Len = old.Len
return
}
// func rvType(rv reflect.Value) reflect.Type {
@ -43,7 +101,7 @@ func mapStoresElemIndirect(elemsize uintptr) bool {
return elemsize > mapMaxElemSize
}
func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, valIsRef bool) {
func mapSet(m, k, v reflect.Value, p mapReqParams) { // valIsRef
var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
var kptr = unsafeMapKVPtr(urv)
urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
@ -60,14 +118,15 @@ func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va
// Sometimes, we got vvptr == nil when we dereferenced vvptr (if valIsIndirect).
// Consequently, only use fastXXX functions if !valIsIndirect
if valIsIndirect {
if p.indirect {
vvptr = mapassign(urv.typ, mptr, kptr)
typedmemmove(vtyp, vvptr, vptr)
// reflect_mapassign(urv.typ, mptr, kptr, vptr)
return
// typedmemmove(vtyp, vvptr, vptr)
// // reflect_mapassign(urv.typ, mptr, kptr, vptr)
// return
goto END
}
switch keyFastKind {
switch p.kfast {
case mapKeyFastKind32:
vvptr = mapassign_fast32(urv.typ, mptr, *(*uint32)(kptr))
case mapKeyFastKind32ptr:
@ -82,14 +141,14 @@ func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va
vvptr = mapassign(urv.typ, mptr, kptr)
}
// if keyFastKind != 0 && valIsIndirect {
// if p.kfast != 0 && valIsIndirect {
// vvptr = *(*unsafe.Pointer)(vvptr)
// }
END:
typedmemmove(vtyp, vvptr, vptr)
}
func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, valIsRef bool) (_ reflect.Value) {
func mapGet(m, k, v reflect.Value, p mapReqParams) (_ reflect.Value) {
var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
var kptr = unsafeMapKVPtr(urv)
urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
@ -101,7 +160,7 @@ func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va
// Note that mapaccess2_fastXXX functions do not check if the value needs to be copied.
// if they do, we should dereference the pointer and return that
switch keyFastKind {
switch p.kfast {
case mapKeyFastKind32, mapKeyFastKind32ptr:
vvptr, ok = mapaccess2_fast32(urv.typ, mptr, *(*uint32)(kptr))
case mapKeyFastKind64, mapKeyFastKind64ptr:
@ -118,9 +177,9 @@ func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va
urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
if keyFastKind != 0 && valIsIndirect {
if p.kfast != 0 && p.indirect {
urv.ptr = *(*unsafe.Pointer)(vvptr)
} else if helperUnsafeDirectAssignMapEntry || valIsRef {
} else if helperUnsafeDirectAssignMapEntry || p.ref {
urv.ptr = vvptr
} else {
typedmemmove(urv.typ, urv.ptr, vvptr)
@ -129,13 +188,11 @@ func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va
return v
}
// ----
//go:linkname unsafeZeroArr runtime.zeroVal
var unsafeZeroArr [1024]byte
// //go:linkname rvPtrToType reflect.toType
// //go:noescape
// func rvPtrToType(typ unsafe.Pointer) reflect.Type
//go:linkname mapassign_fast32 runtime.mapassign_fast32
//go:noescape
func mapassign_fast32(typ unsafe.Pointer, m unsafe.Pointer, key uint32) unsafe.Pointer
@ -167,3 +224,19 @@ func mapaccess2_fast64(typ unsafe.Pointer, m unsafe.Pointer, key uint64) (val un
//go:linkname mapaccess2_faststr runtime.mapaccess2_faststr
//go:noescape
func mapaccess2_faststr(typ unsafe.Pointer, m unsafe.Pointer, key string) (val unsafe.Pointer, ok bool)
//go:linkname rtgrowslice runtime.growslice
//go:noescape
func rtgrowslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, typ unsafe.Pointer) unsafeSlice
// ----
// //go:linkname rvPtrToType reflect.toType
// //go:noescape
// func rvPtrToType(typ unsafe.Pointer) reflect.Type
// //go:linkname growslice reflect.growslice
// //go:noescape
// func growslice(typ unsafe.Pointer, old unsafeSlice, cap int) unsafeSlice
// ----


@ -2,7 +2,6 @@
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !safe && !codec.safe && !appengine && go1.9 && !gc
// +build !safe,!codec.safe,!appengine,go1.9,!gc
package codec
@ -14,6 +13,15 @@ import (
var unsafeZeroArr [1024]byte
type mapReqParams struct {
ref bool
}
func getMapReqParams(ti *typeInfo) (r mapReqParams) {
r.ref = refBitset.isset(ti.elemkind)
return
}
// runtime.growslice does not work with gccgo, failing with "growslice: cap out of range" error.
// consequently, we just call newarray followed by typedslicecopy directly.
@ -31,18 +39,11 @@ func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (v unsa
return
}
// func unsafeNew(t reflect.Type, typ unsafe.Pointer) unsafe.Pointer {
// rv := reflect.New(t)
// return ((*unsafeReflectValue)(unsafe.Pointer(&rv))).ptr
// }
// runtime.{mapassign_fastXXX, mapaccess2_fastXXX} are not supported in gollvm,
// failing with "error: undefined reference" error.
// so we just use runtime.{mapassign, mapaccess2} directly
func mapStoresElemIndirect(elemsize uintptr) bool { return false }
func mapSet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) {
func mapSet(m, k, v reflect.Value, p mapReqParams) {
var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
var kptr = unsafeMapKVPtr(urv)
urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
@ -56,7 +57,7 @@ func mapSet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) {
typedmemmove(vtyp, vvptr, vptr)
}
func mapGet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) (_ reflect.Value) {
func mapGet(m, k, v reflect.Value, p mapReqParams) (_ reflect.Value) {
var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
var kptr = unsafeMapKVPtr(urv)
urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
@ -70,7 +71,7 @@ func mapGet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) (_ reflec
urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
if helperUnsafeDirectAssignMapEntry || valIsRef {
if helperUnsafeDirectAssignMapEntry || p.ref {
urv.ptr = vvptr
} else {
typedmemmove(urv.typ, urv.ptr, vvptr)

vendor/github.com/ugorji/go/codec/init.mono.go generated vendored Normal file

@ -0,0 +1,130 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !notmono && !codec.notmono
package codec
import "io"
func callMake(v interface{}) {}
type encWriter interface{ encWriterI }
type decReader interface{ decReaderI }
type encDriver interface{ encDriverI }
type decDriver interface{ decDriverI }
func (h *SimpleHandle) newEncoderBytes(out *[]byte) encoderI {
return helperEncDriverSimpleBytes{}.newEncoderBytes(out, h)
}
func (h *SimpleHandle) newEncoder(w io.Writer) encoderI {
return helperEncDriverSimpleIO{}.newEncoderIO(w, h)
}
func (h *SimpleHandle) newDecoderBytes(in []byte) decoderI {
return helperDecDriverSimpleBytes{}.newDecoderBytes(in, h)
}
func (h *SimpleHandle) newDecoder(r io.Reader) decoderI {
return helperDecDriverSimpleIO{}.newDecoderIO(r, h)
}
func (h *JsonHandle) newEncoderBytes(out *[]byte) encoderI {
return helperEncDriverJsonBytes{}.newEncoderBytes(out, h)
}
func (h *JsonHandle) newEncoder(w io.Writer) encoderI {
return helperEncDriverJsonIO{}.newEncoderIO(w, h)
}
func (h *JsonHandle) newDecoderBytes(in []byte) decoderI {
return helperDecDriverJsonBytes{}.newDecoderBytes(in, h)
}
func (h *JsonHandle) newDecoder(r io.Reader) decoderI {
return helperDecDriverJsonIO{}.newDecoderIO(r, h)
}
func (h *MsgpackHandle) newEncoderBytes(out *[]byte) encoderI {
return helperEncDriverMsgpackBytes{}.newEncoderBytes(out, h)
}
func (h *MsgpackHandle) newEncoder(w io.Writer) encoderI {
return helperEncDriverMsgpackIO{}.newEncoderIO(w, h)
}
func (h *MsgpackHandle) newDecoderBytes(in []byte) decoderI {
return helperDecDriverMsgpackBytes{}.newDecoderBytes(in, h)
}
func (h *MsgpackHandle) newDecoder(r io.Reader) decoderI {
return helperDecDriverMsgpackIO{}.newDecoderIO(r, h)
}
func (h *BincHandle) newEncoderBytes(out *[]byte) encoderI {
return helperEncDriverBincBytes{}.newEncoderBytes(out, h)
}
func (h *BincHandle) newEncoder(w io.Writer) encoderI {
return helperEncDriverBincIO{}.newEncoderIO(w, h)
}
func (h *BincHandle) newDecoderBytes(in []byte) decoderI {
return helperDecDriverBincBytes{}.newDecoderBytes(in, h)
}
func (h *BincHandle) newDecoder(r io.Reader) decoderI {
return helperDecDriverBincIO{}.newDecoderIO(r, h)
}
func (h *CborHandle) newEncoderBytes(out *[]byte) encoderI {
return helperEncDriverCborBytes{}.newEncoderBytes(out, h)
}
func (h *CborHandle) newEncoder(w io.Writer) encoderI {
return helperEncDriverCborIO{}.newEncoderIO(w, h)
}
func (h *CborHandle) newDecoderBytes(in []byte) decoderI {
return helperDecDriverCborBytes{}.newDecoderBytes(in, h)
}
func (h *CborHandle) newDecoder(r io.Reader) decoderI {
return helperDecDriverCborIO{}.newDecoderIO(r, h)
}
var (
bincFpEncIO = helperEncDriverBincIO{}.fastpathEList()
bincFpEncBytes = helperEncDriverBincBytes{}.fastpathEList()
bincFpDecIO = helperDecDriverBincIO{}.fastpathDList()
bincFpDecBytes = helperDecDriverBincBytes{}.fastpathDList()
)
var (
cborFpEncIO = helperEncDriverCborIO{}.fastpathEList()
cborFpEncBytes = helperEncDriverCborBytes{}.fastpathEList()
cborFpDecIO = helperDecDriverCborIO{}.fastpathDList()
cborFpDecBytes = helperDecDriverCborBytes{}.fastpathDList()
)
var (
jsonFpEncIO = helperEncDriverJsonIO{}.fastpathEList()
jsonFpEncBytes = helperEncDriverJsonBytes{}.fastpathEList()
jsonFpDecIO = helperDecDriverJsonIO{}.fastpathDList()
jsonFpDecBytes = helperDecDriverJsonBytes{}.fastpathDList()
)
var (
msgpackFpEncIO = helperEncDriverMsgpackIO{}.fastpathEList()
msgpackFpEncBytes = helperEncDriverMsgpackBytes{}.fastpathEList()
msgpackFpDecIO = helperDecDriverMsgpackIO{}.fastpathDList()
msgpackFpDecBytes = helperDecDriverMsgpackBytes{}.fastpathDList()
)
var (
simpleFpEncIO = helperEncDriverSimpleIO{}.fastpathEList()
simpleFpEncBytes = helperEncDriverSimpleBytes{}.fastpathEList()
simpleFpDecIO = helperDecDriverSimpleIO{}.fastpathDList()
simpleFpDecBytes = helperDecDriverSimpleBytes{}.fastpathDList()
)

vendor/github.com/ugorji/go/codec/init.notmono.go generated vendored Normal file

@ -0,0 +1,313 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build notmono || codec.notmono
package codec
import (
"io"
)
// This contains all the initializations of generics.
// Putting it into one file ensures that we can go generics or not.
type maker interface{ Make() }
func callMake(v interface{}) {
v.(maker).Make()
}
// ---- (writer.go)
type encWriter interface {
bufioEncWriterM | bytesEncAppenderM
encWriterI
}
type bytesEncAppenderM struct {
*bytesEncAppender
}
func (z *bytesEncAppenderM) Make() {
z.bytesEncAppender = new(bytesEncAppender)
z.out = &bytesEncAppenderDefOut
}
type bufioEncWriterM struct {
*bufioEncWriter
}
func (z *bufioEncWriterM) Make() {
z.bufioEncWriter = new(bufioEncWriter)
z.w = io.Discard
}
// ---- reader.go
type decReader interface {
bytesDecReaderM | ioDecReaderM
decReaderI
}
type bytesDecReaderM struct {
*bytesDecReader
}
func (z *bytesDecReaderM) Make() {
z.bytesDecReader = new(bytesDecReader)
}
type ioDecReaderM struct {
*ioDecReader
}
func (z *ioDecReaderM) Make() {
z.ioDecReader = new(ioDecReader)
}
// type helperEncWriter[T encWriter] struct{}
// type helperDecReader[T decReader] struct{}
// func (helperDecReader[T]) decByteSlice(r T, clen, maxInitLen int, bs []byte) (bsOut []byte) {
// ---- (encode.go)
type encDriver interface {
simpleEncDriverM[bufioEncWriterM] |
simpleEncDriverM[bytesEncAppenderM] |
jsonEncDriverM[bufioEncWriterM] |
jsonEncDriverM[bytesEncAppenderM] |
cborEncDriverM[bufioEncWriterM] |
cborEncDriverM[bytesEncAppenderM] |
msgpackEncDriverM[bufioEncWriterM] |
msgpackEncDriverM[bytesEncAppenderM] |
bincEncDriverM[bufioEncWriterM] |
bincEncDriverM[bytesEncAppenderM]
encDriverI
}
// ---- (decode.go)
type decDriver interface {
simpleDecDriverM[bytesDecReaderM] |
simpleDecDriverM[ioDecReaderM] |
jsonDecDriverM[bytesDecReaderM] |
jsonDecDriverM[ioDecReaderM] |
cborDecDriverM[bytesDecReaderM] |
cborDecDriverM[ioDecReaderM] |
msgpackDecDriverM[bytesDecReaderM] |
msgpackDecDriverM[ioDecReaderM] |
bincDecDriverM[bytesDecReaderM] |
bincDecDriverM[ioDecReaderM]
decDriverI
}
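For illustration (toy names, not library code): the union constraints above are what drive monomorphization; one generic helper gets compiled per concrete wrapper, and callMake allocates the wrapped payload via the maker interface. A small standalone sketch of that wrapper + union-constraint pattern:

```
package main

import "fmt"

// "M" wrapper structs allocate their payload in Make(), and a union constraint
// lets one generic helper be compiled (monomorphized) per concrete type.
type maker interface{ Make() }

type sink interface{ writeStr(s string) }

type memSink struct{ buf []byte }

func (w *memSink) writeStr(s string) { w.buf = append(w.buf, s...) }

type countSink struct{ n int }

func (w *countSink) writeStr(s string) { w.n += len(s) }

type memSinkM struct{ *memSink }

func (w *memSinkM) Make() { w.memSink = new(memSink) }

type countSinkM struct{ *countSink }

func (w *countSinkM) Make() { w.countSink = new(countSink) }

type sinkConstraint interface {
	memSinkM | countSinkM
	sink
}

type helper[T sinkConstraint] struct{}

func (helper[T]) newSink() T {
	var w T
	any(&w).(maker).Make() // *T has Make via pointer receiver, like callMake above
	return w
}

func main() {
	m := helper[memSinkM]{}.newSink()
	m.writeStr("hi")
	fmt.Println(string(m.memSink.buf)) // hi

	c := helper[countSinkM]{}.newSink()
	c.writeStr("hi")
	fmt.Println(c.n) // 2
}
```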
// Below: <format>.go files
// ---- (binc.go)
type bincEncDriverM[T encWriter] struct {
*bincEncDriver[T]
}
func (d *bincEncDriverM[T]) Make() {
d.bincEncDriver = new(bincEncDriver[T])
}
type bincDecDriverM[T decReader] struct {
*bincDecDriver[T]
}
func (d *bincDecDriverM[T]) Make() {
d.bincDecDriver = new(bincDecDriver[T])
}
var (
bincFpEncIO = helperEncDriver[bincEncDriverM[bufioEncWriterM]]{}.fastpathEList()
bincFpEncBytes = helperEncDriver[bincEncDriverM[bytesEncAppenderM]]{}.fastpathEList()
bincFpDecIO = helperDecDriver[bincDecDriverM[ioDecReaderM]]{}.fastpathDList()
bincFpDecBytes = helperDecDriver[bincDecDriverM[bytesDecReaderM]]{}.fastpathDList()
)
// ---- (cbor.go)
type cborEncDriverM[T encWriter] struct {
*cborEncDriver[T]
}
func (d *cborEncDriverM[T]) Make() {
d.cborEncDriver = new(cborEncDriver[T])
}
type cborDecDriverM[T decReader] struct {
*cborDecDriver[T]
}
func (d *cborDecDriverM[T]) Make() {
d.cborDecDriver = new(cborDecDriver[T])
}
var (
cborFpEncIO = helperEncDriver[cborEncDriverM[bufioEncWriterM]]{}.fastpathEList()
cborFpEncBytes = helperEncDriver[cborEncDriverM[bytesEncAppenderM]]{}.fastpathEList()
cborFpDecIO = helperDecDriver[cborDecDriverM[ioDecReaderM]]{}.fastpathDList()
cborFpDecBytes = helperDecDriver[cborDecDriverM[bytesDecReaderM]]{}.fastpathDList()
)
// ---- (json.go)
type jsonEncDriverM[T encWriter] struct {
*jsonEncDriver[T]
}
func (d *jsonEncDriverM[T]) Make() {
d.jsonEncDriver = new(jsonEncDriver[T])
}
type jsonDecDriverM[T decReader] struct {
*jsonDecDriver[T]
}
func (d *jsonDecDriverM[T]) Make() {
d.jsonDecDriver = new(jsonDecDriver[T])
}
var (
jsonFpEncIO = helperEncDriver[jsonEncDriverM[bufioEncWriterM]]{}.fastpathEList()
jsonFpEncBytes = helperEncDriver[jsonEncDriverM[bytesEncAppenderM]]{}.fastpathEList()
jsonFpDecIO = helperDecDriver[jsonDecDriverM[ioDecReaderM]]{}.fastpathDList()
jsonFpDecBytes = helperDecDriver[jsonDecDriverM[bytesDecReaderM]]{}.fastpathDList()
)
// ---- (msgpack.go)
type msgpackEncDriverM[T encWriter] struct {
*msgpackEncDriver[T]
}
func (d *msgpackEncDriverM[T]) Make() {
d.msgpackEncDriver = new(msgpackEncDriver[T])
}
type msgpackDecDriverM[T decReader] struct {
*msgpackDecDriver[T]
}
func (d *msgpackDecDriverM[T]) Make() {
d.msgpackDecDriver = new(msgpackDecDriver[T])
}
var (
msgpackFpEncIO = helperEncDriver[msgpackEncDriverM[bufioEncWriterM]]{}.fastpathEList()
msgpackFpEncBytes = helperEncDriver[msgpackEncDriverM[bytesEncAppenderM]]{}.fastpathEList()
msgpackFpDecIO = helperDecDriver[msgpackDecDriverM[ioDecReaderM]]{}.fastpathDList()
msgpackFpDecBytes = helperDecDriver[msgpackDecDriverM[bytesDecReaderM]]{}.fastpathDList()
)
// ---- (simple.go)
type simpleEncDriverM[T encWriter] struct {
*simpleEncDriver[T]
}
func (d *simpleEncDriverM[T]) Make() {
d.simpleEncDriver = new(simpleEncDriver[T])
}
type simpleDecDriverM[T decReader] struct {
*simpleDecDriver[T]
}
func (d *simpleDecDriverM[T]) Make() {
d.simpleDecDriver = new(simpleDecDriver[T])
}
var (
simpleFpEncIO = helperEncDriver[simpleEncDriverM[bufioEncWriterM]]{}.fastpathEList()
simpleFpEncBytes = helperEncDriver[simpleEncDriverM[bytesEncAppenderM]]{}.fastpathEList()
simpleFpDecIO = helperDecDriver[simpleDecDriverM[ioDecReaderM]]{}.fastpathDList()
simpleFpDecBytes = helperDecDriver[simpleDecDriverM[bytesDecReaderM]]{}.fastpathDList()
)
func (h *SimpleHandle) newEncoderBytes(out *[]byte) encoderI {
return helperEncDriver[simpleEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h)
}
func (h *SimpleHandle) newEncoder(w io.Writer) encoderI {
return helperEncDriver[simpleEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h)
}
func (h *SimpleHandle) newDecoderBytes(in []byte) decoderI {
return helperDecDriver[simpleDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h)
}
func (h *SimpleHandle) newDecoder(r io.Reader) decoderI {
return helperDecDriver[simpleDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h)
}
func (h *JsonHandle) newEncoderBytes(out *[]byte) encoderI {
return helperEncDriver[jsonEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h)
}
func (h *JsonHandle) newEncoder(w io.Writer) encoderI {
return helperEncDriver[jsonEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h)
}
func (h *JsonHandle) newDecoderBytes(in []byte) decoderI {
return helperDecDriver[jsonDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h)
}
func (h *JsonHandle) newDecoder(r io.Reader) decoderI {
return helperDecDriver[jsonDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h)
}
func (h *MsgpackHandle) newEncoderBytes(out *[]byte) encoderI {
return helperEncDriver[msgpackEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h)
}
func (h *MsgpackHandle) newEncoder(w io.Writer) encoderI {
return helperEncDriver[msgpackEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h)
}
func (h *MsgpackHandle) newDecoderBytes(in []byte) decoderI {
return helperDecDriver[msgpackDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h)
}
func (h *MsgpackHandle) newDecoder(r io.Reader) decoderI {
return helperDecDriver[msgpackDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h)
}
func (h *CborHandle) newEncoderBytes(out *[]byte) encoderI {
return helperEncDriver[cborEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h)
}
func (h *CborHandle) newEncoder(w io.Writer) encoderI {
return helperEncDriver[cborEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h)
}
func (h *CborHandle) newDecoderBytes(in []byte) decoderI {
return helperDecDriver[cborDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h)
}
func (h *CborHandle) newDecoder(r io.Reader) decoderI {
return helperDecDriver[cborDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h)
}
func (h *BincHandle) newEncoderBytes(out *[]byte) encoderI {
return helperEncDriver[bincEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h)
}
func (h *BincHandle) newEncoder(w io.Writer) encoderI {
return helperEncDriver[bincEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h)
}
func (h *BincHandle) newDecoderBytes(in []byte) decoderI {
return helperDecDriver[bincDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h)
}
func (h *BincHandle) newDecoder(r io.Reader) decoderI {
return helperDecDriver[bincDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h)
}

vendor/github.com/ugorji/go/codec/json.base.go generated vendored Normal file

@ -0,0 +1,504 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"encoding/base32"
"encoding/base64"
"errors"
"math"
"reflect"
"strings"
"time"
"unicode"
)
//--------------------------------
// jsonLits and jsonLitb are defined at the package level,
// so they are guaranteed to be stored efficiently, making
// for better append/string comparison/etc.
//
// (anecdotal evidence from some benchmarking on go 1.20 devel in 20220104)
const jsonLits = `"true"false"null"{}[]`
const (
jsonLitT = 1
jsonLitF = 6
jsonLitN = 12
jsonLitM = 17
jsonLitA = 19
)
var jsonLitb = []byte(jsonLits)
var jsonNull = jsonLitb[jsonLitN : jsonLitN+4]
var jsonArrayEmpty = jsonLitb[jsonLitA : jsonLitA+2]
var jsonMapEmpty = jsonLitb[jsonLitM : jsonLitM+2]
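For illustration (standalone, not part of the vendored file): the offsets above carve each literal out of the single backing string, e.g.:

```
package main

import "fmt"

const jsonLits = `"true"false"null"{}[]`

func main() {
	fmt.Println(jsonLits[1:5])   // true   (jsonLitT)
	fmt.Println(jsonLits[6:11])  // false  (jsonLitF)
	fmt.Println(jsonLits[12:16]) // null   (jsonLitN)
	fmt.Println(jsonLits[17:19]) // {}     (jsonLitM)
	fmt.Println(jsonLits[19:21]) // []     (jsonLitA)
}
```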
const jsonEncodeUintSmallsString = "" +
"00010203040506070809" +
"10111213141516171819" +
"20212223242526272829" +
"30313233343536373839" +
"40414243444546474849" +
"50515253545556575859" +
"60616263646566676869" +
"70717273747576777879" +
"80818283848586878889" +
"90919293949596979899"
var jsonEncodeUintSmallsStringBytes = (*[len(jsonEncodeUintSmallsString)]byte)([]byte(jsonEncodeUintSmallsString))
const (
jsonU4Chk2 = '0'
jsonU4Chk1 = 'a' - 10
jsonU4Chk0 = 'A' - 10
)
const (
// If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
// - If we see first character of null, false or true,
// do not validate subsequent characters.
// - e.g. if we see a n, assume null and skip next 3 characters,
// and do not validate they are ull.
// P.S. Do not expect a significant decoding boost from this.
jsonValidateSymbols = true
// jsonEscapeMultiByteUnicodeSep controls whether some unicode characters
// that are valid json but may bomb in some contexts are escaped during encoding.
//
// U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
// Both technically valid JSON, but bomb on JSONP, so fix here unconditionally.
jsonEscapeMultiByteUnicodeSep = true
// jsonNakedBoolNumInQuotedStr is used during decoding into a blank interface{}
// to control whether we detect quoted values of bools and null where a map key is expected,
// and treat as nil, true or false.
jsonNakedBoolNumInQuotedStr = true
)
var (
// jsonTabs and jsonSpaces are used as caches for indents
jsonTabs [32]byte
jsonSpaces [128]byte
jsonHexEncoder hexEncoder
// jsonTimeLayout is used to validate time layouts.
// Unfortunately, we couldn't compare time.Time effectively, so punted.
// jsonTimeLayout time.Time
)
func init() {
for i := 0; i < len(jsonTabs); i++ {
jsonTabs[i] = '\t'
}
for i := 0; i < len(jsonSpaces); i++ {
jsonSpaces[i] = ' '
}
// jsonTimeLayout, err := time.Parse(time.Layout, time.Layout)
// halt.onerror(err)
// jsonTimeLayout = jsonTimeLayout.Round(time.Second).UTC()
}
// ----------------
type jsonBytesFmt uint8
const (
jsonBytesFmtArray jsonBytesFmt = iota + 1
jsonBytesFmtBase64
jsonBytesFmtBase64url
jsonBytesFmtBase32
jsonBytesFmtBase32hex
jsonBytesFmtBase16
jsonBytesFmtHex = jsonBytesFmtBase16
)
type jsonTimeFmt uint8
const (
jsonTimeFmtStringLayout jsonTimeFmt = iota + 1
jsonTimeFmtUnix
jsonTimeFmtUnixMilli
jsonTimeFmtUnixMicro
jsonTimeFmtUnixNano
)
type jsonBytesFmter = bytesEncoder
type jsonHandleOpts struct {
rawext bool
// bytesFmt used during encode to determine how to encode []byte
bytesFmt jsonBytesFmt
// timeFmt used during encode to determine how to encode a time.Time
timeFmt jsonTimeFmt
// timeFmtNum used during decode to decode a time.Time from an int64 in the stream
timeFmtNum jsonTimeFmt
// timeFmtLayouts used on decode, to try to parse time.Time until successful
timeFmtLayouts []string
// byteFmters used on decode, to try to parse []byte from a UTF-8 string encoding (e.g. base64)
byteFmters []jsonBytesFmter
}
func jsonCheckTimeLayout(s string) (ok bool) {
_, err := time.Parse(s, s)
// t...Equal(jsonTimeLayout) always returns false - unsure why
// return err == nil && t.Round(time.Second).UTC().Equal(jsonTimeLayout)
return err == nil
}
func (x *jsonHandleOpts) reset(h *JsonHandle) {
x.timeFmt = 0
x.timeFmtNum = 0
x.timeFmtLayouts = x.timeFmtLayouts[:0]
if len(h.TimeFormat) != 0 {
switch h.TimeFormat[0] {
case "unix":
x.timeFmt = jsonTimeFmtUnix
case "unixmilli":
x.timeFmt = jsonTimeFmtUnixMilli
case "unixmicro":
x.timeFmt = jsonTimeFmtUnixMicro
case "unixnano":
x.timeFmt = jsonTimeFmtUnixNano
}
x.timeFmtNum = x.timeFmt
for _, v := range h.TimeFormat {
if !strings.HasPrefix(v, "unix") && jsonCheckTimeLayout(v) {
x.timeFmtLayouts = append(x.timeFmtLayouts, v)
}
}
}
if x.timeFmt == 0 { // both timeFmt and timeFmtNum are 0
x.timeFmtNum = jsonTimeFmtUnix
x.timeFmt = jsonTimeFmtStringLayout
if len(x.timeFmtLayouts) == 0 {
x.timeFmtLayouts = append(x.timeFmtLayouts, time.RFC3339Nano)
}
}
x.bytesFmt = 0
x.byteFmters = x.byteFmters[:0]
var b64 bool
if len(h.BytesFormat) != 0 {
switch h.BytesFormat[0] {
case "array":
x.bytesFmt = jsonBytesFmtArray
case "base64":
x.bytesFmt = jsonBytesFmtBase64
case "base64url":
x.bytesFmt = jsonBytesFmtBase64url
case "base32":
x.bytesFmt = jsonBytesFmtBase32
case "base32hex":
x.bytesFmt = jsonBytesFmtBase32hex
case "base16", "hex":
x.bytesFmt = jsonBytesFmtBase16
}
for _, v := range h.BytesFormat {
switch v {
// case "array":
case "base64":
x.byteFmters = append(x.byteFmters, base64.StdEncoding)
b64 = true
case "base64url":
x.byteFmters = append(x.byteFmters, base64.URLEncoding)
case "base32":
x.byteFmters = append(x.byteFmters, base32.StdEncoding)
case "base32hex":
x.byteFmters = append(x.byteFmters, base32.HexEncoding)
case "base16", "hex":
x.byteFmters = append(x.byteFmters, &jsonHexEncoder)
}
}
}
if x.bytesFmt == 0 {
// either len==0 OR gibberish was in the first element; resolve here
x.bytesFmt = jsonBytesFmtBase64
if !b64 { // not present - so insert into pos 0
x.byteFmters = append(x.byteFmters, nil)
copy(x.byteFmters[1:], x.byteFmters[0:])
x.byteFmters[0] = base64.StdEncoding
}
}
// ----
x.rawext = h.RawBytesExt != nil
}
var jsonEncBoolStrs = [2][2]string{
{jsonLits[jsonLitF : jsonLitF+5], jsonLits[jsonLitT : jsonLitT+4]},
{jsonLits[jsonLitF-1 : jsonLitF+6], jsonLits[jsonLitT-1 : jsonLitT+5]},
}
func jsonEncodeUint(neg, quotes bool, u uint64, b *[48]byte) []byte {
// MARKER: use setByteAt/byteAt to elide the bounds-checks
// when we are sure that we don't go beyond the bounds.
// MARKER: copied mostly from std library: strconv/itoa.go
// this should only be called on 64bit OS.
var ss = jsonEncodeUintSmallsStringBytes[:]
// typically, 19 or 20 bytes are sufficient for decimal encoding a uint64
var a = b[:24]
var i = uint(len(a))
if quotes {
i--
setByteAt(a, i, '"')
// a[i] = '"'
}
var is, us uint // use uint, as those fit into a register on the platform
if cpu32Bit {
for u >= 1e9 {
q := u / 1e9
us = uint(u - q*1e9) // u % 1e9 fits into a uint
for j := 4; j > 0; j-- {
is = us % 100 * 2
us /= 100
i -= 2
setByteAt(a, i+1, byteAt(ss, is+1))
setByteAt(a, i, byteAt(ss, is))
}
i--
setByteAt(a, i, byteAt(ss, us*2+1))
u = q
}
// u is now < 1e9, so is guaranteed to fit into a uint
}
us = uint(u)
for us >= 100 {
is = us % 100 * 2
us /= 100
i -= 2
setByteAt(a, i+1, byteAt(ss, is+1))
setByteAt(a, i, byteAt(ss, is))
// a[i+1], a[i] = ss[is+1], ss[is]
}
// us < 100
is = us * 2
i--
setByteAt(a, i, byteAt(ss, is+1))
// a[i] = ss[is+1]
if us >= 10 {
i--
setByteAt(a, i, byteAt(ss, is))
// a[i] = ss[is]
}
if neg {
i--
setByteAt(a, i, '-')
// a[i] = '-'
}
if quotes {
i--
setByteAt(a, i, '"')
// a[i] = '"'
}
return a[i:]
}
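For illustration (standalone, not part of the vendored file): the loop above emits two decimal digits per step from a 00..99 lookup string, writing backwards from the end of a scratch buffer. A compact re-creation of the core loop, without the quote/sign/32-bit handling:

```
package main

import "fmt"

func main() {
	// Build the same 00..99 two-digit lookup used by jsonEncodeUintSmallsString.
	var smalls [200]byte
	for i := 0; i < 100; i++ {
		smalls[i*2] = byte('0' + i/10)
		smalls[i*2+1] = byte('0' + i%10)
	}
	u := uint(1234567)
	var a [24]byte
	i := len(a)
	for u >= 100 {
		is := u % 100 * 2
		u /= 100
		i -= 2
		a[i], a[i+1] = smalls[is], smalls[is+1]
	}
	if u >= 10 {
		i -= 2
		a[i], a[i+1] = smalls[u*2], smalls[u*2+1]
	} else {
		i--
		a[i] = byte('0' + u)
	}
	fmt.Println(string(a[i:])) // 1234567
}
```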
// MARKER: checkLitErr methods to prevent the got/expect parameters from escaping
//go:noinline
func jsonCheckLitErr3(got, expect [3]byte) {
halt.errorf("expecting %s: got %s", expect, got)
}
//go:noinline
func jsonCheckLitErr4(got, expect [4]byte) {
halt.errorf("expecting %s: got %s", expect, got)
}
func jsonSlashURune(cs [4]byte) (rr uint32) {
for _, c := range cs {
// best to use explicit if-else
// - not a table, etc., which involves memory loads, array lookups with bounds checks, etc.
if c >= '0' && c <= '9' {
rr = rr*16 + uint32(c-jsonU4Chk2)
} else if c >= 'a' && c <= 'f' {
rr = rr*16 + uint32(c-jsonU4Chk1)
} else if c >= 'A' && c <= 'F' {
rr = rr*16 + uint32(c-jsonU4Chk0)
} else {
return unicode.ReplacementChar
}
}
return
}
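For illustration (standalone, not part of the vendored file): jsonSlashURune folds the four hex digits of a \uXXXX escape into a code point, returning U+FFFD on any invalid digit; the jsonU4Chk constants are simply '0', 'a'-10 and 'A'-10. The same arithmetic, restated:

```
package main

import "fmt"

func hex4(cs [4]byte) (rr uint32) {
	for _, c := range cs {
		switch {
		case c >= '0' && c <= '9':
			rr = rr*16 + uint32(c-'0')
		case c >= 'a' && c <= 'f':
			rr = rr*16 + uint32(c-'a'+10)
		case c >= 'A' && c <= 'F':
			rr = rr*16 + uint32(c-'A'+10)
		default:
			return 0xFFFD // unicode.ReplacementChar
		}
	}
	return
}

func main() {
	fmt.Printf("%c %c\n", rune(hex4([4]byte{'0', '0', 'e', '9'})), rune(hex4([4]byte{'2', '6', '3', 'A'})))
	// é ☺
}
```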
func jsonNakedNum(z *fauxUnion, bs []byte, preferFloat, signedInt bool) (err error) {
// Note: jsonNakedNum is NEVER called with a zero-length []byte
if preferFloat {
z.v = valueTypeFloat
z.f, err = parseFloat64(bs)
} else {
err = parseNumber(bs, z, signedInt)
}
return
}
//----------------------
// JsonHandle is a handle for JSON encoding format.
//
// Json is comprehensively supported:
// - decodes numbers into interface{} as int, uint or float64
// based on how the number looks and some config parameters e.g. PreferFloat, SignedInt, etc.
// - decode integers from float formatted numbers e.g. 1.27e+8
// - decode any json value (numbers, bool, etc) from quoted strings
// - configurable way to encode/decode []byte.
// by default, encodes and decodes []byte using base64 Std Encoding
// - UTF-8 support for encoding and decoding
//
// It has better performance than the json library in the standard library,
// by leveraging the performance improvements of the codec library.
//
// In addition, it doesn't read more bytes than necessary during a decode, which allows
// reading multiple values from a stream containing json and non-json content.
// For example, a user can read a json value, then a cbor value, then a msgpack value,
// all from the same stream in sequence.
//
// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are
// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD.
//
// Note also that the float values for NaN, +Inf or -Inf are encoded as null,
// as suggested by NOTE 4 of the ECMA-262 ECMAScript Language Specification 5.1 edition.
// see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf .
//
// Note the following behaviour differences vs std-library encoding/json package:
// - struct field names matched in case-sensitive manner
type JsonHandle struct {
textEncodingType
BasicHandle
// Indent indicates how a value is encoded.
// - If positive, indent by that number of spaces.
// - If negative, indent by that number of tabs.
Indent int8
// IntegerAsString controls how integers (signed and unsigned) are encoded.
//
// Per the JSON Spec, JSON numbers are 64-bit floating point numbers.
// Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
// This can be mitigated by configuring how to encode integers.
//
// IntegerAsString interprets the following values:
// - if 'L', then encode integers > 2^53 as a json string.
// - if 'A', then encode all integers as a json string
// containing the exact integer representation as a decimal.
// - else encode all integers as a json number (default)
IntegerAsString byte
// HTMLCharsAsIs controls how to encode some special characters to html: < > &
//
// By default, we encode them as \uXXX
// to prevent security holes when served from some browsers.
HTMLCharsAsIs bool
// PreferFloat says that we will default to decoding a number as a float.
// If not set, we will examine the characters of the number and decode as an
// integer type if it doesn't have any of the characters [.eE].
PreferFloat bool
// TermWhitespace says that we add a whitespace character
// at the end of an encoding.
//
// The whitespace is important, especially if using numbers in a context
// where multiple items are written to a stream.
TermWhitespace bool
// MapKeyAsString says to encode all map keys as strings.
//
// Use this to enforce strict json output.
// The only caveat is that nil value is ALWAYS written as null (never as "null")
MapKeyAsString bool
// _ uint64 // padding (cache line)
// Note: below, we store hardly-used items e.g. RawBytesExt.
// These values below may straddle a cache line, but they are hardly-used,
// so shouldn't contribute to false-sharing except in rare cases.
// RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
// If not configured, raw bytes are encoded to/from base64 text.
RawBytesExt InterfaceExt
// TimeFormat is an array of strings representing a time.Time format, with each one being
// a layout that honors the time.Time.Format specification.
// In addition, at most one of the set (unix, unixmilli, unixmicro, unixnano) can be specified,
// supporting encoding and decoding time as a number relative to the time epoch of Jan 1, 1970.
//
// During encode of a time.Time, the first entry in the array is used (defaults to RFC 3339).
//
// During decode,
// - if a string, then each of the layout formats will be tried in order until a time.Time is decoded.
// - if a number, then the sole unix entry is used.
TimeFormat []string
// BytesFormat is an array of strings representing how bytes are encoded.
//
// Supported values are base64 (default), base64url, base32, base32hex, base16 (synonymous with hex) and array.
//
// array is a special value configuring that bytes are encoded as a sequence of numbers.
//
// During encode of a []byte, the first entry is used (defaults to base64 if none specified).
//
// During decode
// - if a string, then attempt decoding using each format in sequence until successful.
// - if an array, then decode normally
BytesFormat []string
}
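For illustration, a hedged usage sketch of the TimeFormat and BytesFormat fields documented above, against the public codec API (the chosen values are assumptions, not taken from this changeset):

```
package main

import (
	"fmt"
	"time"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle
	// Encode times as unix milliseconds; on decode, also try RFC 3339 strings.
	h.TimeFormat = []string{"unixmilli", time.RFC3339Nano}
	// Encode []byte as base64url; on decode, accept base64url or standard base64.
	h.BytesFormat = []string{"base64url", "base64"}

	var out []byte
	enc := codec.NewEncoderBytes(&out, &h)
	if err := enc.Encode(map[string]interface{}{
		"at":  time.Unix(1, 0).UTC(),
		"raw": []byte{1, 2, 3},
	}); err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```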
func (h *JsonHandle) isJson() bool { return true }
// Name returns the name of the handle: json
func (h *JsonHandle) Name() string { return "json" }
// func (h *JsonHandle) desc(bd byte) string { return str4byte(bd) }
func (h *JsonHandle) desc(bd byte) string { return string(bd) }
func (h *JsonHandle) typical() bool {
return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L'
}
// SetInterfaceExt sets an extension
func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
func jsonFloatStrconvFmtPrec64(f float64) (fmt byte, prec int8) {
fmt = 'f'
prec = -1
fbits := math.Float64bits(f)
abs := math.Float64frombits(fbits &^ (1 << 63))
if abs == 0 || abs == 1 {
prec = 1
} else if abs < 1e-6 || abs >= 1e21 {
fmt = 'e'
} else if noFrac64(fbits) {
prec = 1
}
return
}
func jsonFloatStrconvFmtPrec32(f float32) (fmt byte, prec int8) {
fmt = 'f'
prec = -1
// directly handle Modf (to get fractions) and Abs (to get absolute)
fbits := math.Float32bits(f)
abs := math.Float32frombits(fbits &^ (1 << 31))
if abs == 0 || abs == 1 {
prec = 1
} else if abs < 1e-6 || abs >= 1e21 {
fmt = 'e'
} else if noFrac32(fbits) {
prec = 1
}
return
}
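For illustration (standalone approximation, not library code): the two functions above pick a strconv format and precision so that whole floats keep a trailing '.0' and very small or very large magnitudes switch to exponent form. A rough equivalent of the 64-bit policy wired to strconv.AppendFloat, using math.Trunc in place of noFrac64:

```
package main

import (
	"fmt"
	"math"
	"strconv"
)

// appendJSONFloat64 approximates the format/precision policy of jsonFloatStrconvFmtPrec64.
func appendJSONFloat64(b []byte, f float64) []byte {
	format, prec := byte('f'), -1
	abs := math.Abs(f)
	if abs != 0 && (abs < 1e-6 || abs >= 1e21) {
		format = 'e'
	} else if abs == 0 || abs == 1 || f == math.Trunc(f) {
		prec = 1
	}
	return strconv.AppendFloat(b, f, format, prec, 64)
}

func main() {
	for _, f := range []float64{0, 1, 3.14159, 1e-7, 123456789.0} {
		fmt.Println(string(appendJSONFloat64(nil, f))) // 0.0, 1.0, 3.14159, 1e-07, 123456789.0
	}
}
```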
var errJsonNoBd = errors.New("descBd unsupported in json")

File diff suppressed because it is too large

File diff suppressed because it is too large

vendor/github.com/ugorji/go/codec/json.mono.generated.go generated vendored Normal file

File diff suppressed because it is too large


@ -0,0 +1,52 @@
//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath)
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"reflect"
)
type fastpathEJsonBytes struct {
rt reflect.Type
encfn func(*encoderJsonBytes, *encFnInfo, reflect.Value)
}
type fastpathDJsonBytes struct {
rt reflect.Type
decfn func(*decoderJsonBytes, *decFnInfo, reflect.Value)
}
type fastpathEsJsonBytes [0]fastpathEJsonBytes
type fastpathDsJsonBytes [0]fastpathDJsonBytes
func (helperEncDriverJsonBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderJsonBytes) bool {
return false
}
func (helperDecDriverJsonBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderJsonBytes) bool {
return false
}
func (helperEncDriverJsonBytes) fastpathEList() (v *fastpathEsJsonBytes) { return }
func (helperDecDriverJsonBytes) fastpathDList() (v *fastpathDsJsonBytes) { return }
type fastpathEJsonIO struct {
rt reflect.Type
encfn func(*encoderJsonIO, *encFnInfo, reflect.Value)
}
type fastpathDJsonIO struct {
rt reflect.Type
decfn func(*decoderJsonIO, *decFnInfo, reflect.Value)
}
type fastpathEsJsonIO [0]fastpathEJsonIO
type fastpathDsJsonIO [0]fastpathDJsonIO
func (helperEncDriverJsonIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderJsonIO) bool {
return false
}
func (helperDecDriverJsonIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderJsonIO) bool {
return false
}
func (helperEncDriverJsonIO) fastpathEList() (v *fastpathEsJsonIO) { return }
func (helperDecDriverJsonIO) fastpathDList() (v *fastpathDsJsonIO) { return }


@ -1,235 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !codec.notmammoth
// +build !codec.notmammoth
// Code generated from mammoth-test.go.tmpl - DO NOT EDIT.
package codec
import "testing"
import "fmt"
import "reflect"
// TestMammoth has all the different paths optimized in fast-path
// It has all the primitives, slices and maps.
//
// For each of those types, it has a pointer and a non-pointer field.
func init() { _ = fmt.Printf } // so we can include fmt as needed
type TestMammoth struct {
{{range .Values }}{{if .Primitive -}}
{{ .MethodNamePfx "F" true }} {{ .Primitive }}
{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}}
{{ .MethodNamePfx "F" false }} []{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
{{ .MethodNamePfx "Farr4" false }} [4]{{ .Elem }}
{{end}}{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if .MapKey -}}
{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}
}
{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}}
type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }}
func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { }
{{end}}{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if .MapKey -}}
type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}
func __doTestMammothSlices(t *testing.T, h Handle) {
{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey -}}
var v{{$i}}va [8]{{ .Elem }}
for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } {
{{/*
// fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v)
// - encode value to some []byte
// - decode into a length-wise-equal []byte
// - check if equal to initial slice
// - encode ptr to the value
// - check if encode bytes are same
// - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice
// - decode into non-addressable slice of equal length, then larger len
// - for each decode, compare elem-by-elem to the original slice
// -
// - rinse and repeat for a MapBySlice version
// -
*/ -}}
var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }}
var bs{{$i}} []byte
v{{$i}}v1 = v
bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}")
if v == nil {
v{{$i}}v2 = make([]{{ .Elem }}, 2)
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}") // should not change
testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change
v{{$i}}v2 = make([]{{ .Elem }}, 2)
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}-noaddr") // should not change
testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change
} else {
v{{$i}}v2 = make([]{{ .Elem }}, len(v))
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}")
v{{$i}}v2 = make([]{{ .Elem }}, len(v))
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr")
}
testReleaseBytes(bs{{$i}})
// ...
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p")
v{{$i}}v2 = nil
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
testUnmarshalErr(&v{{$i}}va, bs{{$i}}, h, t, "dec-array-v{{$i}}-p-1")
if v{{$i}}v1 == nil && v{{$i}}v2 == nil { v{{$i}}v2 = []{{ .Elem }}{} } // so we can compare to zero len slice below
testDeepEqualErr(v{{$i}}va[:len(v{{$i}}v2)], v{{$i}}v2, t, "equal-array-v{{$i}}-p-1")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
v{{$i}}v2 = v{{$i}}va[:1:1]
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-1")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)]
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-len")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
v{{$i}}v2 = v{{$i}}va[:]
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-cap")
if len(v{{$i}}v1) > 1 {
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr")
testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-len-noaddr")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr")
testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr")
}
testReleaseBytes(bs{{$i}})
// ...
var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }}
v{{$i}}v2 = nil
if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1)
v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
if v != nil {
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom")
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom")
testReleaseBytes(bs{{$i}})
}
bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p")
v{{$i}}v2 = nil
v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p")
testReleaseBytes(bs{{$i}})
}
{{end}}{{end}}{{end}}
}
func __doTestMammothMaps(t *testing.T, h Handle) {
{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey -}}
for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } {
// fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v)
var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }}
var bs{{$i}} []byte
v{{$i}}v1 = v
bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}")
if v != nil {
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}")
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-noaddr")
}
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len")
testReleaseBytes(bs{{$i}})
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p")
v{{$i}}v2 = nil
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil")
testReleaseBytes(bs{{$i}})
// ...
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }}
v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1)
v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2)
if v != nil {
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom")
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-p-len")
testReleaseBytes(bs{{$i}})
}
}
{{end}}{{end}}{{end}}
}
func doTestMammothMapsAndSlices(t *testing.T, h Handle) {
defer testSetup(t, &h)()
if mh, ok := h.(*MsgpackHandle); ok {
defer func(b bool) { mh.RawToString = b }(mh.RawToString)
mh.RawToString = true
}
__doTestMammothSlices(t, h)
__doTestMammothMaps(t, h)
}
func doTestMammoth(t *testing.T, h Handle) {
defer testSetup(t, &h)()
if mh, ok := h.(*MsgpackHandle); ok {
defer func(b bool) { mh.RawToString = b }(mh.RawToString)
mh.RawToString = true
}
name := h.Name()
var b []byte
var m, m2 TestMammoth
testRandomFillRV(reflect.ValueOf(&m).Elem())
b = testMarshalErr(&m, h, t, "mammoth-"+name)
testUnmarshalErr(&m2, b, h, t, "mammoth-"+name)
testDeepEqualErr(&m, &m2, t, "mammoth-"+name)
testReleaseBytes(b)
if testing.Short() {
t.Skipf("skipping rest of mammoth test in -short mode")
}
var mm, mm2 TestMammoth2Wrapper
testRandomFillRV(reflect.ValueOf(&mm).Elem())
b = testMarshalErr(&mm, h, t, "mammoth2-"+name)
// os.Stderr.Write([]byte("\n\n\n\n" + string(b) + "\n\n\n\n"))
testUnmarshalErr(&mm2, b, h, t, "mammoth2-"+name)
testDeepEqualErr(&mm, &mm2, t, "mammoth2-"+name)
// testMammoth2(t, name, h)
testReleaseBytes(b)
}
{{range $i, $e := .Formats -}}
func Test{{ . }}Mammoth(t *testing.T) {
doTestMammoth(t, test{{ . }}H)
}
{{end}}
{{range $i, $e := .Formats -}}
func Test{{ . }}MammothMapsAndSlices(t *testing.T) {
doTestMammothMapsAndSlices(t, test{{ . }}H)
}
{{end}}


@ -1,101 +0,0 @@
// +build !codec.notmammoth
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT.
package codec
// Increase code coverage by covering all the codecgen paths, in fast-path and gen-helper.go....
//
// Note: even though this is built based on fast-path and gen-helper, we will run these tests
// in all modes, including notfastpath, etc.
//
// Add test file for creating a mammoth generated file as _mammoth_generated.go
// - generate a second mammoth file in a different file: mammoth2_generated_test.go
// mammoth-test.go.tmpl will do this
// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags)
// - as part of TestMammoth, run it also
// - this will cover all the codecgen, gen-helper, etc in one full run
// - check in mammoth* files into github also
//
// Now, add some types:
// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it
// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types
// - this wrapper object is what we encode/decode (so that the codecgen methods are called)
// import "encoding/binary"
import "fmt"
type TestMammoth2 struct {
{{range .Values }}{{if .Primitive }}{{/*
*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }}
{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
{{end}}{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}
}
// -----------
type testMammoth2Binary uint64
func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) {
data = make([]byte, 8)
bigenstd.PutUint64(data, uint64(x))
return
}
func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) {
*x = testMammoth2Binary(bigenstd.Uint64(data))
return
}
type testMammoth2Text uint64
func (x testMammoth2Text) MarshalText() (data []byte, err error) {
data = []byte(fmt.Sprintf("%b", uint64(x)))
return
}
func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x))
return
}
type testMammoth2Json uint64
func (x testMammoth2Json) MarshalJSON() (data []byte, err error) {
data = []byte(fmt.Sprintf("%v", uint64(x)))
return
}
func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x))
return
}
type testMammoth2Basic [4]uint64
type TestMammoth2Wrapper struct {
V TestMammoth2
T testMammoth2Text
B testMammoth2Binary
J testMammoth2Json
C testMammoth2Basic
M map[testMammoth2Basic]TestMammoth2
L []TestMammoth2
A [4]int64
Tcomplex128 complex128
Tcomplex64 complex64
Tbytes []uint8
Tpbytes *[]uint8
}

vendor/github.com/ugorji/go/codec/mammoth_test.go.tmpl generated vendored Normal file

@ -0,0 +1,324 @@
//go:build !codec.notmammoth
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from mammoth_test.go.tmpl - DO NOT EDIT.
package codec
import "testing"
import "fmt"
import "reflect"
// TestMammoth has all the different paths optimized in fastpath
// It has all the primitives, slices and maps.
//
// For each of those types, it has a pointer and a non-pointer field.
func init() { _ = fmt.Printf } // so we can include fmt as needed
type TestMammoth struct {
{{range .Values }}{{if .Primitive -}}
{{ .MethodNamePfx "F" true }} {{ .Primitive }}
{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}}
{{ .MethodNamePfx "F" false }} []{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
{{ .MethodNamePfx "Farr4" false }} [4]{{ .Elem }}
{{end}}{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if .MapKey -}}
{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}
}
// -----------
// Increase code coverage by covering all the codecgen paths, in fastpath ....
//
// Note: even though this is built based on fastpath, we will run these tests
// in all modes, including notfastpath, etc.
//
// Add test file for creating a mammoth generated file as _mammoth_generated.go
//
// Now, add some types:
// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it
// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types
// - this wrapper object is what we encode/decode (so that the codecgen methods are called)
type testMammoth2Binary uint64
func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) {
data = make([]byte, 8)
bigenstd.PutUint64(data, uint64(x))
return
}
func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) {
*x = testMammoth2Binary(bigenstd.Uint64(data))
return
}
type testMammoth2Text uint64
func (x testMammoth2Text) MarshalText() (data []byte, err error) {
data = []byte(fmt.Sprintf("%b", uint64(x)))
return
}
func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x))
return
}
type testMammoth2Json uint64
func (x testMammoth2Json) MarshalJSON() (data []byte, err error) {
data = []byte(fmt.Sprintf("%v", uint64(x)))
return
}
func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x))
return
}
type testMammoth2Basic [4]uint64
type TestMammoth2Wrapper struct {
V TestMammoth
T testMammoth2Text
B testMammoth2Binary
J testMammoth2Json
C testMammoth2Basic
M map[testMammoth2Basic]TestMammoth
L []TestMammoth
A [4]int64
Tcomplex128 complex128
Tcomplex64 complex64
Tbytes []uint8
Tpbytes *[]uint8
}
// -----------
{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}}
type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }}
func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { }
{{end}}{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if .MapKey -}}
type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}
func __doTestMammothSlices(t *testing.T, h Handle) {
{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey -}}
var v{{$i}}va [8]{{ .Elem }}
for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } {
{{/*
// fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v)
// - encode value to some []byte
// - decode into a length-wise-equal []byte
// - check if equal to initial slice
// - encode ptr to the value
// - check if encode bytes are same
// - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice
// - decode into non-addressable slice of equal length, then larger len
// - for each decode, compare elem-by-elem to the original slice
// -
// - rinse and repeat for a MapBySlice version
// -
*/ -}}
var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }}
var bs{{$i}} []byte
v{{$i}}v1 = v
bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}")
if v == nil {
v{{$i}}v2 = make([]{{ .Elem }}, 2)
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}") // should not change
testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change
v{{$i}}v2 = make([]{{ .Elem }}, 2)
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}-noaddr") // should not change
testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change
} else {
v{{$i}}v2 = make([]{{ .Elem }}, len(v))
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}")
v{{$i}}v2 = make([]{{ .Elem }}, len(v))
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-noaddr")
}
testReleaseBytes(bs{{$i}})
// ...
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p")
v{{$i}}v2 = nil
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p")
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-p")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
testUnmarshalErr(&v{{$i}}va, bs{{$i}}, h, t, "dec-array-v{{$i}}-p-1")
if v{{$i}}v1 == nil && v{{$i}}v2 == nil { v{{$i}}v2 = []{{ .Elem }}{} } // so we can compare to zero len slice below
testDeepEqualErrHandle(v{{$i}}va[:len(v{{$i}}v2)], v{{$i}}v2, h, t, "equal-array-v{{$i}}-p-1")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
v{{$i}}v2 = v{{$i}}va[:1:1]
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1")
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-p-1")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)]
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len")
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-p-len")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
v{{$i}}v2 = v{{$i}}va[:]
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap")
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-p-cap")
if len(v{{$i}}v1) > 1 {
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr")
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], h, t, "equal-slice-v{{$i}}-p-len-noaddr")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr")
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], h, t, "equal-slice-v{{$i}}-p-cap-noaddr")
}
testReleaseBytes(bs{{$i}})
// ...
var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }}
v{{$i}}v2 = nil
if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1)
v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
if v != nil {
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom")
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom")
testDeepEqualErrHandle(v{{$i}}v3, v{{$i}}v4, h, t, "equal-slice-v{{$i}}-custom")
testReleaseBytes(bs{{$i}})
}
bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p")
v{{$i}}v2 = nil
v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p")
testDeepEqualErrHandle(v{{$i}}v3, v{{$i}}v4, h, t, "equal-slice-v{{$i}}-custom-p")
testReleaseBytes(bs{{$i}})
}
{{end}}{{end}}{{end}}
}
func __doTestMammothMaps(t *testing.T, h Handle) {
{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey -}}
for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } {
{{/* // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v) */ -}}
var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }}
var bs{{$i}} []byte
v{{$i}}v1 = v
bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}")
if v != nil {
v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) // reset map
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}")
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-map-v{{$i}}")
v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) // reset map
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-map-v{{$i}}-noaddr")
}
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-map-v{{$i}}-p-len")
testReleaseBytes(bs{{$i}})
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p")
v{{$i}}v2 = nil
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil")
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-map-v{{$i}}-p-nil")
testReleaseBytes(bs{{$i}})
// ...
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }}
v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1)
v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2)
if v != nil {
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom")
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErrHandle(v{{$i}}v3, v{{$i}}v4, h, t, "equal-map-v{{$i}}-p-len")
testReleaseBytes(bs{{$i}})
}
type s{{$i}}T struct {
M map[{{ .MapKey }}]{{ .Elem }}
Mp *map[{{ .MapKey }}]{{ .Elem }}
}
var m{{$i}}v99 = map[{{ .MapKey }}]{{ .Elem }}{
{{ zerocmd .MapKey }}: {{ zerocmd .Elem }},
{{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }},
}
var s{{$i}}v1, s{{$i}}v2 s{{$i}}T
bs{{$i}} = testMarshalErr(s{{$i}}v1, h, t, "enc-map-v{{$i}}-custom")
testUnmarshalErr(&s{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErrHandle(s{{$i}}v1, s{{$i}}v2, h, t, "equal-map-v{{$i}}-p-len")
testReleaseBytes(bs{{$i}})
s{{$i}}v2 = s{{$i}}T{}
s{{$i}}v1.M = m{{$i}}v99
bs{{$i}} = testMarshalErr(s{{$i}}v1, h, t, "enc-map-v{{$i}}-custom")
testUnmarshalErr(&s{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErrHandle(s{{$i}}v1, s{{$i}}v2, h, t, "equal-map-v{{$i}}-p-len")
testReleaseBytes(bs{{$i}})
s{{$i}}v2 = s{{$i}}T{}
s{{$i}}v1.Mp = &m{{$i}}v99
bs{{$i}} = testMarshalErr(s{{$i}}v1, h, t, "enc-map-v{{$i}}-custom")
testUnmarshalErr(&s{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErrHandle(s{{$i}}v1, s{{$i}}v2, h, t, "equal-map-v{{$i}}-p-len")
testReleaseBytes(bs{{$i}})
}
{{end}}{{end}}{{end}}
}
func doTestMammothMapsAndSlices(t *testing.T, h Handle) {
defer testSetup(t, &h)()
if mh, ok := h.(*MsgpackHandle); ok {
defer func(b bool) { mh.RawToString = b }(mh.RawToString)
mh.RawToString = true
}
__doTestMammothSlices(t, h)
__doTestMammothMaps(t, h)
}
func doTestMammoth(t *testing.T, h Handle) {
defer testSetup(t, &h)()
if mh, ok := h.(*MsgpackHandle); ok {
defer func(b bool) { mh.RawToString = b }(mh.RawToString)
mh.RawToString = true
}
name := h.Name()
var b []byte
var m, m2 TestMammoth
testRandomFillRV(reflect.ValueOf(&m).Elem())
b = testMarshalErr(&m, h, t, "mammoth-"+name)
testUnmarshalErr(&m2, b, h, t, "mammoth-"+name)
testDeepEqualErrHandle(&m, &m2, h, t, "mammoth-"+name)
testReleaseBytes(b)
if testing.Short() {
t.Skipf("skipping rest of mammoth test in -short mode")
}
var mm, mm2 TestMammoth2Wrapper
testRandomFillRV(reflect.ValueOf(&mm).Elem())
b = testMarshalErr(&mm, h, t, "mammoth2-"+name)
// os.Stderr.Write([]byte("\n\n\n\n" + string(b) + "\n\n\n\n"))
testUnmarshalErr(&mm2, b, h, t, "mammoth2-"+name)
testDeepEqualErrHandle(&mm, &mm2, h, t, "mammoth2-"+name)
// testMammoth2(t, name, h)
testReleaseBytes(b)
}
{{range $i, $e := .Formats -}}
func Test{{ . }}Mammoth(t *testing.T) {
doTestMammoth(t, test{{ . }}H)
}
{{end}}
{{range $i, $e := .Formats -}}
func Test{{ . }}MammothMapsAndSlices(t *testing.T) {
doTestMammothMapsAndSlices(t, test{{ . }}H)
}
{{end}}

299
vendor/github.com/ugorji/go/codec/msgpack.base.go generated vendored Normal file
View file

@ -0,0 +1,299 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"fmt"
"io"
"net/rpc"
"reflect"
)
const (
mpPosFixNumMin byte = 0x00
mpPosFixNumMax byte = 0x7f
mpFixMapMin byte = 0x80
mpFixMapMax byte = 0x8f
mpFixArrayMin byte = 0x90
mpFixArrayMax byte = 0x9f
mpFixStrMin byte = 0xa0
mpFixStrMax byte = 0xbf
mpNil byte = 0xc0
_ byte = 0xc1
mpFalse byte = 0xc2
mpTrue byte = 0xc3
mpFloat byte = 0xca
mpDouble byte = 0xcb
mpUint8 byte = 0xcc
mpUint16 byte = 0xcd
mpUint32 byte = 0xce
mpUint64 byte = 0xcf
mpInt8 byte = 0xd0
mpInt16 byte = 0xd1
mpInt32 byte = 0xd2
mpInt64 byte = 0xd3
// extensions below
mpBin8 byte = 0xc4
mpBin16 byte = 0xc5
mpBin32 byte = 0xc6
mpExt8 byte = 0xc7
mpExt16 byte = 0xc8
mpExt32 byte = 0xc9
mpFixExt1 byte = 0xd4
mpFixExt2 byte = 0xd5
mpFixExt4 byte = 0xd6
mpFixExt8 byte = 0xd7
mpFixExt16 byte = 0xd8
mpStr8 byte = 0xd9 // new
mpStr16 byte = 0xda
mpStr32 byte = 0xdb
mpArray16 byte = 0xdc
mpArray32 byte = 0xdd
mpMap16 byte = 0xde
mpMap32 byte = 0xdf
mpNegFixNumMin byte = 0xe0
mpNegFixNumMax byte = 0xff
)
var mpTimeExtTag int8 = -1
var mpTimeExtTagU = uint8(mpTimeExtTag)
var mpdescNames = map[byte]string{
mpNil: "nil",
mpFalse: "false",
mpTrue: "true",
mpFloat: "float",
mpDouble: "float",
mpUint8: "uint",
mpUint16: "uint",
mpUint32: "uint",
mpUint64: "uint",
mpInt8: "int",
mpInt16: "int",
mpInt32: "int",
mpInt64: "int",
mpStr8: "string|bytes",
mpStr16: "string|bytes",
mpStr32: "string|bytes",
mpBin8: "bytes",
mpBin16: "bytes",
mpBin32: "bytes",
mpArray16: "array",
mpArray32: "array",
mpMap16: "map",
mpMap32: "map",
}
func mpdesc(bd byte) (s string) {
s = mpdescNames[bd]
if s == "" {
switch {
case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax,
bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
s = "int"
case bd >= mpFixStrMin && bd <= mpFixStrMax:
s = "string|bytes"
case bd >= mpFixArrayMin && bd <= mpFixArrayMax:
s = "array"
case bd >= mpFixMapMin && bd <= mpFixMapMax:
s = "map"
case bd >= mpFixExt1 && bd <= mpFixExt16,
bd >= mpExt8 && bd <= mpExt32:
s = "ext"
default:
s = "unknown"
}
}
return
}
// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
// that the backend RPC service takes multiple arguments, which have been arranged
// in sequence in the slice.
//
// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
// array of 1 element).
type MsgpackSpecRpcMultiArgs []interface{}
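For context, a minimal client-side sketch of what this type changes on the wire; the address, service/method names and argument values are placeholders, and `ClientCodec` is the constructor defined at the bottom of this file:

```go
package rpcexample

import (
	"net"
	"net/rpc"

	"github.com/ugorji/go/codec"
)

func callBoth(addr string) error {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return err
	}
	client := rpc.NewClientWithCodec(codec.MsgpackSpecRpc.ClientCodec(conn, &codec.MsgpackHandle{}))
	defer client.Close()

	var reply int

	// A plain argument is wrapped by WriteRequest into a one-element params array.
	if err = client.Call("Arith.Square", 7, &reply); err != nil {
		return err
	}

	// MsgpackSpecRpcMultiArgs is passed through as-is, so the peer sees two positional params.
	return client.Call("Arith.Multiply", codec.MsgpackSpecRpcMultiArgs{7, 8}, &reply)
}
```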
// A MsgpackContainer type specifies the different types of msgpackContainers.
type msgpackContainerType struct {
fixCutoff, bFixMin, b8, b16, b32 byte
// hasFixMin, has8, has8Always bool
}
var (
msgpackContainerRawLegacy = msgpackContainerType{
32, mpFixStrMin, 0, mpStr16, mpStr32,
}
msgpackContainerStr = msgpackContainerType{
32, mpFixStrMin, mpStr8, mpStr16, mpStr32, // true, true, false,
}
msgpackContainerBin = msgpackContainerType{
0, 0, mpBin8, mpBin16, mpBin32, // false, true, true,
}
msgpackContainerList = msgpackContainerType{
16, mpFixArrayMin, 0, mpArray16, mpArray32, // true, false, false,
}
msgpackContainerMap = msgpackContainerType{
16, mpFixMapMin, 0, mpMap16, mpMap32, // true, false, false,
}
)
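A rough sketch of how these cutoffs play out for string lengths; the byte values come from the msgpack descriptor constants above, not from code in this file:

```go
package msgpackexample

// containerLenExamples illustrates the length headers implied by the cutoffs above.
func containerLenExamples() (shortHdr, longHdr []byte) {
	// A 5-byte string fits under fixCutoff (32), so the fixstr range applies:
	// a single descriptor byte 0xa0|5 = 0xa5, followed by the 5 raw bytes.
	shortHdr = []byte{0xa5}
	// A 300-byte string is too long for fixstr (and for str8), so msgpackContainerStr
	// falls back to b16: mpStr16 (0xda) followed by a big-endian uint16 length.
	longHdr = []byte{0xda, 0x01, 0x2c} // 0x012c == 300
	return
}
```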
//--------------------------------------------------
// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
type MsgpackHandle struct {
binaryEncodingType
notJsonType
BasicHandle
// NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum.
NoFixedNum bool
// WriteExt controls whether the new spec is honored.
//
// With WriteExt=true, we can encode configured extensions with extension tags
// and encode string/[]byte/extensions in a way compatible with the new spec
// but incompatible with the old spec.
//
// For compatibility with the old spec, set WriteExt=false.
//
// With WriteExt=false:
// configured extensions are serialized as raw bytes (not msgpack extensions).
// reserved byte descriptors like Str8 and those enabling the new msgpack Binary type
// are not encoded.
WriteExt bool
// PositiveIntUnsigned says to encode positive integers as unsigned.
PositiveIntUnsigned bool
}
// Name returns the name of the handle: msgpack
func (h *MsgpackHandle) Name() string { return "msgpack" }
func (h *MsgpackHandle) desc(bd byte) string { return mpdesc(bd) }
// SetBytesExt sets an extension
func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
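A minimal round-trip sketch showing where these options get set; `NewEncoderBytes`/`NewDecoderBytes` are assumed from the rest of the package (they are not defined in this file):

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var mh codec.MsgpackHandle
	mh.WriteExt = true    // honor the new spec: str8/bin descriptors and real extensions
	mh.RawToString = true // decode raw/str data into Go strings

	in := map[string]interface{}{"n": 1, "b": []byte("raw")}

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &mh).Encode(in); err != nil {
		panic(err)
	}

	var out map[string]interface{}
	if err := codec.NewDecoderBytes(buf, &mh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", out)
}
```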
//--------------------------------------------------
type msgpackSpecRpcCodec struct {
*rpcCodec
}
// /////////////// Spec RPC Codec ///////////////////
func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
// WriteRequest can write to both a Go service, and other services that do
// not abide by the 1 argument rule of a Go service.
// We discriminate based on if the body is a MsgpackSpecRpcMultiArgs
var bodyArr []interface{}
if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
bodyArr = ([]interface{})(m)
} else {
bodyArr = []interface{}{body}
}
r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
return c.write(r2)
}
func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
var moe interface{}
if r.Error != "" {
moe = r.Error
}
if moe != nil && body != nil {
body = nil
}
r2 := []interface{}{1, uint32(r.Seq), moe, body}
return c.write(r2)
}
func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
return c.parseCustomHeader(1, &r.Seq, &r.Error)
}
func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
}
func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
if body == nil { // read and discard
return c.read(nil)
}
bodyArr := []interface{}{body}
return c.read(&bodyArr)
}
func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
if c.cls.Load().closed {
return io.ErrUnexpectedEOF
}
// We read the response header by hand
// so that the body can be decoded on its own from the stream at a later time.
const fia byte = 0x94 //four item array descriptor value
var ba [1]byte
var n int
for {
n, err = c.r.Read(ba[:])
if err != nil {
return
}
if n == 1 {
break
}
}
var b = ba[0]
if b != fia {
err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b))
} else {
err = c.read(&b)
if err == nil {
if b != expectTypeByte {
err = fmt.Errorf("%s - expecting %v but got %x/%s", msgBadDesc, expectTypeByte, b, mpdesc(b))
} else {
err = c.read(msgid)
if err == nil {
err = c.read(methodOrError)
}
}
}
}
return
}
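To make the framing concrete, the values produced by WriteRequest/WriteResponse above (and expected back by parseCustomHeader) have roughly this shape; the sequence number, method name and bodies are placeholders:

```go
package msgpackexample

// exampleFrames shows the shapes only; it is not code from this package.
func exampleFrames() (req, resp []interface{}) {
	// Request: a 4-element array (descriptor byte 0x94), message type 0,
	// then the sequence number, the method name, and the positional params.
	req = []interface{}{0, uint32(42), "Arith.Multiply", []interface{}{7, 8}}
	// Response: message type 1, the sequence number, an error value or nil, then the body.
	resp = []interface{}{1, uint32(42), nil, 56}
	return
}
```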
//--------------------------------------------------
// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol
// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
type msgpackSpecRpc struct{}
// MsgpackSpecRpc implements Rpc using the communication protocol defined in
// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
//
// See GoRpc documentation, for information on buffering for better performance.
var MsgpackSpecRpc msgpackSpecRpc
func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
}
func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
}
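A hedged sketch of wiring this codec into net/rpc on the server side; the `Arith` service is a placeholder, not part of this package:

```go
package rpcexample

import (
	"net"
	"net/rpc"

	"github.com/ugorji/go/codec"
)

// Arith is a placeholder service used only for illustration.
type Arith struct{}

func (Arith) Multiply(args []int, reply *int) error {
	*reply = args[0] * args[1]
	return nil
}

func serve(l net.Listener, h codec.Handle) error {
	srv := rpc.NewServer()
	if err := srv.Register(Arith{}); err != nil {
		return err
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			return err
		}
		// One ServerCodec per connection, all sharing the same Handle.
		go srv.ServeCodec(codec.MsgpackSpecRpc.ServerCodec(conn, h))
	}
}
```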

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,52 @@
//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath)
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"reflect"
)
type fastpathEMsgpackBytes struct {
rt reflect.Type
encfn func(*encoderMsgpackBytes, *encFnInfo, reflect.Value)
}
type fastpathDMsgpackBytes struct {
rt reflect.Type
decfn func(*decoderMsgpackBytes, *decFnInfo, reflect.Value)
}
type fastpathEsMsgpackBytes [0]fastpathEMsgpackBytes
type fastpathDsMsgpackBytes [0]fastpathDMsgpackBytes
func (helperEncDriverMsgpackBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderMsgpackBytes) bool {
return false
}
func (helperDecDriverMsgpackBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderMsgpackBytes) bool {
return false
}
func (helperEncDriverMsgpackBytes) fastpathEList() (v *fastpathEsMsgpackBytes) { return }
func (helperDecDriverMsgpackBytes) fastpathDList() (v *fastpathDsMsgpackBytes) { return }
type fastpathEMsgpackIO struct {
rt reflect.Type
encfn func(*encoderMsgpackIO, *encFnInfo, reflect.Value)
}
type fastpathDMsgpackIO struct {
rt reflect.Type
decfn func(*decoderMsgpackIO, *decFnInfo, reflect.Value)
}
type fastpathEsMsgpackIO [0]fastpathEMsgpackIO
type fastpathDsMsgpackIO [0]fastpathDMsgpackIO
func (helperEncDriverMsgpackIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderMsgpackIO) bool {
return false
}
func (helperDecDriverMsgpackIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderMsgpackIO) bool {
return false
}
func (helperEncDriverMsgpackIO) fastpathEList() (v *fastpathEsMsgpackIO) { return }
func (helperDecDriverMsgpackIO) fastpathDList() (v *fastpathDsMsgpackIO) { return }

File diff suppressed because it is too large

View file

@ -1,38 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import "reflect"
// This file exists, so that the files for specific formats do not all import reflect.
// This just helps us ensure that reflect package is isolated to a few files.
// SetInterfaceExt sets an extension
func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
// SetInterfaceExt sets an extension
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
// SetBytesExt sets an extension
func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
// SetBytesExt sets an extension
func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
// SetBytesExt sets an extension
func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
// func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
// return h.SetExt(rt, tag, &interfaceExtWrapper{InterfaceExt: ext})
// }

View file

@ -4,10 +4,11 @@
package codec
import (
"bufio"
"errors"
"io"
"net"
"net/rpc"
"sync/atomic"
)
var (
@ -28,57 +29,44 @@ type RPCOptions struct {
// RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls.
//
// Set RPCNoBuffer=true to turn buffering off.
//
// Buffering can still be done if buffered connections are passed in, or
// buffering is configured on the handle.
//
// Deprecated: Buffering should be configured at the Handle or by using a buffer Reader.
// Setting this has no effect anymore (after v1.2.12 - authored 2025-05-06)
RPCNoBuffer bool
}
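Since the flag above is now a no-op, a sketch of the suggested replacement: set the buffer sizes on the Handle itself. These BasicHandle fields are the ones consumed by the reset*IO methods later in this diff; the sizes are placeholders:

```go
package rpcexample

import (
	"io"
	"net/rpc"

	"github.com/ugorji/go/codec"
)

// newBufferedClientCodec buffers reads and writes via the Handle (sizes are placeholders).
func newBufferedClientCodec(conn io.ReadWriteCloser) rpc.ClientCodec {
	var mh codec.MsgpackHandle
	mh.ReaderBufferSize = 8192 // consumed by resetInIO later in this diff
	mh.WriterBufferSize = 8192 // consumed by resetOutIO later in this diff
	return codec.MsgpackSpecRpc.ClientCodec(conn, &mh)
}
```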
// rpcCodec defines the struct members and common methods.
type rpcCodec struct {
c io.Closer
r io.Reader
w io.Writer
f ioFlusher
c io.Closer
r io.Reader
w io.Writer
f ioFlusher
nc net.Conn
dec *Decoder
enc *Encoder
h Handle
cls atomicClsErr
cls atomic.Pointer[clsErr]
}
func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
return newRPCCodec2(conn, conn, conn, h)
}
func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec {
bh := h.getBasicHandle()
// if the writer can flush, ensure we leverage it, else
// we may hang waiting on read if write isn't flushed.
// var f ioFlusher
f, ok := w.(ioFlusher)
if !bh.RPCNoBuffer {
if bh.WriterBufferSize <= 0 {
if !ok { // a flusher means there's already a buffer
bw := bufio.NewWriter(w)
f, w = bw, bw
}
}
if bh.ReaderBufferSize <= 0 {
if _, ok = w.(ioBuffered); !ok {
r = bufio.NewReader(r)
}
}
}
return rpcCodec{
c: c,
w: w,
r: r,
f: f,
func newRPCCodec(conn io.ReadWriteCloser, h Handle) *rpcCodec {
nc, _ := conn.(net.Conn)
f, _ := conn.(ioFlusher)
rc := &rpcCodec{
h: h,
enc: NewEncoder(w, h),
dec: NewDecoder(r, h),
c: conn,
w: conn,
r: conn,
f: f,
nc: nc,
enc: NewEncoder(conn, h),
dec: NewDecoder(conn, h),
}
rc.cls.Store(new(clsErr))
return rc
}
func (c *rpcCodec) write(obj ...interface{}) (err error) {
@ -116,10 +104,16 @@ func (c *rpcCodec) write(obj ...interface{}) (err error) {
func (c *rpcCodec) read(obj interface{}) (err error) {
err = c.ready()
if err == nil {
//If nil is passed in, we should read and discard
// Setting ReadDeadline should not be necessary,
// especially since it only works for net.Conn (not generic ioReadCloser).
// if c.nc != nil {
// c.nc.SetReadDeadline(time.Now().Add(1 * time.Second))
// }
// Note: If nil is passed in, we should read and discard
if obj == nil {
// return c.dec.Decode(&obj)
err = c.dec.swallowErr()
err = panicToErr(c.dec, func() { c.dec.swallow() })
} else {
err = c.dec.Decode(obj)
}
@ -129,11 +123,11 @@ func (c *rpcCodec) read(obj interface{}) (err error) {
func (c *rpcCodec) Close() (err error) {
if c.c != nil {
cls := c.cls.load()
cls := c.cls.Load()
if !cls.closed {
cls.err = c.c.Close()
cls.closed = true
c.cls.store(cls)
// writing to same pointer could lead to a data race (always make new one)
cls = &clsErr{closed: true, err: c.c.Close()}
c.cls.Store(cls)
}
err = cls.err
}
@ -144,8 +138,8 @@ func (c *rpcCodec) ready() (err error) {
if c.c == nil {
err = errRpcNoConn
} else {
cls := c.cls.load()
if cls.closed {
cls := c.cls.Load()
if cls != nil && cls.closed {
if err = cls.err; err == nil {
err = errRpcIsClosed
}
@ -161,7 +155,7 @@ func (c *rpcCodec) ReadResponseBody(body interface{}) error {
// -------------------------------------
type goRpcCodec struct {
rpcCodec
*rpcCodec
}
func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {

97
vendor/github.com/ugorji/go/codec/simple.base.go generated vendored Normal file
View file

@ -0,0 +1,97 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"reflect"
)
const (
_ uint8 = iota
simpleVdNil = 1
simpleVdFalse = 2
simpleVdTrue = 3
simpleVdFloat32 = 4
simpleVdFloat64 = 5
// each lasts for 4 (ie n, n+1, n+2, n+3)
simpleVdPosInt = 8
simpleVdNegInt = 12
simpleVdTime = 24
// containers: each lasts for 8 (ie n, n+1, n+2, ... n+7)
simpleVdString = 216
simpleVdByteArray = 224
simpleVdArray = 232
simpleVdMap = 240
simpleVdExt = 248
)
var simpledescNames = map[byte]string{
simpleVdNil: "null",
simpleVdFalse: "false",
simpleVdTrue: "true",
simpleVdFloat32: "float32",
simpleVdFloat64: "float64",
simpleVdPosInt: "+int",
simpleVdNegInt: "-int",
simpleVdTime: "time",
simpleVdString: "string",
simpleVdByteArray: "binary",
simpleVdArray: "array",
simpleVdMap: "map",
simpleVdExt: "ext",
}
func simpledesc(bd byte) (s string) {
s = simpledescNames[bd]
if s == "" {
s = "unknown"
}
return
}
//------------------------------------
// SimpleHandle is a Handle for a very simple encoding format.
//
// simple is a simplistic codec similar to binc, but not as compact.
// - Encoding of a value is always preceded by the descriptor byte (bd)
// - True, false, nil are encoded fully in 1 byte (the descriptor)
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
// - Length of containers (strings, bytes, array, map, extensions)
// are encoded in 0, 1, 2, 4 or 8 bytes.
// Zero-length containers have no length encoded.
// For others, the number of length bytes is given by pow(2, (bd%8)-1)
// - maps are encoded as [bd] [length] [[key][value]]...
// - arrays are encoded as [bd] [length] [value]...
// - extensions are encoded as [bd] [length] [tag] [byte]...
// - strings/bytearrays are encoded as [bd] [length] [byte]...
// - time.Time are encoded as [bd] [length] [byte]...
//
// The full spec will be published soon.
type SimpleHandle struct {
binaryEncodingType
notJsonType
BasicHandle
// EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil
EncZeroValuesAsNil bool
}
// Name returns the name of the handle: simple
func (h *SimpleHandle) Name() string { return "simple" }
func (h *SimpleHandle) desc(bd byte) string { return simpledesc(bd) }
// SetBytesExt sets an extension
func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
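A minimal usage sketch for this handle; `NewEncoderBytes`/`NewDecoderBytes` are assumed from the rest of the package, and the commented byte layout follows the format notes above:

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var sh codec.SimpleHandle
	// sh.EncZeroValuesAsNil = true // would emit the nil descriptor for 0, "", false, etc.

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &sh).Encode("hi"); err != nil {
		panic(err)
	}
	// Per the notes above: string descriptor in its 1-byte-length form, then the
	// length, then the raw bytes - e.g. [0xd9 0x02 'h' 'i'].
	fmt.Printf("% x\n", buf)

	var out string
	if err := codec.NewDecoderBytes(buf, &sh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // hi
}
```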

File diff suppressed because it is too large

View file

@ -1,111 +1,65 @@
//go:build notmono || codec.notmono
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"io"
"math"
"reflect"
"time"
)
const (
_ uint8 = iota
simpleVdNil = 1
simpleVdFalse = 2
simpleVdTrue = 3
simpleVdFloat32 = 4
simpleVdFloat64 = 5
// each lasts for 4 (ie n, n+1, n+2, n+3)
simpleVdPosInt = 8
simpleVdNegInt = 12
simpleVdTime = 24
// containers: each lasts for 4 (ie n, n+1, n+2, ... n+7)
simpleVdString = 216
simpleVdByteArray = 224
simpleVdArray = 232
simpleVdMap = 240
simpleVdExt = 248
)
var simpledescNames = map[byte]string{
simpleVdNil: "null",
simpleVdFalse: "false",
simpleVdTrue: "true",
simpleVdFloat32: "float32",
simpleVdFloat64: "float64",
simpleVdPosInt: "+int",
simpleVdNegInt: "-int",
simpleVdTime: "time",
simpleVdString: "string",
simpleVdByteArray: "binary",
simpleVdArray: "array",
simpleVdMap: "map",
simpleVdExt: "ext",
}
func simpledesc(bd byte) (s string) {
s = simpledescNames[bd]
if s == "" {
s = "unknown"
}
return
}
type simpleEncDriver struct {
type simpleEncDriver[T encWriter] struct {
noBuiltInTypes
encDriverNoopContainerWriter
encDriverNoState
encDriverContainerNoTrackerT
encInit2er
h *SimpleHandle
e *encoderBase
// b [8]byte
e Encoder
w T
}
func (e *simpleEncDriver) encoder() *Encoder {
return &e.e
func (e *simpleEncDriver[T]) EncodeNil() {
e.w.writen1(simpleVdNil)
}
func (e *simpleEncDriver) EncodeNil() {
e.e.encWr.writen1(simpleVdNil)
}
func (e *simpleEncDriver) EncodeBool(b bool) {
func (e *simpleEncDriver[T]) EncodeBool(b bool) {
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && !b {
e.EncodeNil()
return
}
if b {
e.e.encWr.writen1(simpleVdTrue)
e.w.writen1(simpleVdTrue)
} else {
e.e.encWr.writen1(simpleVdFalse)
e.w.writen1(simpleVdFalse)
}
}
func (e *simpleEncDriver) EncodeFloat32(f float32) {
func (e *simpleEncDriver[T]) EncodeFloat32(f float32) {
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 {
e.EncodeNil()
return
}
e.e.encWr.writen1(simpleVdFloat32)
bigen.writeUint32(e.e.w(), math.Float32bits(f))
e.w.writen1(simpleVdFloat32)
e.w.writen4(bigen.PutUint32(math.Float32bits(f)))
}
func (e *simpleEncDriver) EncodeFloat64(f float64) {
func (e *simpleEncDriver[T]) EncodeFloat64(f float64) {
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 {
e.EncodeNil()
return
}
e.e.encWr.writen1(simpleVdFloat64)
bigen.writeUint64(e.e.w(), math.Float64bits(f))
e.w.writen1(simpleVdFloat64)
e.w.writen8(bigen.PutUint64(math.Float64bits(f)))
}
func (e *simpleEncDriver) EncodeInt(v int64) {
func (e *simpleEncDriver[T]) EncodeInt(v int64) {
if v < 0 {
e.encUint(uint64(-v), simpleVdNegInt)
} else {
@ -113,62 +67,62 @@ func (e *simpleEncDriver) EncodeInt(v int64) {
}
}
func (e *simpleEncDriver) EncodeUint(v uint64) {
func (e *simpleEncDriver[T]) EncodeUint(v uint64) {
e.encUint(v, simpleVdPosInt)
}
func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
func (e *simpleEncDriver[T]) encUint(v uint64, bd uint8) {
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == 0 {
e.EncodeNil()
return
}
if v <= math.MaxUint8 {
e.e.encWr.writen2(bd, uint8(v))
e.w.writen2(bd, uint8(v))
} else if v <= math.MaxUint16 {
e.e.encWr.writen1(bd + 1)
bigen.writeUint16(e.e.w(), uint16(v))
e.w.writen1(bd + 1)
e.w.writen2(bigen.PutUint16(uint16(v)))
} else if v <= math.MaxUint32 {
e.e.encWr.writen1(bd + 2)
bigen.writeUint32(e.e.w(), uint32(v))
e.w.writen1(bd + 2)
e.w.writen4(bigen.PutUint32(uint32(v)))
} else { // if v <= math.MaxUint64 {
e.e.encWr.writen1(bd + 3)
bigen.writeUint64(e.e.w(), v)
e.w.writen1(bd + 3)
e.w.writen8(bigen.PutUint64(v))
}
}
func (e *simpleEncDriver) encLen(bd byte, length int) {
func (e *simpleEncDriver[T]) encLen(bd byte, length int) {
if length == 0 {
e.e.encWr.writen1(bd)
e.w.writen1(bd)
} else if length <= math.MaxUint8 {
e.e.encWr.writen1(bd + 1)
e.e.encWr.writen1(uint8(length))
e.w.writen1(bd + 1)
e.w.writen1(uint8(length))
} else if length <= math.MaxUint16 {
e.e.encWr.writen1(bd + 2)
bigen.writeUint16(e.e.w(), uint16(length))
e.w.writen1(bd + 2)
e.w.writen2(bigen.PutUint16(uint16(length)))
} else if int64(length) <= math.MaxUint32 {
e.e.encWr.writen1(bd + 3)
bigen.writeUint32(e.e.w(), uint32(length))
e.w.writen1(bd + 3)
e.w.writen4(bigen.PutUint32(uint32(length)))
} else {
e.e.encWr.writen1(bd + 4)
bigen.writeUint64(e.e.w(), uint64(length))
e.w.writen1(bd + 4)
e.w.writen8(bigen.PutUint64(uint64(length)))
}
}
func (e *simpleEncDriver) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
func (e *simpleEncDriver[T]) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
var bs0, bs []byte
if ext == SelfExt {
bs0 = e.e.blist.get(1024)
bs = bs0
e.e.sideEncode(v, basetype, &bs)
sideEncode(e.h, &e.h.sideEncPool, func(se encoderI) { oneOffEncode(se, v, &bs, basetype, true) })
} else {
bs = ext.WriteExt(v)
}
if bs == nil {
e.EncodeNil()
e.writeNilBytes()
goto END
}
e.encodeExtPreamble(uint8(xtag), len(bs))
e.e.encWr.writeb(bs)
e.w.writeb(bs)
END:
if ext == SelfExt {
e.e.blist.put(bs)
@ -178,25 +132,35 @@ END:
}
}
func (e *simpleEncDriver) EncodeRawExt(re *RawExt) {
func (e *simpleEncDriver[T]) EncodeRawExt(re *RawExt) {
e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
e.e.encWr.writeb(re.Data)
e.w.writeb(re.Data)
}
func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
func (e *simpleEncDriver[T]) encodeExtPreamble(xtag byte, length int) {
e.encLen(simpleVdExt, length)
e.e.encWr.writen1(xtag)
e.w.writen1(xtag)
}
func (e *simpleEncDriver) WriteArrayStart(length int) {
func (e *simpleEncDriver[T]) WriteArrayStart(length int) {
e.encLen(simpleVdArray, length)
}
func (e *simpleEncDriver) WriteMapStart(length int) {
func (e *simpleEncDriver[T]) WriteMapStart(length int) {
e.encLen(simpleVdMap, length)
}
func (e *simpleEncDriver) EncodeString(v string) {
func (e *simpleEncDriver[T]) WriteArrayEmpty() {
// e.WriteArrayStart(0) = e.encLen(simpleVdArray, 0)
e.w.writen1(simpleVdArray)
}
func (e *simpleEncDriver[T]) WriteMapEmpty() {
// e.WriteMapStart(0) = e.encLen(simpleVdMap, 0)
e.w.writen1(simpleVdMap)
}
func (e *simpleEncDriver[T]) EncodeString(v string) {
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == "" {
e.EncodeNil()
return
@ -206,57 +170,88 @@ func (e *simpleEncDriver) EncodeString(v string) {
} else {
e.encLen(simpleVdString, len(v))
}
e.e.encWr.writestr(v)
e.w.writestr(v)
}
func (e *simpleEncDriver) EncodeStringBytesRaw(v []byte) {
func (e *simpleEncDriver[T]) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) }
func (e *simpleEncDriver[T]) EncodeStringBytesRaw(v []byte) {
// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil {
e.encLen(simpleVdByteArray, len(v))
e.w.writeb(v)
}
func (e *simpleEncDriver[T]) EncodeBytes(v []byte) {
if v == nil {
e.EncodeNil()
e.writeNilBytes()
return
}
e.encLen(simpleVdByteArray, len(v))
e.e.encWr.writeb(v)
e.EncodeStringBytesRaw(v)
}
func (e *simpleEncDriver) EncodeTime(t time.Time) {
func (e *simpleEncDriver[T]) encodeNilBytes() {
b := byte(simpleVdNil)
if e.h.NilCollectionToZeroLength {
b = simpleVdArray
}
e.w.writen1(b)
}
func (e *simpleEncDriver[T]) writeNilOr(v byte) {
if !e.h.NilCollectionToZeroLength {
v = simpleVdNil
}
e.w.writen1(v)
}
func (e *simpleEncDriver[T]) writeNilArray() {
e.writeNilOr(simpleVdArray)
}
func (e *simpleEncDriver[T]) writeNilMap() {
e.writeNilOr(simpleVdMap)
}
func (e *simpleEncDriver[T]) writeNilBytes() {
e.writeNilOr(simpleVdByteArray)
}
func (e *simpleEncDriver[T]) EncodeTime(t time.Time) {
// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() {
if t.IsZero() {
e.EncodeNil()
return
}
v, err := t.MarshalBinary()
e.e.onerror(err)
e.e.encWr.writen2(simpleVdTime, uint8(len(v)))
e.e.encWr.writeb(v)
halt.onerror(err)
e.w.writen2(simpleVdTime, uint8(len(v)))
e.w.writeb(v)
}
//------------------------------------
type simpleDecDriver struct {
type simpleDecDriver[T decReader] struct {
h *SimpleHandle
d *decoderBase
r T
bdAndBdread
_ bool
// bytes bool
noBuiltInTypes
// decDriverNoopNumberHelper
decDriverNoopContainerReader
decDriverNoopNumberHelper
d Decoder
decInit2er
// ds interface{} // must be *decoder[simpleDecDriverM[bytes...]]
}
func (d *simpleDecDriver) decoder() *Decoder {
return &d.d
}
func (d *simpleDecDriver) descBd() string {
return sprintf("%v (%s)", d.bd, simpledesc(d.bd))
}
func (d *simpleDecDriver) readNextBd() {
d.bd = d.d.decRd.readn1()
func (d *simpleDecDriver[T]) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
}
func (d *simpleDecDriver) advanceNil() (null bool) {
func (d *simpleDecDriver[T]) advanceNil() (null bool) {
if !d.bdRead {
d.readNextBd()
}
@ -267,7 +262,7 @@ func (d *simpleDecDriver) advanceNil() (null bool) {
return
}
func (d *simpleDecDriver) ContainerType() (vt valueType) {
func (d *simpleDecDriver[T]) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
@ -291,88 +286,90 @@ func (d *simpleDecDriver) ContainerType() (vt valueType) {
return valueTypeUnset
}
func (d *simpleDecDriver) TryNil() bool {
func (d *simpleDecDriver[T]) TryNil() bool {
return d.advanceNil()
}
func (d *simpleDecDriver) decFloat() (f float64, ok bool) {
func (d *simpleDecDriver[T]) decFloat() (f float64, ok bool) {
ok = true
switch d.bd {
case simpleVdFloat32:
f = float64(math.Float32frombits(bigen.Uint32(d.d.decRd.readn4())))
f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4())))
case simpleVdFloat64:
f = math.Float64frombits(bigen.Uint64(d.d.decRd.readn8()))
f = math.Float64frombits(bigen.Uint64(d.r.readn8()))
default:
ok = false
}
return
}
func (d *simpleDecDriver) decInteger() (ui uint64, neg, ok bool) {
func (d *simpleDecDriver[T]) decInteger() (ui uint64, neg, ok bool) {
ok = true
switch d.bd {
case simpleVdPosInt:
ui = uint64(d.d.decRd.readn1())
ui = uint64(d.r.readn1())
case simpleVdPosInt + 1:
ui = uint64(bigen.Uint16(d.d.decRd.readn2()))
ui = uint64(bigen.Uint16(d.r.readn2()))
case simpleVdPosInt + 2:
ui = uint64(bigen.Uint32(d.d.decRd.readn4()))
ui = uint64(bigen.Uint32(d.r.readn4()))
case simpleVdPosInt + 3:
ui = uint64(bigen.Uint64(d.d.decRd.readn8()))
ui = uint64(bigen.Uint64(d.r.readn8()))
case simpleVdNegInt:
ui = uint64(d.d.decRd.readn1())
ui = uint64(d.r.readn1())
neg = true
case simpleVdNegInt + 1:
ui = uint64(bigen.Uint16(d.d.decRd.readn2()))
ui = uint64(bigen.Uint16(d.r.readn2()))
neg = true
case simpleVdNegInt + 2:
ui = uint64(bigen.Uint32(d.d.decRd.readn4()))
ui = uint64(bigen.Uint32(d.r.readn4()))
neg = true
case simpleVdNegInt + 3:
ui = uint64(bigen.Uint64(d.d.decRd.readn8()))
ui = uint64(bigen.Uint64(d.r.readn8()))
neg = true
default:
ok = false
// d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
// halt.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
}
// DO NOT do this check below, because callers may only want the unsigned value:
//
// if ui > math.MaxInt64 {
// d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
// halt.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
// return
// }
return
}
func (d *simpleDecDriver) DecodeInt64() (i int64) {
func (d *simpleDecDriver[T]) DecodeInt64() (i int64) {
if d.advanceNil() {
return
}
i = decNegintPosintFloatNumberHelper{&d.d}.int64(d.decInteger())
v1, v2, v3 := d.decInteger()
i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, false)
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeUint64() (ui uint64) {
func (d *simpleDecDriver[T]) DecodeUint64() (ui uint64) {
if d.advanceNil() {
return
}
ui = decNegintPosintFloatNumberHelper{&d.d}.uint64(d.decInteger())
ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger())
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeFloat64() (f float64) {
func (d *simpleDecDriver[T]) DecodeFloat64() (f float64) {
if d.advanceNil() {
return
}
f = decNegintPosintFloatNumberHelper{&d.d}.float64(d.decFloat())
v1, v2 := d.decFloat()
f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, false)
d.bdRead = false
return
}
// bool can be decoded from bool only (single byte).
func (d *simpleDecDriver) DecodeBool() (b bool) {
func (d *simpleDecDriver[T]) DecodeBool() (b bool) {
if d.advanceNil() {
return
}
@ -380,13 +377,13 @@ func (d *simpleDecDriver) DecodeBool() (b bool) {
} else if d.bd == simpleVdTrue {
b = true
} else {
d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd)
halt.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd)
}
d.bdRead = false
return
}
func (d *simpleDecDriver) ReadMapStart() (length int) {
func (d *simpleDecDriver[T]) ReadMapStart() (length int) {
if d.advanceNil() {
return containerLenNil
}
@ -394,7 +391,7 @@ func (d *simpleDecDriver) ReadMapStart() (length int) {
return d.decLen()
}
func (d *simpleDecDriver) ReadArrayStart() (length int) {
func (d *simpleDecDriver[T]) ReadArrayStart() (length int) {
if d.advanceNil() {
return containerLenNil
}
@ -402,131 +399,128 @@ func (d *simpleDecDriver) ReadArrayStart() (length int) {
return d.decLen()
}
func (d *simpleDecDriver) uint2Len(ui uint64) int {
func (d *simpleDecDriver[T]) uint2Len(ui uint64) int {
if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("overflow integer: %v", ui)
halt.errorf("overflow integer: %v", ui)
}
return int(ui)
}
func (d *simpleDecDriver) decLen() int {
func (d *simpleDecDriver[T]) decLen() int {
switch d.bd & 7 { // d.bd % 8 {
case 0:
return 0
case 1:
return int(d.d.decRd.readn1())
return int(d.r.readn1())
case 2:
return int(bigen.Uint16(d.d.decRd.readn2()))
return int(bigen.Uint16(d.r.readn2()))
case 3:
return d.uint2Len(uint64(bigen.Uint32(d.d.decRd.readn4())))
return d.uint2Len(uint64(bigen.Uint32(d.r.readn4())))
case 4:
return d.uint2Len(bigen.Uint64(d.d.decRd.readn8()))
return d.uint2Len(bigen.Uint64(d.r.readn8()))
}
d.d.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
halt.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
return -1
}
func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) {
return d.DecodeBytes(nil)
func (d *simpleDecDriver[T]) DecodeStringAsBytes() ([]byte, dBytesAttachState) {
return d.DecodeBytes()
}
func (d *simpleDecDriver) DecodeBytes(bs []byte) (bsOut []byte) {
d.d.decByteState = decByteStateNone
func (d *simpleDecDriver[T]) DecodeBytes() (bs []byte, state dBytesAttachState) {
if d.advanceNil() {
return
}
var cond bool
// check if an "array" of uint8's (see ContainerType for how to infer if an array)
if d.bd >= simpleVdArray && d.bd <= simpleVdMap+4 {
if bs == nil {
d.d.decByteState = decByteStateReuseBuf
bs = d.d.b[:]
}
if d.bd >= simpleVdArray && d.bd <= simpleVdArray+4 {
slen := d.ReadArrayStart()
var changed bool
if bs, changed = usableByteSlice(bs, slen); changed {
d.d.decByteState = decByteStateNone
}
bs, cond = usableByteSlice(d.d.buf, slen)
for i := 0; i < len(bs); i++ {
bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8))
}
for i := len(bs); i < slen; i++ {
bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8)))
}
return bs
if cond {
d.d.buf = bs
}
state = dBytesAttachBuffer
return
}
clen := d.decLen()
d.bdRead = false
if d.d.zerocopy() {
d.d.decByteState = decByteStateZerocopy
return d.d.decRd.rb.readx(uint(clen))
}
if bs == nil {
d.d.decByteState = decByteStateReuseBuf
bs = d.d.b[:]
}
return decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, bs)
bs, cond = d.r.readxb(uint(clen))
state = d.d.attachState(cond)
return
}
func (d *simpleDecDriver) DecodeTime() (t time.Time) {
func (d *simpleDecDriver[T]) DecodeTime() (t time.Time) {
if d.advanceNil() {
return
}
if d.bd != simpleVdTime {
d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd)
halt.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd)
}
d.bdRead = false
clen := uint(d.d.decRd.readn1())
b := d.d.decRd.readx(clen)
d.d.onerror((&t).UnmarshalBinary(b))
clen := uint(d.r.readn1())
b := d.r.readx(clen)
halt.onerror((&t).UnmarshalBinary(b))
return
}
func (d *simpleDecDriver) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
if xtag > 0xff {
d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
}
if d.advanceNil() {
func (d *simpleDecDriver[T]) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
xbs, _, _, ok := d.decodeExtV(ext != nil, xtag)
if !ok {
return
}
xbs, realxtag1, zerocopy := d.decodeExtV(ext != nil, uint8(xtag))
realxtag := uint64(realxtag1)
if ext == nil {
re := rv.(*RawExt)
re.Tag = realxtag
re.setData(xbs, zerocopy)
} else if ext == SelfExt {
d.d.sideDecode(rv, basetype, xbs)
if ext == SelfExt {
sideDecode(d.h, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv, xbs, basetype, true) })
} else {
ext.ReadExt(rv, xbs)
}
}
func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xbs []byte, xtag byte, zerocopy bool) {
func (d *simpleDecDriver[T]) DecodeRawExt(re *RawExt) {
xbs, realxtag, state, ok := d.decodeExtV(false, 0)
if !ok {
return
}
re.Tag = uint64(realxtag)
re.setData(xbs, state >= dBytesAttachViewZerocopy)
}
func (d *simpleDecDriver[T]) decodeExtV(verifyTag bool, xtagIn uint64) (xbs []byte, xtag byte, bstate dBytesAttachState, ok bool) {
if xtagIn > 0xff {
halt.errorf("ext: tag must be <= 0xff; got: %v", xtagIn)
}
if d.advanceNil() {
return
}
tag := uint8(xtagIn)
switch d.bd {
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
l := d.decLen()
xtag = d.d.decRd.readn1()
xtag = d.r.readn1()
if verifyTag && xtag != tag {
d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag)
}
if d.d.bytes {
xbs = d.d.decRd.rb.readx(uint(l))
zerocopy = true
} else {
xbs = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:])
halt.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag)
}
xbs, ok = d.r.readxb(uint(l))
bstate = d.d.attachState(ok)
case simpleVdByteArray, simpleVdByteArray + 1,
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
xbs = d.DecodeBytes(nil)
xbs, bstate = d.DecodeBytes()
default:
d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd)
halt.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd)
}
d.bdRead = false
ok = true
return
}
func (d *simpleDecDriver) DecodeNaked() {
func (d *simpleDecDriver[T]) DecodeNaked() {
if !d.bdRead {
d.readNextBd()
}
@ -566,19 +560,20 @@ func (d *simpleDecDriver) DecodeNaked() {
case simpleVdString, simpleVdString + 1,
simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
n.v = valueTypeString
n.s = d.d.stringZC(d.DecodeStringAsBytes())
n.s = d.d.detach2Str(d.DecodeStringAsBytes())
case simpleVdByteArray, simpleVdByteArray + 1,
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
d.d.fauxUnionReadRawBytes(false)
d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) //, d.h.ZeroCopy)
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
n.v = valueTypeExt
l := d.decLen()
n.u = uint64(d.d.decRd.readn1())
if d.d.bytes {
n.l = d.d.decRd.rb.readx(uint(l))
} else {
n.l = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:])
}
n.u = uint64(d.r.readn1())
n.l = d.r.readx(uint(l))
// MARKER: not necessary to detach for extensions
// var useBuf bool
// n.l, useBuf = d.r.readxb(uint(l))
// n.a = d.d.attachState(useBuf)
// n.l = d.d.detach2Bytes(n.l, nil, n.a)
case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2,
simpleVdArray + 3, simpleVdArray + 4:
n.v = valueTypeArray
@ -587,7 +582,7 @@ func (d *simpleDecDriver) DecodeNaked() {
n.v = valueTypeMap
decodeFurther = true
default:
d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd)
halt.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd)
}
if !decodeFurther {
@ -595,32 +590,18 @@ func (d *simpleDecDriver) DecodeNaked() {
}
}
func (d *simpleDecDriver) nextValueBytes(v0 []byte) (v []byte) {
func (d *simpleDecDriver[T]) nextValueBytes() (v []byte) {
if !d.bdRead {
d.readNextBd()
}
v = v0
var h = decNextValueBytesHelper{d: &d.d}
var cursor = d.d.rb.c - 1
h.append1(&v, d.bd)
v = d.nextValueBytesBdReadR(v)
d.r.startRecording()
d.nextValueBytesBdReadR()
v = d.r.stopRecording()
d.bdRead = false
h.bytesRdV(&v, cursor)
return
}
func (d *simpleDecDriver) nextValueBytesR(v0 []byte) (v []byte) {
d.readNextBd()
v = v0
var h = decNextValueBytesHelper{d: &d.d}
h.append1(&v, d.bd)
return d.nextValueBytesBdReadR(v)
}
func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
v = v0
var h = decNextValueBytesHelper{d: &d.d}
func (d *simpleDecDriver[T]) nextValueBytesBdReadR() {
c := d.bd
var length uint
@ -629,38 +610,33 @@ func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
case simpleVdNil, simpleVdFalse, simpleVdTrue, simpleVdString, simpleVdByteArray:
// pass
case simpleVdPosInt, simpleVdNegInt:
h.append1(&v, d.d.decRd.readn1())
d.r.readn1()
case simpleVdPosInt + 1, simpleVdNegInt + 1:
h.appendN(&v, d.d.decRd.readx(2)...)
d.r.skip(2)
case simpleVdPosInt + 2, simpleVdNegInt + 2, simpleVdFloat32:
h.appendN(&v, d.d.decRd.readx(4)...)
d.r.skip(4)
case simpleVdPosInt + 3, simpleVdNegInt + 3, simpleVdFloat64:
h.appendN(&v, d.d.decRd.readx(8)...)
d.r.skip(8)
case simpleVdTime:
c = d.d.decRd.readn1()
h.append1(&v, c)
h.appendN(&v, d.d.decRd.readx(uint(c))...)
c = d.r.readn1()
d.r.skip(uint(c))
default:
switch c & 7 { // c % 8 {
case 0:
length = 0
case 1:
b := d.d.decRd.readn1()
b := d.r.readn1()
length = uint(b)
h.append1(&v, b)
case 2:
x := d.d.decRd.readn2()
x := d.r.readn2()
length = uint(bigen.Uint16(x))
h.appendN(&v, x[:]...)
case 3:
x := d.d.decRd.readn4()
x := d.r.readn4()
length = uint(bigen.Uint32(x))
h.appendN(&v, x[:]...)
case 4:
x := d.d.decRd.readn8()
x := d.r.readn8()
length = uint(bigen.Uint64(x))
h.appendN(&v, x[:]...)
}
bExt := c >= simpleVdExt && c <= simpleVdExt+7
@ -670,11 +646,11 @@ func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
bMap := c >= simpleVdMap && c <= simpleVdMap+7
if !(bExt || bStr || bByteArray || bArray || bMap) {
d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, c)
halt.errorf("cannot infer value - %s 0x%x", msgBadDesc, c)
}
if bExt {
h.append1(&v, d.d.decRd.readn1()) // tag
d.r.readn1() // tag
}
if length == 0 {
@ -683,68 +659,91 @@ func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
if bArray {
for i := uint(0); i < length; i++ {
v = d.nextValueBytesR(v)
d.readNextBd()
d.nextValueBytesBdReadR()
}
} else if bMap {
for i := uint(0); i < length; i++ {
v = d.nextValueBytesR(v)
v = d.nextValueBytesR(v)
d.readNextBd()
d.nextValueBytesBdReadR()
d.readNextBd()
d.nextValueBytesBdReadR()
}
} else {
h.appendN(&v, d.d.decRd.readx(length)...)
d.r.skip(length)
}
}
return
}
//------------------------------------
// SimpleHandle is a Handle for a very simple encoding format.
// ----
//
// simple is a simplistic codec similar to binc, but not as compact.
// - Encoding of a value is always preceded by the descriptor byte (bd)
// - True, false, nil are encoded fully in 1 byte (the descriptor)
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
// - Length of containers (strings, bytes, array, map, extensions)
// are encoded in 0, 1, 2, 4 or 8 bytes.
// Zero-length containers have no length encoded.
// For others, the number of bytes is given by pow(2, bd%3)
// - maps are encoded as [bd] [length] [[key][value]]...
// - arrays are encoded as [bd] [length] [value]...
// - extensions are encoded as [bd] [length] [tag] [byte]...
// - strings/bytearrays are encoded as [bd] [length] [byte]...
// - time.Time are encoded as [bd] [length] [byte]...
// The following below are similar across all format files (except for the format name).
//
// The full spec will be published soon.
type SimpleHandle struct {
binaryEncodingType
BasicHandle
// EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil
EncZeroValuesAsNil bool
// We keep them together here, so that we can easily copy and compare.
// ----
func (d *simpleEncDriver[T]) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) {
callMake(&d.w)
d.h = hh.(*SimpleHandle)
d.e = shared
if shared.bytes {
fp = simpleFpEncBytes
} else {
fp = simpleFpEncIO
}
// d.w.init()
d.init2(enc)
return
}
// Name returns the name of the handle: simple
func (h *SimpleHandle) Name() string { return "simple" }
func (e *simpleEncDriver[T]) writeBytesAsis(b []byte) { e.w.writeb(b) }
func (h *SimpleHandle) desc(bd byte) string { return simpledesc(bd) }
func (e *simpleEncDriver[T]) writerEnd() { e.w.end() }
func (h *SimpleHandle) newEncDriver() encDriver {
var e = &simpleEncDriver{h: h}
e.e.e = e
e.e.init(h)
e.reset()
return e
func (e *simpleEncDriver[T]) resetOutBytes(out *[]byte) {
e.w.resetBytes(*out, out)
}
func (h *SimpleHandle) newDecDriver() decDriver {
d := &simpleDecDriver{h: h}
d.d.d = d
d.d.init(h)
d.reset()
return d
func (e *simpleEncDriver[T]) resetOutIO(out io.Writer) {
e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist)
}
var _ decDriver = (*simpleDecDriver)(nil)
var _ encDriver = (*simpleEncDriver)(nil)
// ----
func (d *simpleDecDriver[T]) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) {
callMake(&d.r)
d.h = hh.(*SimpleHandle)
d.d = shared
if shared.bytes {
fp = simpleFpDecBytes
} else {
fp = simpleFpDecIO
}
// d.r.init()
d.init2(dec)
return
}
func (d *simpleDecDriver[T]) NumBytesRead() int {
return int(d.r.numread())
}
func (d *simpleDecDriver[T]) resetInBytes(in []byte) {
d.r.resetBytes(in)
}
func (d *simpleDecDriver[T]) resetInIO(r io.Reader) {
d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist)
}
// ---- (custom stanza)
func (d *simpleDecDriver[T]) descBd() string {
return sprintf("%v (%s)", d.bd, simpledesc(d.bd))
}
func (d *simpleDecDriver[T]) DecodeFloat32() (f float32) {
return float32(chkOvf.Float32V(d.DecodeFloat64()))
}

File diff suppressed because it is too large

View file

@ -0,0 +1,52 @@
//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath)
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"reflect"
)
type fastpathESimpleBytes struct {
rt reflect.Type
encfn func(*encoderSimpleBytes, *encFnInfo, reflect.Value)
}
type fastpathDSimpleBytes struct {
rt reflect.Type
decfn func(*decoderSimpleBytes, *decFnInfo, reflect.Value)
}
type fastpathEsSimpleBytes [0]fastpathESimpleBytes
type fastpathDsSimpleBytes [0]fastpathDSimpleBytes
func (helperEncDriverSimpleBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderSimpleBytes) bool {
return false
}
func (helperDecDriverSimpleBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderSimpleBytes) bool {
return false
}
func (helperEncDriverSimpleBytes) fastpathEList() (v *fastpathEsSimpleBytes) { return }
func (helperDecDriverSimpleBytes) fastpathDList() (v *fastpathDsSimpleBytes) { return }
type fastpathESimpleIO struct {
rt reflect.Type
encfn func(*encoderSimpleIO, *encFnInfo, reflect.Value)
}
type fastpathDSimpleIO struct {
rt reflect.Type
decfn func(*decoderSimpleIO, *decFnInfo, reflect.Value)
}
type fastpathEsSimpleIO [0]fastpathESimpleIO
type fastpathDsSimpleIO [0]fastpathDSimpleIO
func (helperEncDriverSimpleIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderSimpleIO) bool {
return false
}
func (helperDecDriverSimpleIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderSimpleIO) bool {
return false
}
func (helperEncDriverSimpleIO) fastpathEList() (v *fastpathEsSimpleIO) { return }
func (helperDecDriverSimpleIO) fastpathDList() (v *fastpathDsSimpleIO) { return }

View file

@ -1,148 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from sort-slice.go.tmpl - DO NOT EDIT.
package codec
import (
"bytes"
"reflect"
"time"
)
type stringSlice []string
func (p stringSlice) Len() int { return len(p) }
func (p stringSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p stringSlice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type uint8Slice []uint8
func (p uint8Slice) Len() int { return len(p) }
func (p uint8Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p uint8Slice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type uint64Slice []uint64
func (p uint64Slice) Len() int { return len(p) }
func (p uint64Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p uint64Slice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type intSlice []int
func (p intSlice) Len() int { return len(p) }
func (p intSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p intSlice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type int32Slice []int32
func (p int32Slice) Len() int { return len(p) }
func (p int32Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p int32Slice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type stringRv struct {
v string
r reflect.Value
}
type stringRvSlice []stringRv
func (p stringRvSlice) Len() int { return len(p) }
func (p stringRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p stringRvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type stringIntf struct {
v string
i interface{}
}
type stringIntfSlice []stringIntf
func (p stringIntfSlice) Len() int { return len(p) }
func (p stringIntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p stringIntfSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type float64Rv struct {
v float64
r reflect.Value
}
type float64RvSlice []float64Rv
func (p float64RvSlice) Len() int { return len(p) }
func (p float64RvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p float64RvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v || isNaN64(p[uint(i)].v) && !isNaN64(p[uint(j)].v)
}
type uint64Rv struct {
v uint64
r reflect.Value
}
type uint64RvSlice []uint64Rv
func (p uint64RvSlice) Len() int { return len(p) }
func (p uint64RvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p uint64RvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type int64Rv struct {
v int64
r reflect.Value
}
type int64RvSlice []int64Rv
func (p int64RvSlice) Len() int { return len(p) }
func (p int64RvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p int64RvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type timeRv struct {
v time.Time
r reflect.Value
}
type timeRvSlice []timeRv
func (p timeRvSlice) Len() int { return len(p) }
func (p timeRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p timeRvSlice) Less(i, j int) bool {
return p[uint(i)].v.Before(p[uint(j)].v)
}
type bytesRv struct {
v []byte
r reflect.Value
}
type bytesRvSlice []bytesRv
func (p bytesRvSlice) Len() int { return len(p) }
func (p bytesRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p bytesRvSlice) Less(i, j int) bool {
return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1
}
type bytesIntf struct {
v []byte
i interface{}
}
type bytesIntfSlice []bytesIntf
func (p bytesIntfSlice) Len() int { return len(p) }
func (p bytesIntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p bytesIntfSlice) Less(i, j int) bool {
return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1
}

View file

@ -1,68 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from sort-slice.go.tmpl - DO NOT EDIT.
{{/*
xxxSlice
xxxIntf
xxxIntfSlice
xxxRv
xxxRvSlice
I'm now going to create them for
- sortables
- sortablesplus
With the parameters passed in sortables or sortablesplus,
'time, 'bytes' are special, and correspond to time.Time and []byte respectively.
*/}}
package codec
import (
"time"
"reflect"
"bytes"
)
{{/* func init() { _ = time.Unix } */}}
{{define "T"}}
func (p {{ .Type }}) Len() int { return len(p) }
func (p {{ .Type }}) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p {{ .Type }}) Less(i, j int) bool {
{{ if eq .Kind "bool" }} return !p[uint(i)]{{.V}} && p[uint(j)]{{.V}}
{{ else if eq .Kind "float32" }} return p[uint(i)]{{.V}} < p[uint(j)]{{.V}} || isNaN32(p[uint(i)]{{.V}}) && !isNaN32(p[uint(j)]{{.V}})
{{ else if eq .Kind "float64" }} return p[uint(i)]{{.V}} < p[uint(j)]{{.V}} || isNaN64(p[uint(i)]{{.V}}) && !isNaN64(p[uint(j)]{{.V}})
{{ else if eq .Kind "time" }} return p[uint(i)]{{.V}}.Before(p[uint(j)]{{.V}})
{{ else if eq .Kind "bytes" }} return bytes.Compare(p[uint(i)]{{.V}}, p[uint(j)]{{.V}}) == -1
{{ else }} return p[uint(i)]{{.V}} < p[uint(j)]{{.V}}
{{ end -}}
}
{{end}}
{{range $i, $v := sortables }}{{ $t := tshort $v }}
type {{ $v }}Slice []{{ $t }}
{{template "T" args "Kind" $v "Type" (print $v "Slice") "V" ""}}
{{end}}
{{range $i, $v := sortablesplus }}{{ $t := tshort $v }}
type {{ $v }}Rv struct {
v {{ $t }}
r reflect.Value
}
type {{ $v }}RvSlice []{{ $v }}Rv
{{template "T" args "Kind" $v "Type" (print $v "RvSlice") "V" ".v"}}
{{if eq $v "bytes" "string" -}}
type {{ $v }}Intf struct {
v {{ $t }}
i interface{}
}
type {{ $v }}IntfSlice []{{ $v }}Intf
{{template "T" args "Kind" $v "Type" (print $v "IntfSlice") "V" ".v"}}
{{end}}
{{end}}
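
The deleted template above is driven by generator-supplied helpers (sortables, sortablesplus, tshort, args). A rough sketch of how such a template could be executed with text/template; the FuncMap contents below are assumptions for illustration only, not the project's actual generator:

```
package main

import (
	"os"
	"text/template"
)

// Hypothetical stand-ins for the generator's template helpers; the real ones
// live in the codec build tooling and may differ.
var funcs = template.FuncMap{
	"sortables":     func() []string { return []string{"string", "uint64", "time"} },
	"sortablesplus": func() []string { return []string{"float64", "bytes"} },
	"tshort": func(v string) string {
		switch v {
		case "bytes":
			return "[]byte"
		case "time":
			return "time.Time"
		}
		return v
	},
}

const src = `{{range $i, $v := sortables}}type {{$v}}Slice []{{tshort $v}}
{{end}}`

func main() {
	t := template.Must(template.New("sort-slice").Funcs(funcs).Parse(src))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```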


@ -3,10 +3,14 @@
package codec
import "io"
import (
"io"
)
const maxConsecutiveEmptyWrites = 16 // 2 is sufficient, 16 is enough, 64 is optimal
// encWriter abstracts writing to a byte array or to an io.Writer.
type encWriter interface {
type encWriterI interface {
writeb([]byte)
writestr(string)
writeqstr(string) // write string wrapped in quotes ie "..."
@ -17,7 +21,11 @@ type encWriter interface {
writen4([4]byte)
writen8([8]byte)
// isBytes() bool
end()
resetIO(w io.Writer, bufsize int, blist *bytesFreeList)
resetBytes(in []byte, out *[]byte)
}
// ---------------------------------------------
@ -32,16 +40,18 @@ type bufioEncWriter struct {
b [16]byte // scratch buffer and padding (cache-aligned)
}
func (z *bufioEncWriter) reset(w io.Writer, bufsize int, blist *bytesFreelist) {
// MARKER: use setByteAt/byteAt to elide the bounds-checks
// when we are sure that we don't go beyond the bounds.
func (z *bufioEncWriter) resetBytes(in []byte, out *[]byte) {
halt.errorStr("resetBytes is unsupported by bufioEncWriter")
}
func (z *bufioEncWriter) resetIO(w io.Writer, bufsize int, blist *bytesFreeList) {
z.w = w
z.n = 0
if bufsize <= 0 {
bufsize = defEncByteBufSize
}
// bufsize must be >= 8, to accommodate writen methods (where n <= 8)
if bufsize <= 8 {
bufsize = 8
}
// use minimum bufsize of 16, matching the array z.b and accommodating writen methods (where n <= 8)
bufsize = max(16, bufsize) // max(byteBufSize, bufsize)
if cap(z.buf) < bufsize {
if len(z.buf) > 0 && &z.buf[0] != &z.b[0] {
blist.put(z.buf)
@ -56,17 +66,19 @@ func (z *bufioEncWriter) reset(w io.Writer, bufsize int, blist *bytesFreelist) {
}
func (z *bufioEncWriter) flushErr() (err error) {
n, err := z.w.Write(z.buf[:z.n])
z.n -= n
if z.n > 0 {
if err == nil {
err = io.ErrShortWrite
var n int
for i := maxConsecutiveEmptyWrites; i > 0; i-- {
n, err = z.w.Write(z.buf[:z.n])
z.n -= n
if z.n == 0 || err != nil {
return
}
// at this point: z.n > 0 && err == nil
if n > 0 {
copy(z.buf, z.buf[n:z.n+n])
}
}
return err
return io.ErrShortWrite // OR io.ErrNoProgress: not enough (or no) data written
}
func (z *bufioEncWriter) flush() {
@ -131,6 +143,7 @@ func (z *bufioEncWriter) writen1(b1 byte) {
// z.buf[z.n] = b1
z.n++
}
func (z *bufioEncWriter) writen2(b1, b2 byte) {
if 2 > len(z.buf)-z.n {
z.flush()
@ -169,8 +182,14 @@ func (z *bufioEncWriter) endErr() (err error) {
return
}
func (z *bufioEncWriter) end() {
halt.onerror(z.endErr())
}
// ---------------------------------------------
var bytesEncAppenderDefOut = []byte{}
// bytesEncAppender implements encWriter and can write to an byte slice.
type bytesEncAppender struct {
b []byte
@ -203,122 +222,18 @@ func (z *bytesEncAppender) writen4(b [4]byte) {
func (z *bytesEncAppender) writen8(b [8]byte) {
z.b = append(z.b, b[:]...)
// z.b = append(z.b, b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]) // prevents inlining encWr.writen4
// z.b = append(z.b, b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7])
}
func (z *bytesEncAppender) endErr() error {
func (z *bytesEncAppender) end() {
*(z.out) = z.b
return nil
}
func (z *bytesEncAppender) reset(in []byte, out *[]byte) {
func (z *bytesEncAppender) resetBytes(in []byte, out *[]byte) {
z.b = in[:0]
z.out = out
}
// --------------------------------------------------
type encWr struct {
wb bytesEncAppender
wf *bufioEncWriter
bytes bool // encoding to []byte
// MARKER: these fields below should belong directly in Encoder.
// we pack them here for space efficiency and cache-line optimization.
js bool // is json encoder?
be bool // is binary encoder?
c containerState
calls uint16
seq uint16 // sequencer (e.g. used by binc for symbols, etc)
func (z *bytesEncAppender) resetIO(w io.Writer, bufsize int, blist *bytesFreeList) {
halt.errorStr("resetIO is unsupported by bytesEncAppender")
}
// MARKER: manually inline bytesEncAppender.writenx/writeqstr methods,
// as calling them causes encWr.writenx/writeqstr methods to not be inlined (cost > 80).
//
// i.e. e.g. instead of writing z.wb.writen2(b1, b2), use z.wb.b = append(z.wb.b, b1, b2)
func (z *encWr) writeb(s []byte) {
if z.bytes {
z.wb.writeb(s)
} else {
z.wf.writeb(s)
}
}
func (z *encWr) writestr(s string) {
if z.bytes {
z.wb.writestr(s)
} else {
z.wf.writestr(s)
}
}
// MARKER: Add WriteStr to be called directly by generated code without a genHelper forwarding function.
// Go's inlining model adds cost for forwarding functions, preventing inlining (cost goes above 80 budget).
func (z *encWr) WriteStr(s string) {
if z.bytes {
z.wb.writestr(s)
} else {
z.wf.writestr(s)
}
}
func (z *encWr) writen1(b1 byte) {
if z.bytes {
z.wb.writen1(b1)
} else {
z.wf.writen1(b1)
}
}
func (z *encWr) writen2(b1, b2 byte) {
if z.bytes {
// MARKER: z.wb.writen2(b1, b2)
z.wb.b = append(z.wb.b, b1, b2)
} else {
z.wf.writen2(b1, b2)
}
}
func (z *encWr) writen4(b [4]byte) {
if z.bytes {
// MARKER: z.wb.writen4(b1, b2, b3, b4)
z.wb.b = append(z.wb.b, b[:]...)
// z.wb.writen4(b)
} else {
z.wf.writen4(b)
}
}
func (z *encWr) writen8(b [8]byte) {
if z.bytes {
// z.wb.b = append(z.wb.b, b[:]...)
z.wb.writen8(b)
} else {
z.wf.writen8(b)
}
}
func (z *encWr) writeqstr(s string) {
if z.bytes {
// MARKER: z.wb.writeqstr(s)
z.wb.b = append(append(append(z.wb.b, '"'), s...), '"')
} else {
z.wf.writeqstr(s)
}
}
func (z *encWr) endErr() error {
if z.bytes {
return z.wb.endErr()
}
return z.wf.endErr()
}
func (z *encWr) end() {
halt.onerror(z.endErr())
}
var _ encWriter = (*encWr)(nil)
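
encWr fans every write out to either the in-memory appender (wb) or the buffered io.Writer (wf) depending on the bytes flag, and keeps each branch to a single short call or a manually inlined append so the wrappers stay within Go's inlining budget. A stripped-down sketch of that dispatch pattern (the names here are illustrative, not the package's API):

```
package main

import (
	"bytes"
	"fmt"
	"io"
)

// writer dispatches to an in-memory buffer or an io.Writer, mirroring how
// encWr chooses between bytesEncAppender and bufioEncWriter.
type writer struct {
	buf     []byte
	w       io.Writer
	toBytes bool
}

func (z *writer) writen1(b byte) {
	if z.toBytes {
		z.buf = append(z.buf, b) // manually inlined append, as in the MARKER notes above
	} else {
		z.w.Write([]byte{b})
	}
}

func (z *writer) writestr(s string) {
	if z.toBytes {
		z.buf = append(z.buf, s...)
	} else {
		io.WriteString(z.w, s)
	}
}

func main() {
	zb := &writer{toBytes: true}
	zb.writestr("hi")
	zb.writen1('!')
	fmt.Println(string(zb.buf)) // hi!

	var out bytes.Buffer
	zw := &writer{w: &out}
	zw.writestr("hi")
	zw.writen1('!')
	fmt.Println(out.String()) // hi!
}
```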