mirror of https://github.com/superseriousbusiness/gotosocial.git
[chore] update go dependencies (#4304)
- github.com/KimMachineGun/automemlimit v0.7.2 => v0.7.3
- github.com/gin-contrib/cors v1.7.5 => v1.7.6
- github.com/minio/minio-go/v7 v7.0.92 => v7.0.94
- github.com/spf13/cast v1.8.0 => v1.9.2
- github.com/uptrace/bun{,/*} v1.2.11 => v1.2.14
- golang.org/x/image v0.27.0 => v0.28.0
- golang.org/x/net v0.40.0 => v0.41.0
- code.superseriousbusiness.org/go-swagger v0.31.0-gts-go1.23-fix => v0.32.3-gts-go1.23-fix
Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4304
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
parent 7712885038
commit 8b0ea56027
294 changed files with 139999 additions and 21873 deletions
vendor/github.com/KimMachineGun/automemlimit/memlimit/memlimit.go (30 changes, generated, vendored)

@@ -169,7 +169,7 @@ func SetGoMemLimitWithOpts(opts ...Option) (_ int64, _err error) {

// set the memory limit and start refresh
limit, err := updateGoMemLimit(uint64(snapshot), provider, cfg.logger)
go refresh(provider, cfg.logger, cfg.refresh)
refresh(provider, cfg.logger, cfg.refresh)
if err != nil {
if errors.Is(err, ErrNoLimit) {
cfg.logger.Info("memory is not limited, skipping")

@@ -200,7 +200,7 @@ func updateGoMemLimit(currLimit uint64, provider Provider, logger *slog.Logger)
return newLimit, nil
}

// refresh periodically fetches the memory limit from the provider and reapplies it if it has changed.
// refresh spawns a goroutine that runs every refresh duration and updates the GOMEMLIMIT if it has changed.
// See more details in the documentation of WithRefreshInterval.
func refresh(provider Provider, logger *slog.Logger, refresh time.Duration) {
if refresh == 0 {

@@ -210,22 +210,24 @@ func refresh(provider Provider, logger *slog.Logger, refresh time.Duration) {
provider = noErrNoLimitProvider(provider)

t := time.NewTicker(refresh)
for range t.C {
err := func() (_err error) {
snapshot := debug.SetMemoryLimit(-1)
defer rollbackOnPanic(logger, snapshot, &_err)
go func() {
for range t.C {
err := func() (_err error) {
snapshot := debug.SetMemoryLimit(-1)
defer rollbackOnPanic(logger, snapshot, &_err)

_, err := updateGoMemLimit(uint64(snapshot), provider, logger)
_, err := updateGoMemLimit(uint64(snapshot), provider, logger)
if err != nil {
return err
}

return nil
}()
if err != nil {
return err
logger.Error("failed to refresh GOMEMLIMIT", slog.Any("error", err))
}

return nil
}()
if err != nil {
logger.Error("failed to refresh GOMEMLIMIT", slog.Any("error", err))
}
}
}()
}

// rollbackOnPanic rollbacks to the snapshot on panic.
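The automemlimit change above (v0.7.2 to v0.7.3) moves the periodic re-check into refresh itself: the caller now invokes refresh(...) directly, and refresh spawns its own goroutine around the ticker loop. A minimal sketch of that pattern, using hypothetical names (startRefresh, update), not the library's actual code:

package main

import (
    "fmt"
    "time"
)

// startRefresh spawns its own goroutine, so callers invoke it directly
// instead of wrapping the call in `go ...` as the old code did.
func startRefresh(interval time.Duration, update func() error) {
    if interval == 0 {
        return
    }
    t := time.NewTicker(interval)
    go func() {
        for range t.C {
            if err := update(); err != nil {
                fmt.Println("refresh failed:", err)
            }
        }
    }()
}

func main() {
    startRefresh(time.Second, func() error {
        fmt.Println("re-checking memory limit")
        return nil
    })
    time.Sleep(3 * time.Second)
}

The real function additionally snapshots the current GOMEMLIMIT and rolls it back on panic; that part is omitted here.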
vendor/github.com/bytedance/sonic/.gitmodules (3 changes, generated, vendored)

@@ -4,3 +4,6 @@
[submodule "tools/simde"]
path = tools/simde
url = https://github.com/simd-everywhere/simde.git
[submodule "fuzz/go-fuzz-corpus"]
path = fuzz/go-fuzz-corpus
url = https://github.com/dvyukov/go-fuzz-corpus.git
vendor/github.com/bytedance/sonic/README.md (4 changes, generated, vendored)

@@ -385,12 +385,12 @@ See [ast/visitor.go](https://github.com/bytedance/sonic/blob/main/ast/visitor.go

## Compatibility

For developers who want to use sonic to meet diffirent scenarios, we provide some integrated configs as `sonic.API`
For developers who want to use sonic to meet different scenarios, we provide some integrated configs as `sonic.API`

- `ConfigDefault`: the sonic's default config (`EscapeHTML=false`,`SortKeys=false`...) to run sonic fast meanwhile ensure security.
- `ConfigStd`: the std-compatible config (`EscapeHTML=true`,`SortKeys=true`...)
- `ConfigFastest`: the fastest config (`NoQuoteTextMarshaler=true`) to run on sonic as fast as possible.
Sonic **DOES NOT** ensure to support all environments, due to the difficulty of developing high-performance codes. On non-sonic-supporting environment, the implementation will fall back to `encoding/json`. Thus beflow configs will all equal to `ConfigStd`.
Sonic **DOES NOT** ensure to support all environments, due to the difficulty of developing high-performance codes. On non-sonic-supporting environment, the implementation will fall back to `encoding/json`. Thus below configs will all equal to `ConfigStd`.

## Tips

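The README hunk above describes sonic's preset configs. A small usage sketch of those presets, assuming the Marshal method of the sonic.API interface mentioned in that text; error handling is elided:

package main

import (
    "fmt"

    "github.com/bytedance/sonic"
)

func main() {
    v := map[string]int{"a": 1}

    // Std-compatible behaviour (EscapeHTML=true, SortKeys=true, ...).
    std, _ := sonic.ConfigStd.Marshal(v)

    // Fastest preset; on unsupported platforms both presets fall back to encoding/json.
    fast, _ := sonic.ConfigFastest.Marshal(v)

    fmt.Println(string(std), string(fast))
}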
vendor/github.com/bytedance/sonic/api.go (4 changes, generated, vendored)

@@ -94,6 +94,9 @@ type Config struct {

// Encode Infinity or Nan float into `null`, instead of returning an error.
EncodeNullForInfOrNan bool

// CaseSensitive indicates that the decoder should not ignore the case of object keys.
CaseSensitive bool
}

var (

@@ -111,7 +114,6 @@ var (

// ConfigFastest is the fastest config of APIs, aiming at speed.
ConfigFastest = Config{
NoQuoteTextMarshaler: true,
NoValidateJSONMarshaler: true,
NoValidateJSONSkip: true,
}.Froze()
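The first hunk adds a CaseSensitive field to Config. A hedged sketch of how a custom config is typically frozen into an API and what the new option is documented to do; the exact matching behaviour shown in the comments is an assumption based on the field's doc comment:

package main

import (
    "fmt"

    "github.com/bytedance/sonic"
)

type payload struct {
    Name string `json:"name"`
}

func main() {
    // With CaseSensitive set, the decoder no longer ignores key case,
    // so a key "NAME" should not populate the "name" field.
    api := sonic.Config{CaseSensitive: true}.Froze()

    var p payload
    _ = api.Unmarshal([]byte(`{"NAME":"kim"}`), &p)
    fmt.Printf("%+v\n", p)
}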
vendor/github.com/bytedance/sonic/ast/iterator.go (2 changes, generated, vendored)

@@ -176,7 +176,7 @@ type Scanner func(path Sequence, node *Node) bool
// Especially, if the node is not V_ARRAY or V_OBJECT,
// the node itself will be returned and Sequence.Index == -1.
//
// NOTICE: A unsetted node WON'T trigger sc, but its index still counts into Path.Index
// NOTICE: An unset node WON'T trigger sc, but its index still counts into Path.Index
func (self *Node) ForEach(sc Scanner) error {
if err := self.checkRaw(); err != nil {
return err
vendor/github.com/bytedance/sonic/ast/node.go (29 changes, generated, vendored)

@@ -509,6 +509,23 @@ func (self *Node) Float64() (float64, error) {
}
}

func (self *Node) StrictBool() (bool, error) {
if err := self.checkRaw(); err!= nil {
return false, err
}
switch self.t {
case types.V_TRUE : return true, nil
case types.V_FALSE : return false, nil
case _V_ANY :
any := self.packAny()
switch v := any.(type) {
case bool : return v, nil
default : return false, ErrUnsupportType
}
default : return false, ErrUnsupportType
}
}

// Float64 exports underlying float64 value, including V_NUMBER, V_ANY
func (self *Node) StrictFloat64() (float64, error) {
if err := self.checkRaw(); err != nil {

@@ -776,7 +793,7 @@ func (self *Node) Pop() error {
}

// Move moves the child at src index to dst index,
// meanwhile slides sliblings from src+1 to dst.
// meanwhile slides siblings from src+1 to dst.
//
// WARN: this will change address of elements, which is a dangerous action.
func (self *Node) Move(dst, src int) error {

@@ -816,7 +833,7 @@ func (self *Node) Move(dst, src int) error {
return nil
}

// SetAny wraps val with V_ANY node, and Add() the node.
// AddAny wraps val with V_ANY node, and Add() the node.
func (self *Node) AddAny(val interface{}) error {
return self.Add(NewAny(val))
}

@@ -938,7 +955,7 @@ func (self *Node) Map() (map[string]interface{}, error) {
return self.toGenericObject()
}

// MapUseNumber loads all keys of an object node, with numeric nodes casted to json.Number
// MapUseNumber loads all keys of an object node, with numeric nodes cast to json.Number
func (self *Node) MapUseNumber() (map[string]interface{}, error) {
if self.isAny() {
any := self.packAny()

@@ -1083,7 +1100,7 @@ func (self *Node) Array() ([]interface{}, error) {
return self.toGenericArray()
}

// ArrayUseNumber loads all indexes of an array node, with numeric nodes casted to json.Number
// ArrayUseNumber loads all indexes of an array node, with numeric nodes cast to json.Number
func (self *Node) ArrayUseNumber() ([]interface{}, error) {
if self.isAny() {
any := self.packAny()

@@ -1149,7 +1166,7 @@ func (self *Node) unsafeArray() (*linkedNodes, error) {

// Interface loads all children under all paths from this node,
// and converts itself as generic type.
// WARN: all numeric nodes are casted to float64
// WARN: all numeric nodes are cast to float64
func (self *Node) Interface() (interface{}, error) {
if err := self.checkRaw(); err != nil {
return nil, err

@@ -1193,7 +1210,7 @@ func (self *Node) packAny() interface{} {
}

// InterfaceUseNumber works same with Interface()
// except numeric nodes are casted to json.Number
// except numeric nodes are cast to json.Number
func (self *Node) InterfaceUseNumber() (interface{}, error) {
if err := self.checkRaw(); err != nil {
return nil, err
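The first hunk above introduces a StrictBool accessor that only accepts V_TRUE, V_FALSE, or a wrapped bool and returns ErrUnsupportType otherwise. A sketch of how such an accessor would typically be used together with sonic.Get, assuming the method behaves as the added code reads:

package main

import (
    "fmt"

    "github.com/bytedance/sonic"
)

func main() {
    data := []byte(`{"enabled": true, "count": 1}`)

    // "enabled" is a real JSON bool, so the strict accessor succeeds.
    node, _ := sonic.Get(data, "enabled")
    ok, err := node.StrictBool()
    fmt.Println(ok, err)

    // "count" is a number; StrictBool refuses to coerce it and reports
    // an unsupported-type error instead.
    node, _ = sonic.Get(data, "count")
    _, err = node.StrictBool()
    fmt.Println(err)
}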
vendor/github.com/bytedance/sonic/ast/parser.go (8 changes, generated, vendored)

@@ -63,7 +63,7 @@ func (self *Parser) delim() types.ParsingError {
return types.ERR_EOF
}

/* check for the delimtier */
/* check for the delimiter */
if self.s[p] != ':' {
return types.ERR_INVALID_CHAR
}

@@ -82,7 +82,7 @@ func (self *Parser) object() types.ParsingError {
return types.ERR_EOF
}

/* check for the delimtier */
/* check for the delimiter */
if self.s[p] != '{' {
return types.ERR_INVALID_CHAR
}

@@ -101,7 +101,7 @@ func (self *Parser) array() types.ParsingError {
return types.ERR_EOF
}

/* check for the delimtier */
/* check for the delimiter */
if self.s[p] != '[' {
return types.ERR_INVALID_CHAR
}

@@ -638,7 +638,7 @@ func Loads(src string) (int, interface{}, error) {
}
}

// LoadsUseNumber parse all json into interface{}, with numeric nodes casted to json.Number
// LoadsUseNumber parse all json into interface{}, with numeric nodes cast to json.Number
func LoadsUseNumber(src string) (int, interface{}, error) {
ps := &Parser{s: src}
np, err := ps.Parse()
vendor/github.com/bytedance/sonic/ast/visitor.go (6 changes, generated, vendored)

@@ -178,7 +178,7 @@ func (self *traverser) decodeArray() error {
/* allocate array space and parse every element */
if err := self.visitor.OnArrayBegin(_DEFAULT_NODE_CAP); err != nil {
if err == VisitOPSkip {
// NOTICE: for user needs to skip entiry object
// NOTICE: for user needs to skip entry object
self.parser.p -= 1
if _, e := self.parser.skipFast(); e != 0 {
return e

@@ -233,7 +233,7 @@ func (self *traverser) decodeObject() error {
/* allocate object space and decode each pair */
if err := self.visitor.OnObjectBegin(_DEFAULT_NODE_CAP); err != nil {
if err == VisitOPSkip {
// NOTICE: for user needs to skip entiry object
// NOTICE: for user needs to skip entry object
self.parser.p -= 1
if _, e := self.parser.skipFast(); e != 0 {
return e

@@ -328,5 +328,5 @@ func (self *traverser) decodeString(iv int64, ep int) error {
}

// If visitor return this error on `OnObjectBegin()` or `OnArrayBegin()`,
// the transverer will skip entiry object or array
// the traverser will skip entry object or array
var VisitOPSkip = errors.New("")
vendor/github.com/bytedance/sonic/compat.go (12 changes, generated, vendored)

@@ -87,7 +87,17 @@ func (cfg frozenConfig) UnmarshalFromString(buf string, val interface{}) error {
if cfg.DisallowUnknownFields {
dec.DisallowUnknownFields()
}
return dec.Decode(val)
err := dec.Decode(val)
if err != nil {
return err
}

// check the trailing chars
offset := dec.InputOffset()
if t, err := dec.Token(); !(t == nil && err == io.EOF) {
return &json.SyntaxError{ Offset: offset}
}
return nil
}

// Unmarshal is implemented by sonic
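The hunk above makes the fallback UnmarshalFromString reject trailing bytes after the first JSON value by probing the decoder one more time. A standalone sketch of that trailing-data check using only the standard library; it illustrates the pattern rather than sonic's exact code:

package main

import (
    "encoding/json"
    "fmt"
    "io"
    "strings"
)

// unmarshalStrict decodes one JSON value and errors out if anything other
// than whitespace follows it, mirroring the check added above.
func unmarshalStrict(buf string, val interface{}) error {
    dec := json.NewDecoder(strings.NewReader(buf))
    if err := dec.Decode(val); err != nil {
        return err
    }
    // A clean input yields (nil, io.EOF) here; anything else is trailing data.
    if t, err := dec.Token(); !(t == nil && err == io.EOF) {
        return &json.SyntaxError{Offset: dec.InputOffset()}
    }
    return nil
}

func main() {
    var v map[string]int
    fmt.Println(unmarshalStrict(`{"a":1}`, &v) == nil)          // true
    fmt.Println(unmarshalStrict(`{"a":1} trailing`, &v) == nil) // false
}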
vendor/github.com/bytedance/sonic/internal/decoder/api/stream.go (9 changes, generated, vendored)

@@ -76,11 +76,12 @@ func (self *StreamDecoder) Decode(val interface{}) (err error) {
if y := native.SkipOneFast(&src, &x); y < 0 {
if self.readMore() {
goto try_skip
} else {
err = SyntaxError{e, self.s, types.ParsingError(-s), ""}
self.setErr(err)
return
}
if self.err == nil {
self.err = SyntaxError{e, self.s, types.ParsingError(-s), ""}
self.setErr(self.err)
}
return self.err
} else {
s = y + s
e = x + s
vendor/github.com/bytedance/sonic/internal/decoder/optdec/node.go (18 changes, generated, vendored)

@@ -12,7 +12,7 @@ import (
type Context struct {
Parser *Parser
efacePool *efacePool
Stack bounedStack
Stack boundedStack
Utf8Inv bool
}

@@ -26,20 +26,20 @@ type parentStat struct {
con unsafe.Pointer
remain uint64
}
type bounedStack struct {
type boundedStack struct {
stack []parentStat
index int
}

func newStack(size int) bounedStack {
return bounedStack{
func newStack(size int) boundedStack {
return boundedStack{
stack: make([]parentStat, size + 2),
index: 0,
}
}

//go:nosplit
func (s *bounedStack) Pop() (unsafe.Pointer, int, bool){
func (s *boundedStack) Pop() (unsafe.Pointer, int, bool){
s.index--
con := s.stack[s.index].con
remain := s.stack[s.index].remain &^ (uint64(1) << 63)

@@ -50,7 +50,7 @@ func (s *bounedStack) Pop() (unsafe.Pointer, int, bool){
}

//go:nosplit
func (s *bounedStack) Push(p unsafe.Pointer, remain int, isObj bool) {
func (s *boundedStack) Push(p unsafe.Pointer, remain int, isObj bool) {
s.stack[s.index].con = p
s.stack[s.index].remain = uint64(remain)
if isObj {

@@ -1253,7 +1253,7 @@ func (node *Node) AsEfaceFallback(ctx *Context) (interface{}, error) {
if ctx.Parser.options & (1 << _F_use_number) != 0 {
num, ok := node.AsNumber(ctx)
if !ok {
// skip the unmacthed type
// skip the unmatched type
*node = NewNode(node.Next())
return nil, newUnmatched(node.Position(), rt.JsonNumberType)
} else {

@@ -1275,13 +1275,13 @@ func (node *Node) AsEfaceFallback(ctx *Context) (interface{}, error) {
return f, nil
}

// skip the unmacthed type
// skip the unmatched type
*node = NewNode(node.Next())
return nil, newUnmatched(node.Position(), rt.Int64Type)
} else {
num, ok := node.AsF64(ctx)
if !ok {
// skip the unmacthed type
// skip the unmatched type
*node = NewNode(node.Next())
return nil, newUnmatched(node.Position(), rt.Float64Type)
} else {
vendor/github.com/bytedance/sonic/internal/encoder/alg/mapiter.go (13 changes, generated, vendored)

@@ -97,17 +97,18 @@ func (self *MapIterator) append(t *rt.GoType, k unsafe.Pointer, v unsafe.Pointer

func (self *MapIterator) appendGeneric(p *_MapPair, t *rt.GoType, v reflect.Kind, k unsafe.Pointer) error {
switch v {
case reflect.Int : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int)(k)), 10)) ; return nil
case reflect.Int8 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int8)(k)), 10)) ; return nil
case reflect.Int16 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int16)(k)), 10)) ; return nil
case reflect.Int32 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int32)(k)), 10)) ; return nil
case reflect.Int64 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int64)(k)), 10)) ; return nil
case reflect.Int : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int)(k)), 10)) ; return nil
case reflect.Int8 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int8)(k)), 10)) ; return nil
case reflect.Int16 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int16)(k)), 10)) ; return nil
case reflect.Int32 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int32)(k)), 10)) ; return nil
case reflect.Int64 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int64)(k)), 10)) ; return nil
case reflect.Uint : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uint)(k)), 10)) ; return nil
case reflect.Uint8 : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uint8)(k)), 10)) ; return nil
case reflect.Uint16 : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uint16)(k)), 10)) ; return nil
case reflect.Uint32 : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uint32)(k)), 10)) ; return nil
case reflect.Uint64 : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uint64)(k)), 10)) ; return nil
case reflect.Uint64 : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uint64)(k)), 10)) ; return nil
case reflect.Uintptr : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uintptr)(k)), 10)) ; return nil
case reflect.Bool : if *(*bool)(k) { p.k = "true" } else { p.k = "false" }; return nil
case reflect.Interface : return self.appendInterface(p, t, k)
case reflect.Struct, reflect.Ptr : return self.appendConcrete(p, t, k)
default : panic("unexpected map key type")
vendor/github.com/bytedance/sonic/internal/encoder/alg/spec.go (18 changes, generated, vendored)

@@ -21,6 +21,7 @@ package alg

import (
"runtime"
"strconv"
"unsafe"

"github.com/bytedance/sonic/internal/native"

@@ -177,22 +178,9 @@ func F32toa(buf []byte, v float32) ([]byte) {
}

func I64toa(buf []byte, v int64) ([]byte) {
buf = rt.GuardSlice2(buf, 32)
ret := native.I64toa((*byte)(rt.IndexByte(buf, len(buf))), v)
if ret > 0 {
return buf[:len(buf)+ret]
} else {
return buf
}
return strconv.AppendInt(buf, v, 10)
}

func U64toa(buf []byte, v uint64) ([]byte) {
buf = rt.GuardSlice2(buf, 32)
ret := native.U64toa((*byte)(rt.IndexByte(buf, len(buf))), v)
if ret > 0 {
return buf[:len(buf)+ret]
} else {
return buf
}
return strconv.AppendUint(buf, v, 10)
}

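The hunk above drops the native assembly formatters in favour of strconv.AppendInt and strconv.AppendUint, which write the decimal text directly onto an existing byte slice. A tiny standard-library illustration of that append-style formatting:

package main

import (
    "fmt"
    "strconv"
)

func main() {
    buf := []byte(`{"id":`)

    // AppendInt/AppendUint format the number onto the end of buf,
    // reusing its capacity instead of allocating a temporary string.
    buf = strconv.AppendInt(buf, -42, 10)
    buf = append(buf, ',', '"', 'n', '"', ':')
    buf = strconv.AppendUint(buf, 18446744073709551615, 10)
    buf = append(buf, '}')

    fmt.Println(string(buf)) // {"id":-42,"n":18446744073709551615}
}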
vendor/github.com/bytedance/sonic/internal/optcaching/fcache.go (2 changes, generated, vendored)

@@ -324,7 +324,7 @@ func (self *NormalFieldMap) Set(fields []resolver.FieldMeta) {

}

// use hashnap
// use hashmap
type FallbackFieldMap struct {
oders []string
inner map[string]int
vendor/github.com/bytedance/sonic/sonic.go (3 changes, generated, vendored)

@@ -90,6 +90,9 @@ func (cfg Config) Froze() API {
if cfg.ValidateString {
api.decoderOpts |= decoder.OptionValidateString
}
if cfg.CaseSensitive {
api.decoderOpts |= decoder.OptionCaseSensitive
}
return api
}

vendor/github.com/bytedance/sonic/utf8/utf8.go (2 changes, generated, vendored)

@@ -62,7 +62,7 @@ func CorrectWith(dst []byte, src []byte, repl string) []byte {
return dst
}

// Validate is a simd-accelereated drop-in replacement for the standard library's utf8.Valid.
// Validate is a simd-accelerated drop-in replacement for the standard library's utf8.Valid.
func Validate(src []byte) bool {
if src == nil {
return true
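The comment fixed above calls Validate a SIMD-accelerated drop-in for utf8.Valid. A short sketch comparing the two on the same inputs; the sonic function signature is taken from the diff above:

package main

import (
    "fmt"
    "unicode/utf8"

    sonicutf8 "github.com/bytedance/sonic/utf8"
)

func main() {
    ok := []byte("héllo, wörld")
    bad := []byte{0xff, 0xfe, 0xfd} // not valid UTF-8

    // Both functions agree; sonic's version uses SIMD where the platform supports it.
    fmt.Println(utf8.Valid(ok), sonicutf8.Validate(ok))   // true true
    fmt.Println(utf8.Valid(bad), sonicutf8.Validate(bad)) // false false
}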
vendor/github.com/gabriel-vasile/mimetype/.golangci.yml (5 additions, new file, generated, vendored)

@@ -0,0 +1,5 @@
version: "2"
linters:
exclusions:
presets:
- std-error-handling
vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go (567 deletions, generated, vendored)

@@ -1,567 +0,0 @@
// Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Package json provides a JSON value parser state machine.
|
||||
// This package is almost entirely copied from the Go stdlib.
|
||||
// Changes made to it permit users of the package to tell
|
||||
// if some slice of bytes is a valid beginning of a json string.
|
||||
package json
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type (
|
||||
scanStatus int
|
||||
)
|
||||
|
||||
const (
|
||||
parseObjectKey = iota // parsing object key (before colon)
|
||||
parseObjectValue // parsing object value (after colon)
|
||||
parseArrayValue // parsing array value
|
||||
|
||||
scanContinue scanStatus = iota // uninteresting byte
|
||||
scanBeginLiteral // end implied by next result != scanContinue
|
||||
scanBeginObject // begin object
|
||||
scanObjectKey // just finished object key (string)
|
||||
scanObjectValue // just finished non-last object value
|
||||
scanEndObject // end object (implies scanObjectValue if possible)
|
||||
scanBeginArray // begin array
|
||||
scanArrayValue // just finished array value
|
||||
scanEndArray // end array (implies scanArrayValue if possible)
|
||||
scanSkipSpace // space byte; can skip; known to be last "continue" result
|
||||
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
|
||||
scanError // hit an error, scanner.err.
|
||||
|
||||
// This limits the max nesting depth to prevent stack overflow.
|
||||
// This is permitted by https://tools.ietf.org/html/rfc7159#section-9
|
||||
maxNestingDepth = 10000
|
||||
)
|
||||
|
||||
type (
|
||||
scanner struct {
|
||||
step func(*scanner, byte) scanStatus
|
||||
parseState []int
|
||||
endTop bool
|
||||
err error
|
||||
index int
|
||||
}
|
||||
)
|
||||
|
||||
var scannerPool = sync.Pool{
|
||||
New: func() any {
|
||||
return &scanner{}
|
||||
},
|
||||
}
|
||||
|
||||
func newScanner() *scanner {
|
||||
s := scannerPool.Get().(*scanner)
|
||||
s.reset()
|
||||
return s
|
||||
}
|
||||
|
||||
func freeScanner(s *scanner) {
|
||||
// Avoid hanging on to too much memory in extreme cases.
|
||||
if len(s.parseState) > 1024 {
|
||||
s.parseState = nil
|
||||
}
|
||||
scannerPool.Put(s)
|
||||
}
|
||||
|
||||
// Scan returns the number of bytes scanned and if there was any error
|
||||
// in trying to reach the end of data.
|
||||
func Scan(data []byte) (int, error) {
|
||||
s := newScanner()
|
||||
defer freeScanner(s)
|
||||
_ = checkValid(data, s)
|
||||
return s.index, s.err
|
||||
}
|
||||
|
||||
// checkValid verifies that data is valid JSON-encoded data.
|
||||
// scan is passed in for use by checkValid to avoid an allocation.
|
||||
func checkValid(data []byte, scan *scanner) error {
|
||||
for _, c := range data {
|
||||
scan.index++
|
||||
if scan.step(scan, c) == scanError {
|
||||
return scan.err
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
return scan.err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isSpace(c byte) bool {
|
||||
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
|
||||
}
|
||||
|
||||
func (s *scanner) reset() {
|
||||
s.step = stateBeginValue
|
||||
s.parseState = s.parseState[0:0]
|
||||
s.err = nil
|
||||
s.endTop = false
|
||||
s.index = 0
|
||||
}
|
||||
|
||||
// eof tells the scanner that the end of input has been reached.
|
||||
// It returns a scan status just as s.step does.
|
||||
func (s *scanner) eof() scanStatus {
|
||||
if s.err != nil {
|
||||
return scanError
|
||||
}
|
||||
if s.endTop {
|
||||
return scanEnd
|
||||
}
|
||||
s.step(s, ' ')
|
||||
if s.endTop {
|
||||
return scanEnd
|
||||
}
|
||||
if s.err == nil {
|
||||
s.err = fmt.Errorf("unexpected end of JSON input")
|
||||
}
|
||||
return scanError
|
||||
}
|
||||
|
||||
// pushParseState pushes a new parse state p onto the parse stack.
|
||||
// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned.
|
||||
func (s *scanner) pushParseState(c byte, newParseState int, successState scanStatus) scanStatus {
|
||||
s.parseState = append(s.parseState, newParseState)
|
||||
if len(s.parseState) <= maxNestingDepth {
|
||||
return successState
|
||||
}
|
||||
return s.error(c, "exceeded max depth")
|
||||
}
|
||||
|
||||
// popParseState pops a parse state (already obtained) off the stack
|
||||
// and updates s.step accordingly.
|
||||
func (s *scanner) popParseState() {
|
||||
n := len(s.parseState) - 1
|
||||
s.parseState = s.parseState[0:n]
|
||||
if n == 0 {
|
||||
s.step = stateEndTop
|
||||
s.endTop = true
|
||||
} else {
|
||||
s.step = stateEndValue
|
||||
}
|
||||
}
|
||||
|
||||
// stateBeginValueOrEmpty is the state after reading `[`.
|
||||
func stateBeginValueOrEmpty(s *scanner, c byte) scanStatus {
|
||||
if c <= ' ' && isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == ']' {
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
return stateBeginValue(s, c)
|
||||
}
|
||||
|
||||
// stateBeginValue is the state at the beginning of the input.
|
||||
func stateBeginValue(s *scanner, c byte) scanStatus {
|
||||
if c <= ' ' && isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
switch c {
|
||||
case '{':
|
||||
s.step = stateBeginStringOrEmpty
|
||||
return s.pushParseState(c, parseObjectKey, scanBeginObject)
|
||||
case '[':
|
||||
s.step = stateBeginValueOrEmpty
|
||||
return s.pushParseState(c, parseArrayValue, scanBeginArray)
|
||||
case '"':
|
||||
s.step = stateInString
|
||||
return scanBeginLiteral
|
||||
case '-':
|
||||
s.step = stateNeg
|
||||
return scanBeginLiteral
|
||||
case '0': // beginning of 0.123
|
||||
s.step = state0
|
||||
return scanBeginLiteral
|
||||
case 't': // beginning of true
|
||||
s.step = stateT
|
||||
return scanBeginLiteral
|
||||
case 'f': // beginning of false
|
||||
s.step = stateF
|
||||
return scanBeginLiteral
|
||||
case 'n': // beginning of null
|
||||
s.step = stateN
|
||||
return scanBeginLiteral
|
||||
}
|
||||
if '1' <= c && c <= '9' { // beginning of 1234.5
|
||||
s.step = state1
|
||||
return scanBeginLiteral
|
||||
}
|
||||
return s.error(c, "looking for beginning of value")
|
||||
}
|
||||
|
||||
// stateBeginStringOrEmpty is the state after reading `{`.
|
||||
func stateBeginStringOrEmpty(s *scanner, c byte) scanStatus {
|
||||
if c <= ' ' && isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == '}' {
|
||||
n := len(s.parseState)
|
||||
s.parseState[n-1] = parseObjectValue
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
return stateBeginString(s, c)
|
||||
}
|
||||
|
||||
// stateBeginString is the state after reading `{"key": value,`.
|
||||
func stateBeginString(s *scanner, c byte) scanStatus {
|
||||
if c <= ' ' && isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == '"' {
|
||||
s.step = stateInString
|
||||
return scanBeginLiteral
|
||||
}
|
||||
return s.error(c, "looking for beginning of object key string")
|
||||
}
|
||||
|
||||
// stateEndValue is the state after completing a value,
|
||||
// such as after reading `{}` or `true` or `["x"`.
|
||||
func stateEndValue(s *scanner, c byte) scanStatus {
|
||||
n := len(s.parseState)
|
||||
if n == 0 {
|
||||
// Completed top-level before the current byte.
|
||||
s.step = stateEndTop
|
||||
s.endTop = true
|
||||
return stateEndTop(s, c)
|
||||
}
|
||||
if c <= ' ' && isSpace(c) {
|
||||
s.step = stateEndValue
|
||||
return scanSkipSpace
|
||||
}
|
||||
ps := s.parseState[n-1]
|
||||
switch ps {
|
||||
case parseObjectKey:
|
||||
if c == ':' {
|
||||
s.parseState[n-1] = parseObjectValue
|
||||
s.step = stateBeginValue
|
||||
return scanObjectKey
|
||||
}
|
||||
return s.error(c, "after object key")
|
||||
case parseObjectValue:
|
||||
if c == ',' {
|
||||
s.parseState[n-1] = parseObjectKey
|
||||
s.step = stateBeginString
|
||||
return scanObjectValue
|
||||
}
|
||||
if c == '}' {
|
||||
s.popParseState()
|
||||
return scanEndObject
|
||||
}
|
||||
return s.error(c, "after object key:value pair")
|
||||
case parseArrayValue:
|
||||
if c == ',' {
|
||||
s.step = stateBeginValue
|
||||
return scanArrayValue
|
||||
}
|
||||
if c == ']' {
|
||||
s.popParseState()
|
||||
return scanEndArray
|
||||
}
|
||||
return s.error(c, "after array element")
|
||||
}
|
||||
return s.error(c, "")
|
||||
}
|
||||
|
||||
// stateEndTop is the state after finishing the top-level value,
|
||||
// such as after reading `{}` or `[1,2,3]`.
|
||||
// Only space characters should be seen now.
|
||||
func stateEndTop(s *scanner, c byte) scanStatus {
|
||||
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
|
||||
// Complain about non-space byte on next call.
|
||||
s.error(c, "after top-level value")
|
||||
}
|
||||
return scanEnd
|
||||
}
|
||||
|
||||
// stateInString is the state after reading `"`.
|
||||
func stateInString(s *scanner, c byte) scanStatus {
|
||||
if c == '"' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
if c == '\\' {
|
||||
s.step = stateInStringEsc
|
||||
return scanContinue
|
||||
}
|
||||
if c < 0x20 {
|
||||
return s.error(c, "in string literal")
|
||||
}
|
||||
return scanContinue
|
||||
}
|
||||
|
||||
// stateInStringEsc is the state after reading `"\` during a quoted string.
|
||||
func stateInStringEsc(s *scanner, c byte) scanStatus {
|
||||
switch c {
|
||||
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
|
||||
s.step = stateInString
|
||||
return scanContinue
|
||||
case 'u':
|
||||
s.step = stateInStringEscU
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in string escape code")
|
||||
}
|
||||
|
||||
// stateInStringEscU is the state after reading `"\u` during a quoted string.
|
||||
func stateInStringEscU(s *scanner, c byte) scanStatus {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU1
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
|
||||
func stateInStringEscU1(s *scanner, c byte) scanStatus {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU12
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
|
||||
func stateInStringEscU12(s *scanner, c byte) scanStatus {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU123
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
|
||||
func stateInStringEscU123(s *scanner, c byte) scanStatus {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInString
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateNeg is the state after reading `-` during a number.
|
||||
func stateNeg(s *scanner, c byte) scanStatus {
|
||||
if c == '0' {
|
||||
s.step = state0
|
||||
return scanContinue
|
||||
}
|
||||
if '1' <= c && c <= '9' {
|
||||
s.step = state1
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in numeric literal")
|
||||
}
|
||||
|
||||
// state1 is the state after reading a non-zero integer during a number,
|
||||
// such as after reading `1` or `100` but not `0`.
|
||||
func state1(s *scanner, c byte) scanStatus {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = state1
|
||||
return scanContinue
|
||||
}
|
||||
return state0(s, c)
|
||||
}
|
||||
|
||||
// state0 is the state after reading `0` during a number.
|
||||
func state0(s *scanner, c byte) scanStatus {
|
||||
if c == '.' {
|
||||
s.step = stateDot
|
||||
return scanContinue
|
||||
}
|
||||
if c == 'e' || c == 'E' {
|
||||
s.step = stateE
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateDot is the state after reading the integer and decimal point in a number,
|
||||
// such as after reading `1.`.
|
||||
func stateDot(s *scanner, c byte) scanStatus {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = stateDot0
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "after decimal point in numeric literal")
|
||||
}
|
||||
|
||||
// stateDot0 is the state after reading the integer, decimal point, and subsequent
|
||||
// digits of a number, such as after reading `3.14`.
|
||||
func stateDot0(s *scanner, c byte) scanStatus {
|
||||
if '0' <= c && c <= '9' {
|
||||
return scanContinue
|
||||
}
|
||||
if c == 'e' || c == 'E' {
|
||||
s.step = stateE
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateE is the state after reading the mantissa and e in a number,
|
||||
// such as after reading `314e` or `0.314e`.
|
||||
func stateE(s *scanner, c byte) scanStatus {
|
||||
if c == '+' || c == '-' {
|
||||
s.step = stateESign
|
||||
return scanContinue
|
||||
}
|
||||
return stateESign(s, c)
|
||||
}
|
||||
|
||||
// stateESign is the state after reading the mantissa, e, and sign in a number,
|
||||
// such as after reading `314e-` or `0.314e+`.
|
||||
func stateESign(s *scanner, c byte) scanStatus {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = stateE0
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in exponent of numeric literal")
|
||||
}
|
||||
|
||||
// stateE0 is the state after reading the mantissa, e, optional sign,
|
||||
// and at least one digit of the exponent in a number,
|
||||
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
|
||||
func stateE0(s *scanner, c byte) scanStatus {
|
||||
if '0' <= c && c <= '9' {
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateT is the state after reading `t`.
|
||||
func stateT(s *scanner, c byte) scanStatus {
|
||||
if c == 'r' {
|
||||
s.step = stateTr
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'r')")
|
||||
}
|
||||
|
||||
// stateTr is the state after reading `tr`.
|
||||
func stateTr(s *scanner, c byte) scanStatus {
|
||||
if c == 'u' {
|
||||
s.step = stateTru
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'u')")
|
||||
}
|
||||
|
||||
// stateTru is the state after reading `tru`.
|
||||
func stateTru(s *scanner, c byte) scanStatus {
|
||||
if c == 'e' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'e')")
|
||||
}
|
||||
|
||||
// stateF is the state after reading `f`.
|
||||
func stateF(s *scanner, c byte) scanStatus {
|
||||
if c == 'a' {
|
||||
s.step = stateFa
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'a')")
|
||||
}
|
||||
|
||||
// stateFa is the state after reading `fa`.
|
||||
func stateFa(s *scanner, c byte) scanStatus {
|
||||
if c == 'l' {
|
||||
s.step = stateFal
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateFal is the state after reading `fal`.
|
||||
func stateFal(s *scanner, c byte) scanStatus {
|
||||
if c == 's' {
|
||||
s.step = stateFals
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 's')")
|
||||
}
|
||||
|
||||
// stateFals is the state after reading `fals`.
|
||||
func stateFals(s *scanner, c byte) scanStatus {
|
||||
if c == 'e' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'e')")
|
||||
}
|
||||
|
||||
// stateN is the state after reading `n`.
|
||||
func stateN(s *scanner, c byte) scanStatus {
|
||||
if c == 'u' {
|
||||
s.step = stateNu
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'u')")
|
||||
}
|
||||
|
||||
// stateNu is the state after reading `nu`.
|
||||
func stateNu(s *scanner, c byte) scanStatus {
|
||||
if c == 'l' {
|
||||
s.step = stateNul
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateNul is the state after reading `nul`.
|
||||
func stateNul(s *scanner, c byte) scanStatus {
|
||||
if c == 'l' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateError is the state after reaching a syntax error,
|
||||
// such as after reading `[1}` or `5.1.2`.
|
||||
func stateError(s *scanner, c byte) scanStatus {
|
||||
return scanError
|
||||
}
|
||||
|
||||
// error records an error and switches to the error state.
|
||||
func (s *scanner) error(c byte, context string) scanStatus {
|
||||
s.step = stateError
|
||||
s.err = fmt.Errorf("invalid character <<%c>> %s", c, context)
|
||||
return scanError
|
||||
}
|
||||
vendor/github.com/gabriel-vasile/mimetype/internal/json/parser.go (464 additions, new file, generated, vendored)

@@ -0,0 +1,464 @@
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
QueryNone = "json"
|
||||
QueryGeo = "geo"
|
||||
QueryHAR = "har"
|
||||
QueryGLTF = "gltf"
|
||||
maxRecursion = 4096
|
||||
)
|
||||
|
||||
var queries = map[string][]query{
|
||||
QueryNone: nil,
|
||||
QueryGeo: {{
|
||||
SearchPath: [][]byte{[]byte("type")},
|
||||
SearchVals: [][]byte{
|
||||
[]byte(`"Feature"`),
|
||||
[]byte(`"FeatureCollection"`),
|
||||
[]byte(`"Point"`),
|
||||
[]byte(`"LineString"`),
|
||||
[]byte(`"Polygon"`),
|
||||
[]byte(`"MultiPoint"`),
|
||||
[]byte(`"MultiLineString"`),
|
||||
[]byte(`"MultiPolygon"`),
|
||||
[]byte(`"GeometryCollection"`),
|
||||
},
|
||||
}},
|
||||
QueryHAR: {{
|
||||
SearchPath: [][]byte{[]byte("log"), []byte("version")},
|
||||
}, {
|
||||
SearchPath: [][]byte{[]byte("log"), []byte("creator")},
|
||||
}, {
|
||||
SearchPath: [][]byte{[]byte("log"), []byte("entries")},
|
||||
}},
|
||||
QueryGLTF: {{
|
||||
SearchPath: [][]byte{[]byte("asset"), []byte("version")},
|
||||
SearchVals: [][]byte{[]byte(`"1.0"`), []byte(`"2.0"`)},
|
||||
}},
|
||||
}
|
||||
|
||||
var parserPool = sync.Pool{
|
||||
New: func() any {
|
||||
return &parserState{maxRecursion: maxRecursion}
|
||||
},
|
||||
}
|
||||
|
||||
// parserState holds the state of JSON parsing. The number of inspected bytes,
|
||||
// the current path inside the JSON object, etc.
|
||||
type parserState struct {
|
||||
// ib represents the number of inspected bytes.
|
||||
// Because mimetype limits itself to only reading the header of the file,
|
||||
// it means sometimes the input JSON can be truncated. In that case, we want
|
||||
// to still detect it as JSON, even if it's invalid/truncated.
|
||||
// When ib == len(input) it means the JSON was valid (at least the header).
|
||||
ib int
|
||||
maxRecursion int
|
||||
// currPath keeps a track of the JSON keys parsed up.
|
||||
// It works only for JSON objects. JSON arrays are ignored
|
||||
// mainly because the functionality is not needed.
|
||||
currPath [][]byte
|
||||
// firstToken stores the first JSON token encountered in input.
|
||||
// TODO: performance would be better if we would stop parsing as soon
|
||||
// as we see that first token is not what we are interested in.
|
||||
firstToken int
|
||||
// querySatisfied is true if both path and value of any queries passed to
|
||||
// consumeAny are satisfied.
|
||||
querySatisfied bool
|
||||
}
|
||||
|
||||
// query holds information about a combination of {"key": "val"} that we're trying
|
||||
// to search for inside the JSON.
|
||||
type query struct {
|
||||
// SearchPath represents the whole path to look for inside the JSON.
|
||||
// ex: [][]byte{[]byte("foo"), []byte("bar")} matches {"foo": {"bar": "baz"}}
|
||||
SearchPath [][]byte
|
||||
// SearchVals represents values to look for when the SearchPath is found.
|
||||
// Each SearchVal element is tried until one of them matches (logical OR.)
|
||||
SearchVals [][]byte
|
||||
}
|
||||
|
||||
func eq(path1, path2 [][]byte) bool {
|
||||
if len(path1) != len(path2) {
|
||||
return false
|
||||
}
|
||||
for i := range path1 {
|
||||
if !bytes.Equal(path1[i], path2[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// LooksLikeObjectOrArray reports if first non white space character from raw
|
||||
// is either { or [. Parsing raw as JSON is a heavy operation. When receiving some
|
||||
// text input we can skip parsing if the input does not even look like JSON.
|
||||
func LooksLikeObjectOrArray(raw []byte) bool {
|
||||
for i := range raw {
|
||||
if isSpace(raw[i]) {
|
||||
continue
|
||||
}
|
||||
return raw[i] == '{' || raw[i] == '['
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Parse will take out a parser from the pool depending on queryType and tries
|
||||
// to parse raw bytes as JSON.
|
||||
func Parse(queryType string, raw []byte) (parsed, inspected, firstToken int, querySatisfied bool) {
|
||||
p := parserPool.Get().(*parserState)
|
||||
defer func() {
|
||||
// Avoid hanging on to too much memory in extreme input cases.
|
||||
if len(p.currPath) > 128 {
|
||||
p.currPath = nil
|
||||
}
|
||||
parserPool.Put(p)
|
||||
}()
|
||||
p.reset()
|
||||
|
||||
qs := queries[queryType]
|
||||
got := p.consumeAny(raw, qs, 0)
|
||||
return got, p.ib, p.firstToken, p.querySatisfied
|
||||
}
|
||||
|
||||
func (p *parserState) reset() {
|
||||
p.ib = 0
|
||||
p.currPath = p.currPath[0:0]
|
||||
p.firstToken = TokInvalid
|
||||
p.querySatisfied = false
|
||||
}
|
||||
|
||||
func (p *parserState) consumeSpace(b []byte) (n int) {
|
||||
for len(b) > 0 && isSpace(b[0]) {
|
||||
b = b[1:]
|
||||
n++
|
||||
p.ib++
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (p *parserState) consumeConst(b, cnst []byte) int {
|
||||
lb := len(b)
|
||||
for i, c := range cnst {
|
||||
if lb > i && b[i] == c {
|
||||
p.ib++
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
return len(cnst)
|
||||
}
|
||||
|
||||
func (p *parserState) consumeString(b []byte) (n int) {
|
||||
var c byte
|
||||
for len(b[n:]) > 0 {
|
||||
c, n = b[n], n+1
|
||||
p.ib++
|
||||
switch c {
|
||||
case '\\':
|
||||
if len(b[n:]) == 0 {
|
||||
return 0
|
||||
}
|
||||
switch b[n] {
|
||||
case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
|
||||
n++
|
||||
p.ib++
|
||||
continue
|
||||
case 'u':
|
||||
n++
|
||||
p.ib++
|
||||
for j := 0; j < 4 && len(b[n:]) > 0; j++ {
|
||||
if !isXDigit(b[n]) {
|
||||
return 0
|
||||
}
|
||||
n++
|
||||
p.ib++
|
||||
}
|
||||
continue
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
case '"':
|
||||
return n
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (p *parserState) consumeNumber(b []byte) (n int) {
|
||||
got := false
|
||||
var i int
|
||||
|
||||
if len(b) == 0 {
|
||||
goto out
|
||||
}
|
||||
if b[0] == '-' {
|
||||
b, i = b[1:], i+1
|
||||
p.ib++
|
||||
}
|
||||
|
||||
for len(b) > 0 {
|
||||
if !isDigit(b[0]) {
|
||||
break
|
||||
}
|
||||
got = true
|
||||
b, i = b[1:], i+1
|
||||
p.ib++
|
||||
}
|
||||
if len(b) == 0 {
|
||||
goto out
|
||||
}
|
||||
if b[0] == '.' {
|
||||
b, i = b[1:], i+1
|
||||
p.ib++
|
||||
}
|
||||
for len(b) > 0 {
|
||||
if !isDigit(b[0]) {
|
||||
break
|
||||
}
|
||||
got = true
|
||||
b, i = b[1:], i+1
|
||||
p.ib++
|
||||
}
|
||||
if len(b) == 0 {
|
||||
goto out
|
||||
}
|
||||
if got && (b[0] == 'e' || b[0] == 'E') {
|
||||
b, i = b[1:], i+1
|
||||
p.ib++
|
||||
got = false
|
||||
if len(b) == 0 {
|
||||
goto out
|
||||
}
|
||||
if b[0] == '+' || b[0] == '-' {
|
||||
b, i = b[1:], i+1
|
||||
p.ib++
|
||||
}
|
||||
for len(b) > 0 {
|
||||
if !isDigit(b[0]) {
|
||||
break
|
||||
}
|
||||
got = true
|
||||
b, i = b[1:], i+1
|
||||
p.ib++
|
||||
}
|
||||
}
|
||||
out:
|
||||
if got {
|
||||
return i
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (p *parserState) consumeArray(b []byte, qs []query, lvl int) (n int) {
|
||||
p.currPath = append(p.currPath, []byte{'['})
|
||||
if len(b) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
for n < len(b) {
|
||||
n += p.consumeSpace(b[n:])
|
||||
if len(b[n:]) == 0 {
|
||||
return 0
|
||||
}
|
||||
if b[n] == ']' {
|
||||
p.ib++
|
||||
p.currPath = p.currPath[:len(p.currPath)-1]
|
||||
return n + 1
|
||||
}
|
||||
innerParsed := p.consumeAny(b[n:], qs, lvl)
|
||||
if innerParsed == 0 {
|
||||
return 0
|
||||
}
|
||||
n += innerParsed
|
||||
if len(b[n:]) == 0 {
|
||||
return 0
|
||||
}
|
||||
switch b[n] {
|
||||
case ',':
|
||||
n += 1
|
||||
p.ib++
|
||||
continue
|
||||
case ']':
|
||||
p.ib++
|
||||
return n + 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func queryPathMatch(qs []query, path [][]byte) int {
|
||||
for i := range qs {
|
||||
if eq(qs[i].SearchPath, path) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func (p *parserState) consumeObject(b []byte, qs []query, lvl int) (n int) {
|
||||
for n < len(b) {
|
||||
n += p.consumeSpace(b[n:])
|
||||
if len(b[n:]) == 0 {
|
||||
return 0
|
||||
}
|
||||
if b[n] == '}' {
|
||||
p.ib++
|
||||
return n + 1
|
||||
}
|
||||
if b[n] != '"' {
|
||||
return 0
|
||||
} else {
|
||||
n += 1
|
||||
p.ib++
|
||||
}
|
||||
// queryMatched stores the index of the query satisfying the current path.
|
||||
queryMatched := -1
|
||||
if keyLen := p.consumeString(b[n:]); keyLen == 0 {
|
||||
return 0
|
||||
} else {
|
||||
p.currPath = append(p.currPath, b[n:n+keyLen-1])
|
||||
if !p.querySatisfied {
|
||||
queryMatched = queryPathMatch(qs, p.currPath)
|
||||
}
|
||||
n += keyLen
|
||||
}
|
||||
n += p.consumeSpace(b[n:])
|
||||
if len(b[n:]) == 0 {
|
||||
return 0
|
||||
}
|
||||
if b[n] != ':' {
|
||||
return 0
|
||||
} else {
|
||||
n += 1
|
||||
p.ib++
|
||||
}
|
||||
n += p.consumeSpace(b[n:])
|
||||
if len(b[n:]) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
if valLen := p.consumeAny(b[n:], qs, lvl); valLen == 0 {
|
||||
return 0
|
||||
} else {
|
||||
if queryMatched != -1 {
|
||||
q := qs[queryMatched]
|
||||
if len(q.SearchVals) == 0 {
|
||||
p.querySatisfied = true
|
||||
}
|
||||
for _, val := range q.SearchVals {
|
||||
if bytes.Equal(val, bytes.TrimSpace(b[n:n+valLen])) {
|
||||
p.querySatisfied = true
|
||||
}
|
||||
}
|
||||
}
|
||||
n += valLen
|
||||
}
|
||||
if len(b[n:]) == 0 {
|
||||
return 0
|
||||
}
|
||||
switch b[n] {
|
||||
case ',':
|
||||
p.currPath = p.currPath[:len(p.currPath)-1]
|
||||
n++
|
||||
p.ib++
|
||||
continue
|
||||
case '}':
|
||||
p.currPath = p.currPath[:len(p.currPath)-1]
|
||||
p.ib++
|
||||
return n + 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (p *parserState) consumeAny(b []byte, qs []query, lvl int) (n int) {
|
||||
// Avoid too much recursion.
|
||||
if p.maxRecursion != 0 && lvl > p.maxRecursion {
|
||||
return 0
|
||||
}
|
||||
n += p.consumeSpace(b)
|
||||
if len(b[n:]) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var t, rv int
|
||||
switch b[n] {
|
||||
case '"':
|
||||
n++
|
||||
p.ib++
|
||||
rv = p.consumeString(b[n:])
|
||||
t = TokString
|
||||
case '[':
|
||||
n++
|
||||
p.ib++
|
||||
rv = p.consumeArray(b[n:], qs, lvl+1)
|
||||
t = TokArray
|
||||
case '{':
|
||||
n++
|
||||
p.ib++
|
||||
rv = p.consumeObject(b[n:], qs, lvl+1)
|
||||
t = TokObject
|
||||
case 't':
|
||||
rv = p.consumeConst(b[n:], []byte("true"))
|
||||
t = TokTrue
|
||||
case 'f':
|
||||
rv = p.consumeConst(b[n:], []byte("false"))
|
||||
t = TokFalse
|
||||
case 'n':
|
||||
rv = p.consumeConst(b[n:], []byte("null"))
|
||||
t = TokNull
|
||||
default:
|
||||
rv = p.consumeNumber(b[n:])
|
||||
t = TokNumber
|
||||
}
|
||||
if lvl == 0 {
|
||||
p.firstToken = t
|
||||
}
|
||||
if len(qs) == 0 {
|
||||
p.querySatisfied = true
|
||||
}
|
||||
if rv <= 0 {
|
||||
return n
|
||||
}
|
||||
n += rv
|
||||
n += p.consumeSpace(b[n:])
|
||||
return n
|
||||
}
|
||||
|
||||
func isSpace(c byte) bool {
|
||||
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
|
||||
}
|
||||
func isDigit(c byte) bool {
|
||||
return '0' <= c && c <= '9'
|
||||
}
|
||||
|
||||
func isXDigit(c byte) bool {
|
||||
if isDigit(c) {
|
||||
return true
|
||||
}
|
||||
return ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')
|
||||
}
|
||||
|
||||
const (
|
||||
TokInvalid = 0
|
||||
TokNull = 1 << iota
|
||||
TokTrue
|
||||
TokFalse
|
||||
TokNumber
|
||||
TokString
|
||||
TokArray
|
||||
TokObject
|
||||
TokComma
|
||||
)
|
||||
vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go (2 changes, generated, vendored)

@@ -137,7 +137,7 @@ func tarParseOctal(b []byte) int64 {
if b == 0 {
break
}
if !(b >= '0' && b <= '7') {
if b < '0' || b > '7' {
return -1
}
ret = (ret << 3) | int64(b-'0')
vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go (6 changes, generated, vendored)

@@ -71,7 +71,7 @@ func Dbf(raw []byte, limit uint32) bool {
}

// 3rd and 4th bytes contain the last update month and day of month.
if !(0 < raw[2] && raw[2] < 13 && 0 < raw[3] && raw[3] < 32) {
if raw[2] == 0 || raw[2] > 12 || raw[3] == 0 || raw[3] > 31 {
return false
}

@@ -153,7 +153,7 @@ func Marc(raw []byte, limit uint32) bool {
return bytes.Contains(raw[:min(2048, len(raw))], []byte{0x1E})
}

// Glb matches a glTF model format file.
// GLB matches a glTF model format file.
// GLB is the binary file format representation of 3D models saved in
// the GL transmission Format (glTF).
// GLB uses little endian and its header structure is as follows:

@@ -168,7 +168,7 @@ func Marc(raw []byte, limit uint32) bool {
//
// [glTF specification]: https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html
// [IANA glTF entry]: https://www.iana.org/assignments/media-types/model/gltf-binary
var Glb = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"),
var GLB = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"),
[]byte("\x67\x6C\x54\x46\x01\x00\x00\x00"))

// TzIf matches a Time Zone Information Format (TZif) file.
vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go (14 changes, generated, vendored)

@@ -12,13 +12,13 @@ func Shp(raw []byte, limit uint32) bool {
return false
}

if !(binary.BigEndian.Uint32(raw[0:4]) == 9994 &&
binary.BigEndian.Uint32(raw[4:8]) == 0 &&
binary.BigEndian.Uint32(raw[8:12]) == 0 &&
binary.BigEndian.Uint32(raw[12:16]) == 0 &&
binary.BigEndian.Uint32(raw[16:20]) == 0 &&
binary.BigEndian.Uint32(raw[20:24]) == 0 &&
binary.LittleEndian.Uint32(raw[28:32]) == 1000) {
if binary.BigEndian.Uint32(raw[0:4]) != 9994 ||
binary.BigEndian.Uint32(raw[4:8]) != 0 ||
binary.BigEndian.Uint32(raw[8:12]) != 0 ||
binary.BigEndian.Uint32(raw[12:16]) != 0 ||
binary.BigEndian.Uint32(raw[16:20]) != 0 ||
binary.BigEndian.Uint32(raw[20:24]) != 0 ||
binary.LittleEndian.Uint32(raw[28:32]) != 1000 {
return false
}

vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go (164 changes, generated, vendored)

@@ -2,7 +2,6 @@ package magic

import (
"bytes"
"strings"
"time"

"github.com/gabriel-vasile/mimetype/internal/charset"

@@ -154,145 +153,75 @@ func Php(raw []byte, limit uint32) bool {

// JSON matches a JavaScript Object Notation file.
func JSON(raw []byte, limit uint32) bool {
raw = trimLWS(raw)
// #175 A single JSON string, number or bool is not considered JSON.
// JSON objects and arrays are reported as JSON.
if len(raw) < 2 || (raw[0] != '[' && raw[0] != '{') {
return false
}
parsed, err := json.Scan(raw)
// If the full file content was provided, check there is no error.
if limit == 0 || len(raw) < int(limit) {
return err == nil
}

// If a section of the file was provided, check if all of it was parsed.
return parsed == len(raw) && len(raw) > 0
return jsonHelper(raw, limit, json.QueryNone, json.TokObject|json.TokArray)
}

// GeoJSON matches a RFC 7946 GeoJSON file.
//
// GeoJSON detection implies searching for key:value pairs like: `"type": "Feature"`
// in the input.
// BUG(gabriel-vasile): The "type" key should be searched for in the root object.
func GeoJSON(raw []byte, limit uint32) bool {
raw = trimLWS(raw)
if len(raw) == 0 {
return jsonHelper(raw, limit, json.QueryGeo, json.TokObject)
}

// HAR matches a HAR Spec file.
// Spec: http://www.softwareishard.com/blog/har-12-spec/
func HAR(raw []byte, limit uint32) bool {
return jsonHelper(raw, limit, json.QueryHAR, json.TokObject)
}

// GLTF matches a GL Transmission Format (JSON) file.
// Visit [glTF specification] and [IANA glTF entry] for more details.
//
// [glTF specification]: https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html
// [IANA glTF entry]: https://www.iana.org/assignments/media-types/model/gltf+json
func GLTF(raw []byte, limit uint32) bool {
return jsonHelper(raw, limit, json.QueryGLTF, json.TokObject)
}

func jsonHelper(raw []byte, limit uint32, q string, wantTok int) bool {
if !json.LooksLikeObjectOrArray(raw) {
return false
}
// GeoJSON is always a JSON object, not a JSON array or any other JSON value.
if raw[0] != '{' {
lraw := len(raw)
parsed, inspected, firstToken, querySatisfied := json.Parse(q, raw)
if !querySatisfied || firstToken&wantTok == 0 {
return false
}

s := []byte(`"type"`)
si, sl := bytes.Index(raw, s), len(s)

if si == -1 {
return false
// If the full file content was provided, check that the whole input was parsed.
if limit == 0 || lraw < int(limit) {
return parsed == lraw
}

// If the "type" string is the suffix of the input,
// there is no need to search for the value of the key.
if si+sl == len(raw) {
return false
}
// Skip the "type" part.
raw = raw[si+sl:]
// Skip any whitespace before the colon.
raw = trimLWS(raw)
// Check for colon.
if len(raw) == 0 || raw[0] != ':' {
return false
}
// Skip any whitespace after the colon.
raw = trimLWS(raw[1:])

geoJSONTypes := [][]byte{
[]byte(`"Feature"`),
[]byte(`"FeatureCollection"`),
[]byte(`"Point"`),
[]byte(`"LineString"`),
[]byte(`"Polygon"`),
[]byte(`"MultiPoint"`),
[]byte(`"MultiLineString"`),
[]byte(`"MultiPolygon"`),
[]byte(`"GeometryCollection"`),
}
for _, t := range geoJSONTypes {
if bytes.HasPrefix(raw, t) {
return true
}
}

return false
// If a section of the file was provided, check if all of it was inspected.
// In other words, check that if there was a problem parsing, that problem
// occured at the last byte in the input.
return inspected == lraw && lraw > 0
}

// NdJSON matches a Newline delimited JSON file. All complete lines from raw
// must be valid JSON documents meaning they contain one of the valid JSON data
// types.
func NdJSON(raw []byte, limit uint32) bool {
lCount, hasObjOrArr := 0, false
lCount, objOrArr := 0, 0
raw = dropLastLine(raw, limit)
var l []byte
for len(raw) != 0 {
l, raw = scanLine(raw)
// Empty lines are allowed in NDJSON.
if l = trimRWS(trimLWS(l)); len(l) == 0 {
continue
}
_, err := json.Scan(l)
|
||||
if err != nil {
|
||||
_, inspected, firstToken, _ := json.Parse(json.QueryNone, l)
|
||||
if len(l) != inspected {
|
||||
return false
|
||||
}
|
||||
if l[0] == '[' || l[0] == '{' {
|
||||
hasObjOrArr = true
|
||||
if firstToken == json.TokArray || firstToken == json.TokObject {
|
||||
objOrArr++
|
||||
}
|
||||
lCount++
|
||||
}
|
||||
|
||||
return lCount > 1 && hasObjOrArr
|
||||
}
|
||||
|
||||
// HAR matches a HAR Spec file.
|
||||
// Spec: http://www.softwareishard.com/blog/har-12-spec/
|
||||
func HAR(raw []byte, limit uint32) bool {
|
||||
s := []byte(`"log"`)
|
||||
si, sl := bytes.Index(raw, s), len(s)
|
||||
|
||||
if si == -1 {
|
||||
return false
|
||||
}
|
||||
|
||||
// If the "log" string is the suffix of the input,
|
||||
// there is no need to search for the value of the key.
|
||||
if si+sl == len(raw) {
|
||||
return false
|
||||
}
|
||||
// Skip the "log" part.
|
||||
raw = raw[si+sl:]
|
||||
// Skip any whitespace before the colon.
|
||||
raw = trimLWS(raw)
|
||||
// Check for colon.
|
||||
if len(raw) == 0 || raw[0] != ':' {
|
||||
return false
|
||||
}
|
||||
// Skip any whitespace after the colon.
|
||||
raw = trimLWS(raw[1:])
|
||||
|
||||
harJSONTypes := [][]byte{
|
||||
[]byte(`"version"`),
|
||||
[]byte(`"creator"`),
|
||||
[]byte(`"entries"`),
|
||||
}
|
||||
for _, t := range harJSONTypes {
|
||||
si := bytes.Index(raw, t)
|
||||
if si > -1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
return lCount > 1 && objOrArr > 0
|
||||
}
|
||||
|
||||
// Svg matches a SVG file.
|
||||
|
|
@@ -305,32 +234,31 @@ func Srt(raw []byte, _ uint32) bool {
|
|||
line, raw := scanLine(raw)
|
||||
|
||||
// First line must be 1.
|
||||
if string(line) != "1" {
|
||||
if len(line) != 1 || line[0] != '1' {
|
||||
return false
|
||||
}
|
||||
line, raw = scanLine(raw)
|
||||
secondLine := string(line)
|
||||
// Timestamp format (e.g: 00:02:16,612 --> 00:02:19,376) limits secondLine
|
||||
// Timestamp format (e.g: 00:02:16,612 --> 00:02:19,376) limits second line
|
||||
// length to exactly 29 characters.
|
||||
if len(secondLine) != 29 {
|
||||
if len(line) != 29 {
|
||||
return false
|
||||
}
|
||||
// Decimal separator of fractional seconds in the timestamps must be a
|
||||
// comma, not a period.
|
||||
if strings.Contains(secondLine, ".") {
|
||||
if bytes.IndexByte(line, '.') != -1 {
|
||||
return false
|
||||
}
|
||||
// Second line must be a time range.
|
||||
ts := strings.Split(secondLine, " --> ")
|
||||
if len(ts) != 2 {
|
||||
sep := []byte(" --> ")
|
||||
i := bytes.Index(line, sep)
|
||||
if i == -1 {
|
||||
return false
|
||||
}
|
||||
const layout = "15:04:05,000"
|
||||
t0, err := time.Parse(layout, ts[0])
|
||||
t0, err := time.Parse(layout, string(line[:i]))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
t1, err := time.Parse(layout, ts[1])
|
||||
t1, err := time.Parse(layout, string(line[i+len(sep):]))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
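For reference, the `15:04:05,000` layout used by the reworked Srt matcher parses SubRip's comma-separated timestamps directly with the standard library; a small sketch (the sample timestamp is invented):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Go reference-time layout with a comma before the fractional seconds,
	// matching SubRip timestamps such as "00:02:16,612".
	const layout = "15:04:05,000"
	t, err := time.Parse(layout, "00:02:16,612")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Format(layout)) // 00:02:16,612
}
```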
|
|
|||
3  vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md  generated  vendored
|
|
@@ -1,4 +1,4 @@
|
|||
## 178 Supported MIME types
|
||||
## 179 Supported MIME types
|
||||
This file is automatically generated when running tests. Do not edit manually.
|
||||
|
||||
Extension | MIME type | Aliases
|
||||
|
|
@@ -171,6 +171,7 @@ Extension | MIME type | Aliases
|
|||
**.json** | application/json | -
|
||||
**.geojson** | application/geo+json | -
|
||||
**.har** | application/json | -
|
||||
**.gltf** | model/gltf+json | -
|
||||
**.ndjson** | application/x-ndjson | -
|
||||
**.rtf** | text/rtf | application/rtf
|
||||
**.srt** | application/x-subrip | application/x-srt, text/x-srt
|
||||
|
|
|
|||
5  vendor/github.com/gabriel-vasile/mimetype/tree.go  generated  vendored
|
|
@@ -83,7 +83,7 @@ var (
|
|||
text = newMIME("text/plain", ".txt", magic.Text, html, svg, xml, php, js, lua, perl, python, json, ndJSON, rtf, srt, tcl, csv, tsv, vCard, iCalendar, warc, vtt)
|
||||
xml = newMIME("text/xml", ".xml", magic.XML, rss, atom, x3d, kml, xliff, collada, gml, gpx, tcx, amf, threemf, xfdf, owl2).
|
||||
alias("application/xml")
|
||||
json = newMIME("application/json", ".json", magic.JSON, geoJSON, har)
|
||||
json = newMIME("application/json", ".json", magic.JSON, geoJSON, har, gltf)
|
||||
har = newMIME("application/json", ".har", magic.HAR)
|
||||
csv = newMIME("text/csv", ".csv", magic.Csv)
|
||||
tsv = newMIME("text/tab-separated-values", ".tsv", magic.Tsv)
|
||||
|
|
@@ -262,7 +262,8 @@ var (
|
|||
pat = newMIME("image/x-gimp-pat", ".pat", magic.Pat)
|
||||
gbr = newMIME("image/x-gimp-gbr", ".gbr", magic.Gbr)
|
||||
xfdf = newMIME("application/vnd.adobe.xfdf", ".xfdf", magic.Xfdf)
|
||||
glb = newMIME("model/gltf-binary", ".glb", magic.Glb)
|
||||
glb = newMIME("model/gltf-binary", ".glb", magic.GLB)
|
||||
gltf = newMIME("model/gltf+json", ".gltf", magic.GLTF)
|
||||
jxr = newMIME("image/jxr", ".jxr", magic.Jxr).alias("image/vnd.ms-photo")
|
||||
parquet = newMIME("application/vnd.apache.parquet", ".parquet", magic.Par1).
|
||||
alias("application/x-parquet")
|
||||
|
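Taken together, the Glb→GLB rename and the new gltf registration keep binary glTF detection on the public API unchanged. A hedged usage sketch built on the magic prefix from the hunks above (the sample bytes are just the 8-byte GLB v2 header plus padding, and the expected results are noted as expectations, not asserted behaviour):

```go
package main

import (
	"fmt"

	"github.com/gabriel-vasile/mimetype"
)

func main() {
	// 8-byte binary glTF v2 header ("glTF" + little-endian version 2),
	// taken from the prefix above, padded so detection has some data to read.
	glb := append([]byte("glTF\x02\x00\x00\x00"), make([]byte, 24)...)

	m := mimetype.Detect(glb)
	fmt.Println(m.String())                // model/gltf-binary (expected)
	fmt.Println(m.Is("model/gltf-binary")) // true (expected)
}
```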
|
|
|||
215  vendor/github.com/gin-contrib/cors/README.md  generated  vendored
|
|
@@ -1,47 +1,89 @@
|
|||
# CORS gin's middleware
|
||||
# gin-contrib/cors
|
||||
|
||||
[](https://github.com/gin-contrib/cors/actions/workflows/go.yml)
|
||||
[](https://codecov.io/gh/gin-contrib/cors)
|
||||
[](https://goreportcard.com/report/github.com/gin-contrib/cors)
|
||||
[](https://godoc.org/github.com/gin-contrib/cors)
|
||||
|
||||
Gin middleware/handler to enable CORS support.
|
||||
- [gin-contrib/cors](#gin-contribcors)
|
||||
- [Overview](#overview)
|
||||
- [Installation](#installation)
|
||||
- [Quick Start](#quick-start)
|
||||
- [Advanced Usage](#advanced-usage)
|
||||
- [Custom Configuration](#custom-configuration)
|
||||
- [DefaultConfig Reference](#defaultconfig-reference)
|
||||
- [Default() Convenience](#default-convenience)
|
||||
- [Configuration Reference](#configuration-reference)
|
||||
- [Notes on Configuration](#notes-on-configuration)
|
||||
- [Examples](#examples)
|
||||
- [Advanced Options](#advanced-options)
|
||||
- [Custom Origin Validation](#custom-origin-validation)
|
||||
- [With Gin Context](#with-gin-context)
|
||||
- [Helper Methods](#helper-methods)
|
||||
- [Validation \& Error Handling](#validation--error-handling)
|
||||
- [Important Notes](#important-notes)
|
||||
|
||||
## Usage
|
||||
---
|
||||
|
||||
### Start using it
|
||||
## Overview
|
||||
|
||||
Download and install it:
|
||||
**CORS (Cross-Origin Resource Sharing)** middleware for [Gin](https://github.com/gin-gonic/gin).
|
||||
|
||||
- Enables flexible CORS handling for your Gin-based APIs.
|
||||
- Highly configurable: origins, methods, headers, credentials, and more.
|
||||
|
||||
---
|
||||
|
||||
## Installation
|
||||
|
||||
```sh
|
||||
go get github.com/gin-contrib/cors
|
||||
```
|
||||
|
||||
Import it in your code:
|
||||
Import in your Go code:
|
||||
|
||||
```go
|
||||
import "github.com/gin-contrib/cors"
|
||||
```
|
||||
|
||||
### Canonical example
|
||||
---
|
||||
|
||||
## Quick Start
|
||||
|
||||
Allow all origins (default):
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/gin-contrib/cors"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
router := gin.Default()
|
||||
// CORS for https://foo.com and https://github.com origins, allowing:
|
||||
// - PUT and PATCH methods
|
||||
// - Origin header
|
||||
// - Credentials share
|
||||
// - Preflight requests cached for 12 hours
|
||||
router.Use(cors.Default()) // All origins allowed by default
|
||||
router.Run()
|
||||
}
|
||||
```
|
||||
|
||||
> ⚠️ **Warning:** Allowing all origins disables cookies for clients. For credentialed requests, **do not** allow all origins.
|
||||
|
||||
---
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Custom Configuration
|
||||
|
||||
Configure allowed origins, methods, headers, and more:
|
||||
|
||||
```go
|
||||
import (
|
||||
"time"
|
||||
"github.com/gin-contrib/cors"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
router := gin.Default()
|
||||
router.Use(cors.New(cors.Config{
|
||||
AllowOrigins: []string{"https://foo.com"},
|
||||
AllowMethods: []string{"PUT", "PATCH"},
|
||||
|
|
@ -57,15 +99,20 @@ func main() {
|
|||
}
|
||||
```
|
||||
|
||||
### Using DefaultConfig as start point
|
||||
---
|
||||
|
||||
### DefaultConfig Reference
|
||||
|
||||
Start with library defaults and customize as needed:
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/gin-contrib/cors"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
router := gin.Default()
|
||||
// - No origin allowed by default
|
||||
// - GET,POST, PUT, HEAD methods
|
||||
// - Credentials share disabled
|
||||
// - Preflight requests cached for 12 hours
|
||||
config := cors.DefaultConfig()
|
||||
config.AllowOrigins = []string{"http://google.com"}
|
||||
// config.AllowOrigins = []string{"http://google.com", "http://facebook.com"}
|
||||
|
|
@ -76,20 +123,124 @@ func main() {
|
|||
}
|
||||
```
|
||||
|
||||
Note: while Default() allows all origins, DefaultConfig() does not and you will still have to use AllowAllOrigins.
|
||||
> **Note:** `Default()` allows all origins, but `DefaultConfig()` does **not**. To allow all origins, set `AllowAllOrigins = true`.
|
||||
|
||||
### Default() allows all origins
|
||||
---
|
||||
|
||||
### Default() Convenience
|
||||
|
||||
Enable all origins with a single call:
|
||||
|
||||
```go
|
||||
func main() {
|
||||
router := gin.Default()
|
||||
// same as
|
||||
// config := cors.DefaultConfig()
|
||||
// config.AllowAllOrigins = true
|
||||
// router.Use(cors.New(config))
|
||||
router.Use(cors.Default())
|
||||
router.Run()
|
||||
router.Use(cors.Default()) // Equivalent to AllowAllOrigins = true
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Configuration Reference
|
||||
|
||||
The middleware is controlled via the `cors.Config` struct. All fields are optional unless otherwise stated.
|
||||
|
||||
| Field | Type | Default | Description |
|
||||
|-------------------------------|-----------------------------|-----------------------------------------------------------|-----------------------------------------------------------------------------------------------|
|
||||
| `AllowAllOrigins` | `bool` | `false` | If true, allows all origins. Credentials **cannot** be used. |
|
||||
| `AllowOrigins` | `[]string` | `[]` | List of allowed origins. Supports exact match, `*`, and wildcards. |
|
||||
| `AllowOriginFunc` | `func(string) bool` | `nil` | Custom function to validate origin. If set, `AllowOrigins` is ignored. |
|
||||
| `AllowOriginWithContextFunc` | `func(*gin.Context,string)bool` | `nil` | Like `AllowOriginFunc`, but with request context. |
|
||||
| `AllowMethods` | `[]string` | `[]string{"GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS"}` | Allowed HTTP methods. |
|
||||
| `AllowPrivateNetwork` | `bool` | `false` | Adds [Private Network Access](https://wicg.github.io/private-network-access/) CORS header. |
|
||||
| `AllowHeaders` | `[]string` | `[]` | List of non-simple headers permitted in requests. |
|
||||
| `AllowCredentials` | `bool` | `false` | Allow cookies, HTTP auth, or client certs. Only if precise origins are used. |
|
||||
| `ExposeHeaders` | `[]string` | `[]` | Headers exposed to the browser. |
|
||||
| `MaxAge` | `time.Duration` | `12 * time.Hour` | Cache time for preflight requests. |
|
||||
| `AllowWildcard` | `bool` | `false` | Enables wildcards in origins (e.g. `https://*.example.com`). |
|
||||
| `AllowBrowserExtensions` | `bool` | `false` | Allow browser extension schemes as origins (e.g. `chrome-extension://`). |
|
||||
| `CustomSchemas` | `[]string` | `nil` | Additional allowed URI schemes (e.g. `tauri://`). |
|
||||
| `AllowWebSockets` | `bool` | `false` | Allow `ws://` and `wss://` schemas. |
|
||||
| `AllowFiles` | `bool` | `false` | Allow `file://` origins (dangerous; use only if necessary). |
|
||||
| `OptionsResponseStatusCode` | `int` | `204` | Custom status code for `OPTIONS` responses. |
|
||||
|
||||
---
|
||||
|
||||
### Notes on Configuration
|
||||
|
||||
- Only one of `AllowAllOrigins`, `AllowOrigins`, `AllowOriginFunc`, or `AllowOriginWithContextFunc` should be set.
|
||||
- If `AllowAllOrigins` is true, other origin settings are ignored and credentialed requests are not allowed.
|
||||
- If `AllowWildcard` is enabled, only one `*` is allowed per origin string.
|
||||
- Use `AllowBrowserExtensions`, `AllowWebSockets`, or `AllowFiles` to permit non-HTTP(s) protocols as origins.
|
||||
- Custom schemas allow, for example, usage in desktop apps via custom URI schemes (`tauri://`, etc.).
|
||||
- If both `AllowOriginFunc` and `AllowOriginWithContextFunc` are set, the context-specific function is preferred.
|
||||
|
||||
---
|
||||
|
||||
### Examples
|
||||
|
||||
#### Advanced Options
|
||||
|
||||
```go
|
||||
config := cors.Config{
|
||||
AllowOrigins: []string{"https://*.foo.com", "https://bar.com"},
|
||||
AllowWildcard: true,
|
||||
AllowMethods: []string{"GET", "POST"},
|
||||
AllowHeaders: []string{"Authorization", "Content-Type"},
|
||||
AllowCredentials: true,
|
||||
AllowBrowserExtensions: true,
|
||||
AllowWebSockets: true,
|
||||
AllowFiles: false,
|
||||
CustomSchemas: []string{"tauri://"},
|
||||
MaxAge: 24 * time.Hour,
|
||||
ExposeHeaders: []string{"X-Custom-Header"},
|
||||
AllowPrivateNetwork: true,
|
||||
}
|
||||
```
|
||||
|
||||
Using all origins disables the ability for Gin to set cookies for clients. When dealing with credentials, don't allow all origins.
|
||||
#### Custom Origin Validation
|
||||
|
||||
```go
|
||||
config := cors.Config{
|
||||
AllowOriginFunc: func(origin string) bool {
|
||||
// Allow any github.com subdomain or a custom rule
|
||||
return strings.HasSuffix(origin, "github.com")
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
#### With Gin Context
|
||||
|
||||
```go
|
||||
config := cors.Config{
|
||||
AllowOriginWithContextFunc: func(c *gin.Context, origin string) bool {
|
||||
// Allow only if a certain header is present
|
||||
return c.Request.Header.Get("X-Allow-CORS") == "yes"
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Helper Methods
|
||||
|
||||
Dynamically add methods or headers to the config:
|
||||
|
||||
```go
|
||||
config.AddAllowMethods("DELETE", "OPTIONS")
|
||||
config.AddAllowHeaders("X-My-Header")
|
||||
config.AddExposeHeaders("X-Other-Header")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Validation & Error Handling
|
||||
|
||||
- Calling `Validate()` on a `Config` checks for misconfiguration (called internally).
|
||||
- If `AllowAllOrigins` is set, you cannot also set `AllowOrigins` or any `AllowOriginFunc`.
|
||||
- If neither `AllowAllOrigins`, `AllowOriginFunc`, nor `AllowOrigins` is set, an error is raised.
|
||||
- If an `AllowOrigin` contains a wildcard but `AllowWildcard` is not enabled, or more than one `*` is present, a panic is triggered.
|
||||
- Invalid origin schemas or unsupported wildcards are rejected.
|
||||
|
||||
---
|
||||
|
||||
## Important Notes
|
||||
|
||||
- **Enabling all origins disables cookies:** When `AllowAllOrigins` is enabled, Gin cannot set cookies for clients. If you need credential sharing (cookies, authentication headers), **do not** allow all origins.
|
||||
- For detailed documentation and configuration options, see the [GoDoc](https://godoc.org/github.com/gin-contrib/cors).
|
||||
|
|
|
|||
2  vendor/github.com/gin-contrib/cors/config.go  generated  vendored
|
|
@ -87,7 +87,7 @@ func (cors *cors) applyCors(c *gin.Context) {
|
|||
return
|
||||
}
|
||||
|
||||
if c.Request.Method == "OPTIONS" {
|
||||
if c.Request.Method == http.MethodOptions {
|
||||
cors.handlePreflight(c)
|
||||
defer c.AbortWithStatus(cors.optionsResponseStatusCode)
|
||||
} else {
|
||||
|
|
|
|||
49  vendor/github.com/gin-contrib/sse/.golangci.yml  generated  vendored
|
|
@ -1,3 +1,50 @@
|
|||
version: "2"
|
||||
linters:
|
||||
disable:
|
||||
default: none
|
||||
enable:
|
||||
- bodyclose
|
||||
- dogsled
|
||||
- dupl
|
||||
- errcheck
|
||||
- exhaustive
|
||||
- gochecknoinits
|
||||
- goconst
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- govet
|
||||
- ineffassign
|
||||
- lll
|
||||
- misspell
|
||||
- nakedret
|
||||
- noctx
|
||||
- nolintlint
|
||||
- rowserrcheck
|
||||
- staticcheck
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
- whitespace
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- gofumpt
|
||||
- goimports
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
|
|
|
|||
13  vendor/github.com/gin-contrib/sse/sse-decoder.go  generated  vendored
|
|
@ -7,7 +7,6 @@ package sse
|
|||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
|
|
@ -22,7 +21,8 @@ func Decode(r io.Reader) ([]Event, error) {
|
|||
func (d *decoder) dispatchEvent(event Event, data string) {
|
||||
dataLength := len(data)
|
||||
if dataLength > 0 {
|
||||
//If the data buffer's last character is a U+000A LINE FEED (LF) character, then remove the last character from the data buffer.
|
||||
// If the data buffer's last character is a U+000A LINE FEED (LF) character,
|
||||
// then remove the last character from the data buffer.
|
||||
data = data[:dataLength-1]
|
||||
dataLength--
|
||||
}
|
||||
|
|
@ -37,13 +37,13 @@ func (d *decoder) dispatchEvent(event Event, data string) {
|
|||
}
|
||||
|
||||
func (d *decoder) decode(r io.Reader) ([]Event, error) {
|
||||
buf, err := ioutil.ReadAll(r)
|
||||
buf, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var currentEvent Event
|
||||
var dataBuffer *bytes.Buffer = new(bytes.Buffer)
|
||||
dataBuffer := new(bytes.Buffer)
|
||||
// TODO (and unit tests)
|
||||
// Lines must be separated by either a U+000D CARRIAGE RETURN U+000A LINE FEED (CRLF) character pair,
|
||||
// a single U+000A LINE FEED (LF) character,
|
||||
|
|
@ -96,7 +96,8 @@ func (d *decoder) decode(r io.Reader) ([]Event, error) {
|
|||
currentEvent.Id = string(value)
|
||||
case "retry":
|
||||
// If the field value consists of only characters in the range U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9),
|
||||
// then interpret the field value as an integer in base ten, and set the event stream's reconnection time to that integer.
|
||||
// then interpret the field value as an integer in base ten, and set the event stream's
|
||||
// reconnection time to that integer.
|
||||
// Otherwise, ignore the field.
|
||||
currentEvent.Id = string(value)
|
||||
case "data":
|
||||
|
|
@ -105,7 +106,7 @@ func (d *decoder) decode(r io.Reader) ([]Event, error) {
|
|||
// then append a single U+000A LINE FEED (LF) character to the data buffer.
|
||||
dataBuffer.WriteString("\n")
|
||||
default:
|
||||
//Otherwise. The field is ignored.
|
||||
// Otherwise. The field is ignored.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
|
|
|||
38  vendor/github.com/gin-contrib/sse/sse-encoder.go  generated  vendored
|
|
@ -20,8 +20,10 @@ import (
|
|||
|
||||
const ContentType = "text/event-stream;charset=utf-8"
|
||||
|
||||
var contentType = []string{ContentType}
|
||||
var noCache = []string{"no-cache"}
|
||||
var (
|
||||
contentType = []string{ContentType}
|
||||
noCache = []string{"no-cache"}
|
||||
)
|
||||
|
||||
var fieldReplacer = strings.NewReplacer(
|
||||
"\n", "\\n",
|
||||
|
|
@ -48,48 +50,48 @@ func Encode(writer io.Writer, event Event) error {
|
|||
|
||||
func writeId(w stringWriter, id string) {
|
||||
if len(id) > 0 {
|
||||
w.WriteString("id:")
|
||||
fieldReplacer.WriteString(w, id)
|
||||
w.WriteString("\n")
|
||||
_, _ = w.WriteString("id:")
|
||||
_, _ = fieldReplacer.WriteString(w, id)
|
||||
_, _ = w.WriteString("\n")
|
||||
}
|
||||
}
|
||||
|
||||
func writeEvent(w stringWriter, event string) {
|
||||
if len(event) > 0 {
|
||||
w.WriteString("event:")
|
||||
fieldReplacer.WriteString(w, event)
|
||||
w.WriteString("\n")
|
||||
_, _ = w.WriteString("event:")
|
||||
_, _ = fieldReplacer.WriteString(w, event)
|
||||
_, _ = w.WriteString("\n")
|
||||
}
|
||||
}
|
||||
|
||||
func writeRetry(w stringWriter, retry uint) {
|
||||
if retry > 0 {
|
||||
w.WriteString("retry:")
|
||||
w.WriteString(strconv.FormatUint(uint64(retry), 10))
|
||||
w.WriteString("\n")
|
||||
_, _ = w.WriteString("retry:")
|
||||
_, _ = w.WriteString(strconv.FormatUint(uint64(retry), 10))
|
||||
_, _ = w.WriteString("\n")
|
||||
}
|
||||
}
|
||||
|
||||
func writeData(w stringWriter, data interface{}) error {
|
||||
w.WriteString("data:")
|
||||
_, _ = w.WriteString("data:")
|
||||
|
||||
bData, ok := data.([]byte)
|
||||
if ok {
|
||||
dataReplacer.WriteString(w, string(bData))
|
||||
w.WriteString("\n\n")
|
||||
_, _ = dataReplacer.WriteString(w, string(bData))
|
||||
_, _ = w.WriteString("\n\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
switch kindOfData(data) {
|
||||
switch kindOfData(data) { //nolint:exhaustive
|
||||
case reflect.Struct, reflect.Slice, reflect.Map:
|
||||
err := json.NewEncoder(w).Encode(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteString("\n")
|
||||
_, _ = w.WriteString("\n")
|
||||
default:
|
||||
dataReplacer.WriteString(w, fmt.Sprint(data))
|
||||
w.WriteString("\n\n")
|
||||
_, _ = dataReplacer.WriteString(w, fmt.Sprint(data))
|
||||
_, _ = w.WriteString("\n\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
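For context, the encoder touched above writes one server-sent event per call; a hedged usage sketch (the Event field names are assumed from the writeId/writeEvent/writeData helpers in the hunks, and the expected output is noted in comments only):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/gin-contrib/sse"
)

func main() {
	var buf bytes.Buffer

	// Encode a single event. The write errors are now deliberately discarded
	// inside the helpers, but Encode still reports JSON encoding failures.
	if err := sse.Encode(&buf, sse.Event{
		Id:    "1",
		Event: "message",
		Data:  map[string]string{"hello": "world"},
	}); err != nil {
		log.Fatal(err)
	}

	fmt.Print(buf.String())
	// Expected shape:
	// id:1
	// event:message
	// data:{"hello":"world"}
}
```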
|
|
|||
2  vendor/github.com/gin-contrib/sse/writer.go  generated  vendored
|
|
@ -12,7 +12,7 @@ type stringWrapper struct {
|
|||
}
|
||||
|
||||
func (w stringWrapper) WriteString(str string) (int, error) {
|
||||
return w.Writer.Write([]byte(str))
|
||||
return w.Write([]byte(str))
|
||||
}
|
||||
|
||||
func checkWriter(writer io.Writer) stringWriter {
|
||||
|
|
|
|||
16  vendor/github.com/go-logr/logr/.golangci.yaml  generated  vendored
|
|
@ -1,26 +1,28 @@
|
|||
version: "2"
|
||||
|
||||
run:
|
||||
timeout: 1m
|
||||
tests: true
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
default: none
|
||||
enable: # please keep this alphabetized
|
||||
- asasalint
|
||||
- asciicheck
|
||||
- copyloopvar
|
||||
- dupl
|
||||
- errcheck
|
||||
- forcetypeassert
|
||||
- goconst
|
||||
- gocritic
|
||||
- gofmt
|
||||
- goimports
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- musttag
|
||||
- revive
|
||||
- staticcheck
|
||||
- typecheck
|
||||
- unused
|
||||
|
||||
issues:
|
||||
exclude-use-default: false
|
||||
max-issues-per-linter: 0
|
||||
max-same-issues: 10
|
||||
|
|
|
|||
8  vendor/github.com/go-logr/logr/funcr/funcr.go  generated  vendored
|
|
@ -77,7 +77,7 @@ func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
|
|||
write: fn,
|
||||
}
|
||||
// For skipping fnlogger.Info and fnlogger.Error.
|
||||
l.Formatter.AddCallDepth(1)
|
||||
l.AddCallDepth(1) // via Formatter
|
||||
return l
|
||||
}
|
||||
|
||||
|
|
@ -164,17 +164,17 @@ type fnlogger struct {
|
|||
}
|
||||
|
||||
func (l fnlogger) WithName(name string) logr.LogSink {
|
||||
l.Formatter.AddName(name)
|
||||
l.AddName(name) // via Formatter
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
|
||||
l.Formatter.AddValues(kvList)
|
||||
l.AddValues(kvList) // via Formatter
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
|
||||
l.Formatter.AddCallDepth(depth)
|
||||
l.AddCallDepth(depth) // via Formatter
|
||||
return &l
|
||||
}
|
||||
|
||||
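The funcr change relies on Go's method promotion: `fnlogger` embeds `Formatter`, so `l.AddCallDepth(...)` resolves to the embedded field's method. A standalone sketch of the mechanism with stand-in types (the real ones live in funcr):

```go
package main

import "fmt"

// Formatter is a stand-in for the embedded helper type.
type Formatter struct {
	depth int
}

func (f *Formatter) AddCallDepth(n int) { f.depth += n }

// fnlogger embeds Formatter, so Formatter's methods are promoted onto it.
type fnlogger struct {
	Formatter
}

func main() {
	var l fnlogger
	l.Formatter.AddCallDepth(1) // explicit form used before the change
	l.AddCallDepth(1)           // promoted form used after the change
	fmt.Println(l.depth)        // 2
}
```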
|
|
|
|||
24  vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/spec.go  generated  vendored
|
|
@ -30,17 +30,18 @@ import (
|
|||
|
||||
// SpecFile command to generate a swagger spec from a go application
|
||||
type SpecFile struct {
|
||||
WorkDir string `long:"work-dir" short:"w" description:"the base path to use" default:"."`
|
||||
BuildTags string `long:"tags" short:"t" description:"build tags" default:""`
|
||||
ScanModels bool `long:"scan-models" short:"m" description:"includes models that were annotated with 'swagger:model'"`
|
||||
Compact bool `long:"compact" description:"when present, doesn't prettify the json"`
|
||||
Output flags.Filename `long:"output" short:"o" description:"the file to write to"`
|
||||
Input flags.Filename `long:"input" short:"i" description:"an input swagger file with which to merge"`
|
||||
Include []string `long:"include" short:"c" description:"include packages matching pattern"`
|
||||
Exclude []string `long:"exclude" short:"x" description:"exclude packages matching pattern"`
|
||||
IncludeTags []string `long:"include-tag" short:"" description:"include routes having specified tags (can be specified many times)"`
|
||||
ExcludeTags []string `long:"exclude-tag" short:"" description:"exclude routes having specified tags (can be specified many times)"`
|
||||
ExcludeDeps bool `long:"exclude-deps" short:"" description:"exclude all dependencies of project"`
|
||||
WorkDir string `long:"work-dir" short:"w" description:"the base path to use" default:"."`
|
||||
BuildTags string `long:"tags" short:"t" description:"build tags" default:""`
|
||||
ScanModels bool `long:"scan-models" short:"m" description:"includes models that were annotated with 'swagger:model'"`
|
||||
Compact bool `long:"compact" description:"when present, doesn't prettify the json"`
|
||||
Output flags.Filename `long:"output" short:"o" description:"the file to write to"`
|
||||
Input flags.Filename `long:"input" short:"i" description:"an input swagger file with which to merge"`
|
||||
Include []string `long:"include" short:"c" description:"include packages matching pattern"`
|
||||
Exclude []string `long:"exclude" short:"x" description:"exclude packages matching pattern"`
|
||||
IncludeTags []string `long:"include-tag" short:"" description:"include routes having specified tags (can be specified many times)"`
|
||||
ExcludeTags []string `long:"exclude-tag" short:"" description:"exclude routes having specified tags (can be specified many times)"`
|
||||
ExcludeDeps bool `long:"exclude-deps" short:"" description:"exclude all dependencies of project"`
|
||||
SetXNullableForPointers bool `long:"nullable-pointers" short:"n" description:"set x-nullable extension to true automatically for fields of pointer types without 'omitempty'"`
|
||||
}
|
||||
|
||||
// Execute runs this command
|
||||
|
|
@ -65,6 +66,7 @@ func (s *SpecFile) Execute(args []string) error {
|
|||
opts.IncludeTags = s.IncludeTags
|
||||
opts.ExcludeTags = s.ExcludeTags
|
||||
opts.ExcludeDeps = s.ExcludeDeps
|
||||
opts.SetXNullableForPointers = s.SetXNullableForPointers
|
||||
swspec, err := codescan.Run(&opts)
|
||||
if err != nil {
|
||||
return err
|
||||
|
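A hedged illustration of what the new `--nullable-pointers` flag (short form `-n`, as declared above) is meant to do: for a pointer field whose `json` tag has no `omitempty`, the scanner can now attach `x-nullable: true` to the generated property. The model below is hypothetical and only sketches the expected effect:

```go
package models

// swagger:model profile
type Profile struct {
	// Pointer without omitempty: with `swagger generate spec --nullable-pointers`,
	// the generated property is expected to carry x-nullable: true.
	DisplayName *string `json:"display_name"`

	// Pointer with omitempty: expected to be left untouched by the new flag.
	Bio *string `json:"bio,omitempty"`
}
```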
|
|
|||
69  vendor/github.com/go-swagger/go-swagger/codescan/application.go  generated  vendored
|
|
@ -42,16 +42,17 @@ const (
|
|||
|
||||
// Options for the scanner
|
||||
type Options struct {
|
||||
Packages []string
|
||||
InputSpec *spec.Swagger
|
||||
ScanModels bool
|
||||
WorkDir string
|
||||
BuildTags string
|
||||
ExcludeDeps bool
|
||||
Include []string
|
||||
Exclude []string
|
||||
IncludeTags []string
|
||||
ExcludeTags []string
|
||||
Packages []string
|
||||
InputSpec *spec.Swagger
|
||||
ScanModels bool
|
||||
WorkDir string
|
||||
BuildTags string
|
||||
ExcludeDeps bool
|
||||
Include []string
|
||||
Exclude []string
|
||||
IncludeTags []string
|
||||
ExcludeTags []string
|
||||
SetXNullableForPointers bool
|
||||
}
|
||||
|
||||
type scanCtx struct {
|
||||
|
|
@ -94,7 +95,7 @@ func newScanCtx(opts *Options) (*scanCtx, error) {
|
|||
|
||||
app, err := newTypeIndex(pkgs, opts.ExcludeDeps,
|
||||
sliceToSet(opts.IncludeTags), sliceToSet(opts.ExcludeTags),
|
||||
opts.Include, opts.Exclude)
|
||||
opts.Include, opts.Exclude, opts.SetXNullableForPointers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -418,16 +419,17 @@ func (s *scanCtx) FindEnumValues(pkg *packages.Package, enumName string) (list [
|
|||
return list, descList, true
|
||||
}
|
||||
|
||||
func newTypeIndex(pkgs []*packages.Package, excludeDeps bool, includeTags, excludeTags map[string]bool, includePkgs, excludePkgs []string) (*typeIndex, error) {
|
||||
func newTypeIndex(pkgs []*packages.Package, excludeDeps bool, includeTags, excludeTags map[string]bool, includePkgs, excludePkgs []string, setXNullableForPointers bool) (*typeIndex, error) {
|
||||
ac := &typeIndex{
|
||||
AllPackages: make(map[string]*packages.Package),
|
||||
Models: make(map[*ast.Ident]*entityDecl),
|
||||
ExtraModels: make(map[*ast.Ident]*entityDecl),
|
||||
excludeDeps: excludeDeps,
|
||||
includeTags: includeTags,
|
||||
excludeTags: excludeTags,
|
||||
includePkgs: includePkgs,
|
||||
excludePkgs: excludePkgs,
|
||||
AllPackages: make(map[string]*packages.Package),
|
||||
Models: make(map[*ast.Ident]*entityDecl),
|
||||
ExtraModels: make(map[*ast.Ident]*entityDecl),
|
||||
excludeDeps: excludeDeps,
|
||||
includeTags: includeTags,
|
||||
excludeTags: excludeTags,
|
||||
includePkgs: includePkgs,
|
||||
excludePkgs: excludePkgs,
|
||||
setXNullableForPointers: setXNullableForPointers,
|
||||
}
|
||||
if err := ac.build(pkgs); err != nil {
|
||||
return nil, err
|
||||
|
|
@ -436,19 +438,20 @@ func newTypeIndex(pkgs []*packages.Package, excludeDeps bool, includeTags, exclu
|
|||
}
|
||||
|
||||
type typeIndex struct {
|
||||
AllPackages map[string]*packages.Package
|
||||
Models map[*ast.Ident]*entityDecl
|
||||
ExtraModels map[*ast.Ident]*entityDecl
|
||||
Meta []metaSection
|
||||
Routes []parsedPathContent
|
||||
Operations []parsedPathContent
|
||||
Parameters []*entityDecl
|
||||
Responses []*entityDecl
|
||||
excludeDeps bool
|
||||
includeTags map[string]bool
|
||||
excludeTags map[string]bool
|
||||
includePkgs []string
|
||||
excludePkgs []string
|
||||
AllPackages map[string]*packages.Package
|
||||
Models map[*ast.Ident]*entityDecl
|
||||
ExtraModels map[*ast.Ident]*entityDecl
|
||||
Meta []metaSection
|
||||
Routes []parsedPathContent
|
||||
Operations []parsedPathContent
|
||||
Parameters []*entityDecl
|
||||
Responses []*entityDecl
|
||||
excludeDeps bool
|
||||
includeTags map[string]bool
|
||||
excludeTags map[string]bool
|
||||
includePkgs []string
|
||||
excludePkgs []string
|
||||
setXNullableForPointers bool
|
||||
}
|
||||
|
||||
func (a *typeIndex) build(pkgs []*packages.Package) error {
|
||||
|
|
|
|||
2  vendor/github.com/go-swagger/go-swagger/codescan/parameters.go  generated  vendored
|
|
@ -339,7 +339,7 @@ func (p *parameterBuilder) buildFromStruct(decl *entityDecl, tpe *types.Struct,
|
|||
continue
|
||||
}
|
||||
|
||||
name, ignore, _, err := parseJSONTag(afld)
|
||||
name, ignore, _, _, err := parseJSONTag(afld)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
2  vendor/github.com/go-swagger/go-swagger/codescan/responses.go  generated  vendored
|
|
@ -333,7 +333,7 @@ func (r *responseBuilder) buildFromStruct(decl *entityDecl, tpe *types.Struct, r
|
|||
continue
|
||||
}
|
||||
|
||||
name, ignore, _, err := parseJSONTag(afld)
|
||||
name, ignore, _, _, err := parseJSONTag(afld)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
39  vendor/github.com/go-swagger/go-swagger/codescan/schema.go  generated  vendored
|
|
@ -365,6 +365,10 @@ func (s *schemaBuilder) buildFromType(tpe types.Type, tgt swaggerTypable) error
|
|||
return s.buildFromType(titpe.Underlying(), tgt)
|
||||
}
|
||||
|
||||
if titpe.TypeArgs() != nil && titpe.TypeArgs().Len() > 0 {
|
||||
return s.buildFromType(titpe.Underlying(), tgt)
|
||||
}
|
||||
|
||||
switch utitpe := tpe.Underlying().(type) {
|
||||
case *types.Struct:
|
||||
if decl, ok := s.ctx.FindModel(tio.Pkg().Path(), tio.Name()); ok {
|
||||
|
|
@ -407,7 +411,7 @@ func (s *schemaBuilder) buildFromType(tpe types.Type, tgt swaggerTypable) error
|
|||
}
|
||||
|
||||
if defaultName, ok := defaultName(cmt); ok {
|
||||
debugLog(defaultName)
|
||||
debugLog(defaultName) //nolint:govet
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -651,6 +655,12 @@ func (s *schemaBuilder) buildFromInterface(decl *entityDecl, it *types.Interface
|
|||
ps.AddExtension("x-go-name", fld.Name())
|
||||
}
|
||||
|
||||
if s.ctx.app.setXNullableForPointers {
|
||||
if _, isPointer := fld.Type().(*types.Signature).Results().At(0).Type().(*types.Pointer); isPointer && (ps.Extensions == nil || (ps.Extensions["x-nullable"] == nil && ps.Extensions["x-isnullable"] == nil)) {
|
||||
ps.AddExtension("x-nullable", true)
|
||||
}
|
||||
}
|
||||
|
||||
seen[name] = fld.Name()
|
||||
tgt.Properties[name] = ps
|
||||
}
|
||||
|
|
@ -716,7 +726,7 @@ func (s *schemaBuilder) buildFromStruct(decl *entityDecl, st *types.Struct, sche
|
|||
continue
|
||||
}
|
||||
|
||||
_, ignore, _, err := parseJSONTag(afld)
|
||||
_, ignore, _, _, err := parseJSONTag(afld)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -816,7 +826,7 @@ func (s *schemaBuilder) buildFromStruct(decl *entityDecl, st *types.Struct, sche
|
|||
continue
|
||||
}
|
||||
|
||||
name, ignore, isString, err := parseJSONTag(afld)
|
||||
name, ignore, isString, omitEmpty, err := parseJSONTag(afld)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -853,6 +863,13 @@ func (s *schemaBuilder) buildFromStruct(decl *entityDecl, st *types.Struct, sche
|
|||
addExtension(&ps.VendorExtensible, "x-go-name", fld.Name())
|
||||
}
|
||||
|
||||
if s.ctx.app.setXNullableForPointers {
|
||||
if _, isPointer := fld.Type().(*types.Pointer); isPointer && !omitEmpty &&
|
||||
(ps.Extensions == nil || (ps.Extensions["x-nullable"] == nil && ps.Extensions["x-isnullable"] == nil)) {
|
||||
ps.AddExtension("x-nullable", true)
|
||||
}
|
||||
}
|
||||
|
||||
// we have 2 cases:
|
||||
// 1. field with different name override tag
|
||||
// 2. field with different name removes tag
|
||||
|
|
@ -1106,17 +1123,17 @@ func (t tagOptions) Name() string {
|
|||
return t[0]
|
||||
}
|
||||
|
||||
func parseJSONTag(field *ast.Field) (name string, ignore bool, isString bool, err error) {
|
||||
func parseJSONTag(field *ast.Field) (name string, ignore, isString, omitEmpty bool, err error) {
|
||||
if len(field.Names) > 0 {
|
||||
name = field.Names[0].Name
|
||||
}
|
||||
if field.Tag == nil || len(strings.TrimSpace(field.Tag.Value)) == 0 {
|
||||
return name, false, false, nil
|
||||
return name, false, false, false, nil
|
||||
}
|
||||
|
||||
tv, err := strconv.Unquote(field.Tag.Value)
|
||||
if err != nil {
|
||||
return name, false, false, err
|
||||
return name, false, false, false, err
|
||||
}
|
||||
|
||||
if strings.TrimSpace(tv) != "" {
|
||||
|
|
@ -1129,16 +1146,18 @@ func parseJSONTag(field *ast.Field) (name string, ignore bool, isString bool, er
|
|||
isString = isFieldStringable(field.Type)
|
||||
}
|
||||
|
||||
omitEmpty = jsonParts.Contain("omitempty")
|
||||
|
||||
switch jsonParts.Name() {
|
||||
case "-":
|
||||
return name, true, isString, nil
|
||||
return name, true, isString, omitEmpty, nil
|
||||
case "":
|
||||
return name, false, isString, nil
|
||||
return name, false, isString, omitEmpty, nil
|
||||
default:
|
||||
return jsonParts.Name(), false, isString, nil
|
||||
return jsonParts.Name(), false, isString, omitEmpty, nil
|
||||
}
|
||||
}
|
||||
return name, false, false, nil
|
||||
return name, false, false, false, nil
|
||||
}
|
||||
|
||||
// isFieldStringable check if the field type is a scalar. If the field type is
|
||||
|
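parseJSONTag now also reports whether the `json` tag carries `omitempty`, which the x-nullable logic above keys on. A minimal sketch of that tag convention using only the standard library (the helper name is made up; it is not the vendored function):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// splitJSONTag returns the field's JSON name and whether omitempty is set,
// mirroring the name/omitEmpty pair surfaced by the rewritten parseJSONTag.
func splitJSONTag(tag reflect.StructTag) (name string, omitEmpty bool) {
	parts := strings.Split(tag.Get("json"), ",")
	name = parts[0]
	for _, opt := range parts[1:] {
		if opt == "omitempty" {
			omitEmpty = true
		}
	}
	return name, omitEmpty
}

func main() {
	type example struct {
		A *string `json:"a"`
		B *string `json:"b,omitempty"`
	}
	t := reflect.TypeOf(example{})
	for i := 0; i < t.NumField(); i++ {
		n, oe := splitJSONTag(t.Field(i).Tag)
		fmt.Println(n, oe) // "a false", then "b true"
	}
}
```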
|
|
|||
2  vendor/github.com/go-swagger/go-swagger/generator/operation.go  generated  vendored
|
|
@ -1258,7 +1258,7 @@ func (b *codeGenOpBuilder) analyzeTags() (string, []string, bool) {
|
|||
return tag, intersected, len(filter) == 0 || len(filter) > 0 && len(intersected) > 0
|
||||
}
|
||||
|
||||
var versionedPkgRex = regexp.MustCompile(`(?i)(v)([0-9]+)`)
|
||||
var versionedPkgRex = regexp.MustCompile(`(?i)^(v)([0-9]+)$`)
|
||||
|
||||
func maxInt(a, b int) int {
|
||||
if a > b {
|
||||
|
|
|
|||
2  vendor/github.com/go-swagger/go-swagger/generator/shared.go  generated  vendored
|
|
@ -280,7 +280,7 @@ type TemplateOpts struct {
|
|||
Target string `mapstructure:"target"`
|
||||
FileName string `mapstructure:"file_name"`
|
||||
SkipExists bool `mapstructure:"skip_exists"`
|
||||
SkipFormat bool `mapstructure:"skip_format"`
|
||||
SkipFormat bool `mapstructure:"skip_format"` // not a feature, but for debugging. generated code before formatting might not work because of unused imports.
|
||||
}
|
||||
|
||||
// SectionOpts allows for specifying options to customize the templates used for generation
|
||||
|
|
|
|||
2  vendor/github.com/go-swagger/go-swagger/generator/types.go  generated  vendored
|
|
@ -24,8 +24,8 @@ import (
|
|||
"github.com/go-openapi/loads"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/go-viper/mapstructure/v2"
|
||||
"github.com/kr/pretty"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
|||
2  vendor/github.com/minio/minio-go/v7/api-bucket-cors.go  generated  vendored
|
|
@ -98,7 +98,7 @@ func (c *Client) GetBucketCors(ctx context.Context, bucketName string) (*cors.Co
|
|||
bucketCors, err := c.getBucketCors(ctx, bucketName)
|
||||
if err != nil {
|
||||
errResponse := ToErrorResponse(err)
|
||||
if errResponse.Code == "NoSuchCORSConfiguration" {
|
||||
if errResponse.Code == NoSuchCORSConfiguration {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
|
|
|
|||
2  vendor/github.com/minio/minio-go/v7/api-bucket-policy.go  generated  vendored
|
|
@ -104,7 +104,7 @@ func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string
|
|||
bucketPolicy, err := c.getBucketPolicy(ctx, bucketName)
|
||||
if err != nil {
|
||||
errResponse := ToErrorResponse(err)
|
||||
if errResponse.Code == "NoSuchBucketPolicy" {
|
||||
if errResponse.Code == NoSuchBucketPolicy {
|
||||
return "", nil
|
||||
}
|
||||
return "", err
|
||||
|
|
|
|||
37  vendor/github.com/minio/minio-go/v7/api-error-response.go  generated  vendored
|
|
@ -136,15 +136,15 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
|
|||
if objectName == "" {
|
||||
errResp = ErrorResponse{
|
||||
StatusCode: resp.StatusCode,
|
||||
Code: "NoSuchBucket",
|
||||
Message: "The specified bucket does not exist.",
|
||||
Code: NoSuchBucket,
|
||||
Message: s3ErrorResponseMap[NoSuchBucket],
|
||||
BucketName: bucketName,
|
||||
}
|
||||
} else {
|
||||
errResp = ErrorResponse{
|
||||
StatusCode: resp.StatusCode,
|
||||
Code: "NoSuchKey",
|
||||
Message: "The specified key does not exist.",
|
||||
Code: NoSuchKey,
|
||||
Message: s3ErrorResponseMap[NoSuchKey],
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
|
|
@ -152,23 +152,23 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
|
|||
case http.StatusForbidden:
|
||||
errResp = ErrorResponse{
|
||||
StatusCode: resp.StatusCode,
|
||||
Code: "AccessDenied",
|
||||
Message: "Access Denied.",
|
||||
Code: AccessDenied,
|
||||
Message: s3ErrorResponseMap[AccessDenied],
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
case http.StatusConflict:
|
||||
errResp = ErrorResponse{
|
||||
StatusCode: resp.StatusCode,
|
||||
Code: "Conflict",
|
||||
Message: "Bucket not empty.",
|
||||
Code: Conflict,
|
||||
Message: s3ErrorResponseMap[Conflict],
|
||||
BucketName: bucketName,
|
||||
}
|
||||
case http.StatusPreconditionFailed:
|
||||
errResp = ErrorResponse{
|
||||
StatusCode: resp.StatusCode,
|
||||
Code: "PreconditionFailed",
|
||||
Message: s3ErrorResponseMap["PreconditionFailed"],
|
||||
Code: PreconditionFailed,
|
||||
Message: s3ErrorResponseMap[PreconditionFailed],
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
|
|
@ -209,7 +209,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
|
|||
if errResp.Region == "" {
|
||||
errResp.Region = resp.Header.Get("x-amz-bucket-region")
|
||||
}
|
||||
if errResp.Code == "InvalidRegion" && errResp.Region != "" {
|
||||
if errResp.Code == InvalidRegion && errResp.Region != "" {
|
||||
errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region)
|
||||
}
|
||||
|
||||
|
|
@ -218,10 +218,11 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
|
|||
|
||||
// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
|
||||
func errTransferAccelerationBucket(bucketName string) error {
|
||||
msg := "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’."
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "InvalidArgument",
|
||||
Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.",
|
||||
Code: InvalidArgument,
|
||||
Message: msg,
|
||||
BucketName: bucketName,
|
||||
}
|
||||
}
|
||||
|
|
@ -231,7 +232,7 @@ func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName st
|
|||
msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "EntityTooLarge",
|
||||
Code: EntityTooLarge,
|
||||
Message: msg,
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
|
|
@ -243,7 +244,7 @@ func errEntityTooSmall(totalSize int64, bucketName, objectName string) error {
|
|||
msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize)
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "EntityTooSmall",
|
||||
Code: EntityTooSmall,
|
||||
Message: msg,
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
|
|
@ -255,7 +256,7 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
|
|||
msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize)
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "UnexpectedEOF",
|
||||
Code: UnexpectedEOF,
|
||||
Message: msg,
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
|
|
@ -266,7 +267,7 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
|
|||
func errInvalidArgument(message string) error {
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "InvalidArgument",
|
||||
Code: InvalidArgument,
|
||||
Message: message,
|
||||
RequestID: "minio",
|
||||
}
|
||||
|
|
@ -277,7 +278,7 @@ func errInvalidArgument(message string) error {
|
|||
func errAPINotSupported(message string) error {
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusNotImplemented,
|
||||
Code: "APINotSupported",
|
||||
Code: APINotSupported,
|
||||
Message: message,
|
||||
RequestID: "minio",
|
||||
}
|
||||
|
|
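Because the S3 error codes are now exported constants rather than scattered string literals, caller-side comparisons can use them as well. A hedged sketch of the usual pattern (endpoint, credentials, and object names are placeholders; it assumes the exported constants introduced in this hunk, e.g. minio.NoSuchKey):

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials, for illustration only.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	_, err = client.StatObject(context.Background(), "some-bucket", "missing.txt", minio.StatObjectOptions{})
	if err != nil {
		// Compare against the exported constant instead of the raw string "NoSuchKey".
		if minio.ToErrorResponse(err).Code == minio.NoSuchKey {
			log.Println("object does not exist")
			return
		}
		log.Fatal(err)
	}
}
```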
|
|||
8  vendor/github.com/minio/minio-go/v7/api-get-object.go  generated  vendored
|
|
@ -34,14 +34,14 @@ func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, o
|
|||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "InvalidBucketName",
|
||||
Code: InvalidBucketName,
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return nil, ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "XMinioInvalidObjectName",
|
||||
Code: XMinioInvalidObjectName,
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
|
@ -659,14 +659,14 @@ func (c *Client) getObject(ctx context.Context, bucketName, objectName string, o
|
|||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, ObjectInfo{}, nil, ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "InvalidBucketName",
|
||||
Code: InvalidBucketName,
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return nil, ObjectInfo{}, nil, ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "XMinioInvalidObjectName",
|
||||
Code: XMinioInvalidObjectName,
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
|
|
|||
49  vendor/github.com/minio/minio-go/v7/api-list.go  generated  vendored
|
|
@ -285,7 +285,7 @@ func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefi
|
|||
// sure proper responses are received.
|
||||
if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
|
||||
return listBucketResult, ErrorResponse{
|
||||
Code: "NotImplemented",
|
||||
Code: NotImplemented,
|
||||
Message: "Truncated response should have continuation token set",
|
||||
}
|
||||
}
|
||||
|
|
@ -419,19 +419,25 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
|
|||
}
|
||||
for _, version := range vers {
|
||||
info := ObjectInfo{
|
||||
ETag: trimEtag(version.ETag),
|
||||
Key: version.Key,
|
||||
LastModified: version.LastModified.Truncate(time.Millisecond),
|
||||
Size: version.Size,
|
||||
Owner: version.Owner,
|
||||
StorageClass: version.StorageClass,
|
||||
IsLatest: version.IsLatest,
|
||||
VersionID: version.VersionID,
|
||||
IsDeleteMarker: version.isDeleteMarker,
|
||||
UserTags: version.UserTags,
|
||||
UserMetadata: version.UserMetadata,
|
||||
Internal: version.Internal,
|
||||
NumVersions: numVersions,
|
||||
ETag: trimEtag(version.ETag),
|
||||
Key: version.Key,
|
||||
LastModified: version.LastModified.Truncate(time.Millisecond),
|
||||
Size: version.Size,
|
||||
Owner: version.Owner,
|
||||
StorageClass: version.StorageClass,
|
||||
IsLatest: version.IsLatest,
|
||||
VersionID: version.VersionID,
|
||||
IsDeleteMarker: version.isDeleteMarker,
|
||||
UserTags: version.UserTags,
|
||||
UserMetadata: version.UserMetadata,
|
||||
Internal: version.Internal,
|
||||
NumVersions: numVersions,
|
||||
ChecksumMode: version.ChecksumType,
|
||||
ChecksumCRC32: version.ChecksumCRC32,
|
||||
ChecksumCRC32C: version.ChecksumCRC32C,
|
||||
ChecksumSHA1: version.ChecksumSHA1,
|
||||
ChecksumSHA256: version.ChecksumSHA256,
|
||||
ChecksumCRC64NVME: version.ChecksumCRC64NVME,
|
||||
}
|
||||
if !yield(info) {
|
||||
return false
|
||||
|
|
@ -753,13 +759,9 @@ func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListOb
|
|||
objectStatCh := make(chan ObjectInfo, 1)
|
||||
go func() {
|
||||
defer close(objectStatCh)
|
||||
send := func(obj ObjectInfo) bool {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return false
|
||||
case objectStatCh <- obj:
|
||||
return true
|
||||
}
|
||||
if contextCanceled(ctx) {
|
||||
objectStatCh <- ObjectInfo{Err: ctx.Err()}
|
||||
return
|
||||
}
|
||||
|
||||
var objIter iter.Seq[ObjectInfo]
|
||||
|
|
@ -777,8 +779,11 @@ func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListOb
|
|||
}
|
||||
}
|
||||
for obj := range objIter {
|
||||
if !send(obj) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
objectStatCh <- ObjectInfo{Err: ctx.Err()}
|
||||
return
|
||||
case objectStatCh <- obj:
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
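The ListObjects change keeps the channel-based surface while driving it from an internal iterator, and now forwards ctx.Err() on cancellation, so the consuming pattern stays the usual one. A sketch (bucket name invented, client construction omitted):

```go
package example

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
)

// printObjects drains the ObjectInfo channel returned by ListObjects,
// checking the Err field that the cancellation path above populates.
func printObjects(ctx context.Context, client *minio.Client, bucket string) error {
	for obj := range client.ListObjects(ctx, bucket, minio.ListObjectsOptions{Recursive: true}) {
		if obj.Err != nil {
			return obj.Err
		}
		fmt.Println(obj.Key, obj.Size)
	}
	return nil
}
```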
|
|
|||
4  vendor/github.com/minio/minio-go/v7/api-prompt-object.go  generated  vendored
|
|
@ -35,14 +35,14 @@ func (c *Client) PromptObject(ctx context.Context, bucketName, objectName, promp
|
|||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "InvalidBucketName",
|
||||
Code: InvalidBucketName,
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return nil, ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "XMinioInvalidObjectName",
|
||||
Code: XMinioInvalidObjectName,
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
|
|
|||
2  vendor/github.com/minio/minio-go/v7/api-put-bucket.go  generated  vendored
|
|
@ -35,7 +35,7 @@ func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBuc
|
|||
|
||||
err = c.doMakeBucket(ctx, bucketName, opts)
|
||||
if err != nil && (opts.Region == "" || opts.Region == "us-east-1") {
|
||||
if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" {
|
||||
if resp, ok := err.(ErrorResponse); ok && resp.Code == AuthorizationHeaderMalformed && resp.Region != "" {
|
||||
opts.Region = resp.Region
|
||||
err = c.doMakeBucket(ctx, bucketName, opts)
|
||||
}
|
||||
|
|
|
|||
17  vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go  generated  vendored
|
|
@ -44,7 +44,7 @@ func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName
|
|||
errResp := ToErrorResponse(err)
|
||||
// Verify if multipart functionality is not available, if not
|
||||
// fall back to single PutObject operation.
|
||||
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
|
||||
if errResp.Code == AccessDenied && strings.Contains(errResp.Message, "Access Denied") {
|
||||
// Verify if size of reader is greater than '5GiB'.
|
||||
if size > maxSinglePutObjectSize {
|
||||
return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
|
||||
|
|
@ -392,13 +392,14 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
|
|||
// Instantiate all the complete multipart buffer.
|
||||
completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
|
||||
reqMetadata := requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: urlValues,
|
||||
contentBody: completeMultipartUploadBuffer,
|
||||
contentLength: int64(len(completeMultipartUploadBytes)),
|
||||
contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
|
||||
customHeader: headers,
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: urlValues,
|
||||
contentBody: completeMultipartUploadBuffer,
|
||||
contentLength: int64(len(completeMultipartUploadBytes)),
|
||||
contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
|
||||
customHeader: headers,
|
||||
expect200OKWithError: true,
|
||||
}
|
||||
|
||||
// Execute POST to complete multipart upload for an objectName.
|
||||
|
|
|
|||
2  vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go  generated  vendored
|
|
@ -56,7 +56,7 @@ func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objec
|
|||
errResp := ToErrorResponse(err)
|
||||
// Verify if multipart functionality is not available, if not
|
||||
// fall back to single PutObject operation.
|
||||
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
|
||||
if errResp.Code == AccessDenied && strings.Contains(errResp.Message, "Access Denied") {
|
||||
// Verify if size of reader is greater than '5GiB'.
|
||||
if size > maxSinglePutObjectSize {
|
||||
return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
|
||||
|
|
|
|||
187
vendor/github.com/minio/minio-go/v7/api-remove.go
generated
vendored
|
|
@ -22,6 +22,7 @@ import (
|
|||
"context"
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"iter"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
|
@ -271,7 +272,7 @@ func processRemoveMultiObjectsResponse(body io.Reader, resultCh chan<- RemoveObj
|
|||
for _, obj := range rmResult.UnDeletedObjects {
|
||||
// A missing version is not an error; ignore and continue.
|
||||
switch obj.Code {
|
||||
case "InvalidArgument", "NoSuchVersion":
|
||||
case InvalidArgument, NoSuchVersion:
|
||||
continue
|
||||
}
|
||||
resultCh <- RemoveObjectResult{
|
||||
|
|
@ -333,6 +334,33 @@ func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh
|
|||
return errorCh
|
||||
}
|
||||
|
||||
// RemoveObjectsWithIter bulk deletes multiple objects from a bucket.
|
||||
// Objects (with optional versions) to be removed must be provided with
|
||||
// an iterator. Objects are removed asynchronously and results must be
|
||||
// consumed. If the returned result iterator is stopped, the context is
|
||||
// canceled, or a remote call failed, the provided iterator will no
|
||||
// longer accept more objects.
|
||||
func (c *Client) RemoveObjectsWithIter(ctx context.Context, bucketName string, objectsIter iter.Seq[ObjectInfo], opts RemoveObjectsOptions) (iter.Seq[RemoveObjectResult], error) {
|
||||
// Validate if bucket name is valid.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Validate objects channel to be properly allocated.
|
||||
if objectsIter == nil {
|
||||
return nil, errInvalidArgument("Objects iter can never by nil")
|
||||
}
|
||||
|
||||
return func(yield func(RemoveObjectResult) bool) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
c.removeObjectsIter(ctx, bucketName, objectsIter, yield, opts)
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RemoveObjectsWithResult removes multiple objects from a bucket while
|
||||
// it is possible to specify objects versions which are received from
|
||||
// objectsCh. Remove results, successes and failures are sent back via
|
||||
|
|
@ -381,6 +409,144 @@ func hasInvalidXMLChar(str string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// Generate and call MultiDelete S3 requests based on entries received from the iterator.
|
||||
func (c *Client) removeObjectsIter(ctx context.Context, bucketName string, objectsIter iter.Seq[ObjectInfo], yield func(RemoveObjectResult) bool, opts RemoveObjectsOptions) {
|
||||
maxEntries := 1000
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("delete", "")
|
||||
|
||||
// Build headers.
|
||||
headers := make(http.Header)
|
||||
if opts.GovernanceBypass {
|
||||
// Set the bypass governance retention header
|
||||
headers.Set(amzBypassGovernance, "true")
|
||||
}
|
||||
|
||||
processRemoveMultiObjectsResponseIter := func(batch []ObjectInfo, yield func(RemoveObjectResult) bool) bool {
|
||||
if len(batch) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Generate remove multi objects XML request
|
||||
removeBytes := generateRemoveMultiObjectsRequest(batch)
|
||||
// Execute POST on bucket to remove objects.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentBody: bytes.NewReader(removeBytes),
|
||||
contentLength: int64(len(removeBytes)),
|
||||
contentMD5Base64: sumMD5Base64(removeBytes),
|
||||
contentSHA256Hex: sum256Hex(removeBytes),
|
||||
customHeader: headers,
|
||||
})
|
||||
if resp != nil {
|
||||
defer closeResponse(resp)
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
for _, b := range batch {
|
||||
if !yield(RemoveObjectResult{
|
||||
ObjectName: b.Key,
|
||||
ObjectVersionID: b.VersionID,
|
||||
Err: err,
|
||||
}) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Parse multi delete XML response
|
||||
rmResult := &deleteMultiObjectsResult{}
|
||||
if err := xmlDecoder(resp.Body, rmResult); err != nil {
|
||||
yield(RemoveObjectResult{ObjectName: "", Err: err})
|
||||
return false
|
||||
}
|
||||
|
||||
// Fill deletion that returned an error.
|
||||
for _, obj := range rmResult.UnDeletedObjects {
|
||||
// A missing version is not an error; ignore and continue.
|
||||
switch obj.Code {
|
||||
case "InvalidArgument", "NoSuchVersion":
|
||||
continue
|
||||
}
|
||||
if !yield(RemoveObjectResult{
|
||||
ObjectName: obj.Key,
|
||||
ObjectVersionID: obj.VersionID,
|
||||
Err: ErrorResponse{
|
||||
Code: obj.Code,
|
||||
Message: obj.Message,
|
||||
},
|
||||
}) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Fill deletion that returned success
|
||||
for _, obj := range rmResult.DeletedObjects {
|
||||
if !yield(RemoveObjectResult{
|
||||
ObjectName: obj.Key,
|
||||
// Only filled with versioned buckets
|
||||
ObjectVersionID: obj.VersionID,
|
||||
DeleteMarker: obj.DeleteMarker,
|
||||
DeleteMarkerVersionID: obj.DeleteMarkerVersionID,
|
||||
}) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
var batch []ObjectInfo
|
||||
|
||||
next, stop := iter.Pull(objectsIter)
|
||||
defer stop()
|
||||
|
||||
for {
|
||||
// Loop over entries by 1000 and call MultiDelete requests
|
||||
object, ok := next()
|
||||
if !ok {
|
||||
// delete the remaining batch.
|
||||
processRemoveMultiObjectsResponseIter(batch, yield)
|
||||
return
|
||||
}
|
||||
|
||||
if hasInvalidXMLChar(object.Key) {
|
||||
// Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document.
|
||||
removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
|
||||
VersionID: object.VersionID,
|
||||
GovernanceBypass: opts.GovernanceBypass,
|
||||
})
|
||||
if err := removeResult.Err; err != nil {
|
||||
// A missing version is not an error; ignore and continue.
|
||||
switch ToErrorResponse(err).Code {
|
||||
case "InvalidArgument", "NoSuchVersion":
|
||||
continue
|
||||
}
|
||||
}
|
||||
if !yield(removeResult) {
|
||||
return
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
batch = append(batch, object)
|
||||
if len(batch) < maxEntries {
|
||||
continue
|
||||
}
|
||||
|
||||
if !processRemoveMultiObjectsResponseIter(batch, yield) {
|
||||
return
|
||||
}
|
||||
|
||||
batch = batch[:0]
|
||||
}
|
||||
}
|
||||
|
||||
// Generate and call MultiDelete S3 requests based on entries received from objectsCh
|
||||
func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) {
|
||||
maxEntries := 1000
|
||||
|
|
@ -407,7 +573,7 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh
|
|||
if err := removeResult.Err; err != nil {
|
||||
// A missing version is not an error; ignore and continue.
|
||||
switch ToErrorResponse(err).Code {
|
||||
case "InvalidArgument", "NoSuchVersion":
|
||||
case InvalidArgument, NoSuchVersion:
|
||||
continue
|
||||
}
|
||||
resultCh <- removeResult
|
||||
|
|
@ -442,13 +608,14 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh
|
|||
removeBytes := generateRemoveMultiObjectsRequest(batch)
|
||||
// Execute POST on bucket to remove objects.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentBody: bytes.NewReader(removeBytes),
|
||||
contentLength: int64(len(removeBytes)),
|
||||
contentMD5Base64: sumMD5Base64(removeBytes),
|
||||
contentSHA256Hex: sum256Hex(removeBytes),
|
||||
customHeader: headers,
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentBody: bytes.NewReader(removeBytes),
|
||||
contentLength: int64(len(removeBytes)),
|
||||
contentMD5Base64: sumMD5Base64(removeBytes),
|
||||
contentSHA256Hex: sum256Hex(removeBytes),
|
||||
customHeader: headers,
|
||||
expect200OKWithError: true,
|
||||
})
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
|
|
@ -535,7 +702,7 @@ func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectNam
|
|||
// This is needed specifically for abort and it cannot
|
||||
// be converged into default case.
|
||||
errorResponse = ErrorResponse{
|
||||
Code: "NoSuchUpload",
|
||||
Code: NoSuchUpload,
|
||||
Message: "The specified multipart upload does not exist.",
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
|
|
|
|||
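RemoveObjectsWithIter and removeObjectsIter above are new in this release. A short sketch, with a hypothetical helper, of feeding keys through an iter.Seq and draining the result iterator:

package s3util

import (
	"context"
	"fmt"
	"iter"

	"github.com/minio/minio-go/v7"
)

// deleteKeys bulk-deletes the given keys using the new iterator-based API.
// Hypothetical helper; error handling is kept minimal.
func deleteKeys(ctx context.Context, c *minio.Client, bucket string, keys []string) error {
	var objects iter.Seq[minio.ObjectInfo] = func(yield func(minio.ObjectInfo) bool) {
		for _, k := range keys {
			if !yield(minio.ObjectInfo{Key: k}) {
				return
			}
		}
	}

	results, err := c.RemoveObjectsWithIter(ctx, bucket, objects, minio.RemoveObjectsOptions{})
	if err != nil {
		return err
	}
	for res := range results {
		if res.Err != nil {
			return fmt.Errorf("removing %q: %w", res.ObjectName, res.Err)
		}
	}
	return nil
}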
8
vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
generated
vendored
|
|
@ -107,6 +107,14 @@ type Version struct {
|
|||
M int // Parity blocks
|
||||
} `xml:"Internal"`
|
||||
|
||||
// Checksum values. Only returned by AiStor servers.
|
||||
ChecksumCRC32 string `xml:",omitempty"`
|
||||
ChecksumCRC32C string `xml:",omitempty"`
|
||||
ChecksumSHA1 string `xml:",omitempty"`
|
||||
ChecksumSHA256 string `xml:",omitempty"`
|
||||
ChecksumCRC64NVME string `xml:",omitempty"`
|
||||
ChecksumType string `xml:",omitempty"`
|
||||
|
||||
isDeleteMarker bool
|
||||
}
|
||||
|
||||
|
|
|
|||
12
vendor/github.com/minio/minio-go/v7/api-stat.go
generated
vendored
|
|
@ -39,14 +39,14 @@ func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, err
|
|||
})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
if ToErrorResponse(err).Code == "NoSuchBucket" {
|
||||
if ToErrorResponse(err).Code == NoSuchBucket {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
if resp != nil {
|
||||
resperr := httpRespToErrorResponse(resp, bucketName, "")
|
||||
if ToErrorResponse(resperr).Code == "NoSuchBucket" {
|
||||
if ToErrorResponse(resperr).Code == NoSuchBucket {
|
||||
return false, nil
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
|
|
@ -63,14 +63,14 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string,
|
|||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return ObjectInfo{}, ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "InvalidBucketName",
|
||||
Code: InvalidBucketName,
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return ObjectInfo{}, ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "XMinioInvalidObjectName",
|
||||
Code: XMinioInvalidObjectName,
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
|
@ -102,8 +102,8 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string,
|
|||
if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker {
|
||||
errResp := ErrorResponse{
|
||||
StatusCode: resp.StatusCode,
|
||||
Code: "MethodNotAllowed",
|
||||
Message: "The specified method is not allowed against this resource.",
|
||||
Code: MethodNotAllowed,
|
||||
Message: s3ErrorResponseMap[MethodNotAllowed],
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
|
|
|
|||
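The StatObject hunk above now builds the 405 response from the MethodNotAllowed constant and the shared message map. A caller-side sketch of detecting a delete-marker version; it assumes StatObjectOptions carries a VersionID field, as in current minio-go:

package s3util

import (
	"context"

	"github.com/minio/minio-go/v7"
)

// isDeleteMarker reports whether statting the given version fails because
// that version is a delete marker (HEAD answers 405 MethodNotAllowed).
// Illustrative sketch.
func isDeleteMarker(ctx context.Context, c *minio.Client, bucket, object, versionID string) (bool, error) {
	_, err := c.StatObject(ctx, bucket, object, minio.StatObjectOptions{VersionID: versionID})
	if err == nil {
		return false, nil
	}
	if minio.ToErrorResponse(err).Code == minio.MethodNotAllowed {
		return true, nil
	}
	return false, err
}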
65
vendor/github.com/minio/minio-go/v7/api.go
generated
vendored
|
|
@ -21,6 +21,7 @@ import (
|
|||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
|
@ -38,6 +39,7 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/dustin/go-humanize"
|
||||
md5simd "github.com/minio/md5-simd"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
"github.com/minio/minio-go/v7/pkg/kvcache"
|
||||
|
|
@ -45,6 +47,8 @@ import (
|
|||
"github.com/minio/minio-go/v7/pkg/signer"
|
||||
"github.com/minio/minio-go/v7/pkg/singleflight"
|
||||
"golang.org/x/net/publicsuffix"
|
||||
|
||||
internalutils "github.com/minio/minio-go/v7/pkg/utils"
|
||||
)
|
||||
|
||||
// Client implements Amazon S3 compatible methods.
|
||||
|
|
@ -159,7 +163,7 @@ type Options struct {
|
|||
// Global constants.
|
||||
const (
|
||||
libraryName = "minio-go"
|
||||
libraryVersion = "v7.0.92"
|
||||
libraryVersion = "v7.0.94"
|
||||
)
|
||||
|
||||
// User Agent should always following the below style.
|
||||
|
|
@ -455,7 +459,7 @@ func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, erro
|
|||
gcancel()
|
||||
if !IsNetworkOrHostDown(err, false) {
|
||||
switch ToErrorResponse(err).Code {
|
||||
case "NoSuchBucket", "AccessDenied", "":
|
||||
case NoSuchBucket, AccessDenied, "":
|
||||
atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
|
||||
}
|
||||
}
|
||||
|
|
@ -477,7 +481,7 @@ func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, erro
|
|||
gcancel()
|
||||
if !IsNetworkOrHostDown(err, false) {
|
||||
switch ToErrorResponse(err).Code {
|
||||
case "NoSuchBucket", "AccessDenied", "":
|
||||
case NoSuchBucket, AccessDenied, "":
|
||||
atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
|
||||
}
|
||||
}
|
||||
|
|
@ -512,6 +516,8 @@ type requestMetadata struct {
|
|||
streamSha256 bool
|
||||
addCrc *ChecksumType
|
||||
trailer http.Header // (http.Request).Trailer. Requires v4 signature.
|
||||
|
||||
expect200OKWithError bool
|
||||
}
|
||||
|
||||
// dumpHTTP - dump HTTP request and response.
|
||||
|
|
@ -615,6 +621,28 @@ func (c *Client) do(req *http.Request) (resp *http.Response, err error) {
|
|||
return resp, nil
|
||||
}
|
||||
|
||||
// Peek resp.Body looking for an S3 XML error response:
|
||||
// - Return the error XML bytes if an error is found
|
||||
// - Make sure to always re-establish the whole http response stream before returning
|
||||
func tryParseErrRespFromBody(resp *http.Response) ([]byte, error) {
|
||||
peeker := internalutils.NewPeekReadCloser(resp.Body, 5*humanize.MiByte)
|
||||
defer func() {
|
||||
peeker.ReplayFromStart()
|
||||
resp.Body = peeker
|
||||
}()
|
||||
|
||||
errResp := ErrorResponse{}
|
||||
errBytes, err := xmlDecodeAndBody(peeker, &errResp)
|
||||
if err != nil {
|
||||
var unmarshalErr xml.UnmarshalError
|
||||
if errors.As(err, &unmarshalErr) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return errBytes, nil
|
||||
}
|
||||
|
||||
// List of success status.
|
||||
var successStatus = []int{
|
||||
http.StatusOK,
|
||||
|
|
@ -702,16 +730,30 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// For any known successful http status, return quickly.
|
||||
var success bool
|
||||
var errBodyBytes []byte
|
||||
|
||||
for _, httpStatus := range successStatus {
|
||||
if httpStatus == res.StatusCode {
|
||||
return res, nil
|
||||
success = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Read the body to be saved later.
|
||||
errBodyBytes, err := io.ReadAll(res.Body)
|
||||
// res.Body should be closed
|
||||
if success {
|
||||
if !metadata.expect200OKWithError {
|
||||
return res, nil
|
||||
}
|
||||
errBodyBytes, err = tryParseErrRespFromBody(res)
|
||||
if err == nil && len(errBodyBytes) == 0 {
|
||||
// No S3 XML error is found
|
||||
return res, nil
|
||||
}
|
||||
} else {
|
||||
errBodyBytes, err = io.ReadAll(res.Body)
|
||||
}
|
||||
|
||||
// By now, res.Body should be closed
|
||||
closeResponse(res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -723,6 +765,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
|
|||
|
||||
// For errors verify if its retryable otherwise fail quickly.
|
||||
errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
|
||||
err = errResponse
|
||||
|
||||
// Save the body back again.
|
||||
errBodySeeker.Seek(0, 0) // Seek back to starting point.
|
||||
|
|
@ -736,11 +779,11 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
|
|||
// region is empty.
|
||||
if c.region == "" {
|
||||
switch errResponse.Code {
|
||||
case "AuthorizationHeaderMalformed":
|
||||
case AuthorizationHeaderMalformed:
|
||||
fallthrough
|
||||
case "InvalidRegion":
|
||||
case InvalidRegion:
|
||||
fallthrough
|
||||
case "AccessDenied":
|
||||
case AccessDenied:
|
||||
if errResponse.Region == "" {
|
||||
// Region is empty we simply return the error.
|
||||
return res, err
|
||||
|
|
|
|||
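With expect200OKWithError set, executeMethod now peeks a successful response for an embedded S3 <Error> document before returning it. A standalone sketch of the same technique against a plain *http.Response; the s3Error type and helper are illustrative, not the library's internals:

package s3util

import (
	"bytes"
	"encoding/xml"
	"io"
	"net/http"
)

// s3Error mirrors the common S3 error body; the field set is illustrative.
type s3Error struct {
	XMLName xml.Name `xml:"Error"`
	Code    string   `xml:"Code"`
	Message string   `xml:"Message"`
}

// errorIn200 peeks at most maxPeek bytes of a 200 OK body, reports any
// embedded <Error> document, and restores the body so callers can still
// read the full payload afterwards.
func errorIn200(resp *http.Response, maxPeek int64) (*s3Error, error) {
	peeked, err := io.ReadAll(io.LimitReader(resp.Body, maxPeek))
	if err != nil {
		return nil, err
	}
	// Re-stitch the stream: the already-read bytes first, then the rest.
	resp.Body = struct {
		io.Reader
		io.Closer
	}{io.MultiReader(bytes.NewReader(peeked), resp.Body), resp.Body}

	var e s3Error
	if xml.Unmarshal(peeked, &e) != nil || e.Code == "" {
		return nil, nil // no recognizable error document in the body
	}
	return &e, nil
}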
8
vendor/github.com/minio/minio-go/v7/bucket-cache.go
generated
vendored
|
|
@ -84,18 +84,18 @@ func processBucketLocationResponse(resp *http.Response, bucketName string) (buck
|
|||
// request. Move forward and let the top level callers
|
||||
// succeed if possible based on their policy.
|
||||
switch errResp.Code {
|
||||
case "NotImplemented":
|
||||
case NotImplemented:
|
||||
switch errResp.Server {
|
||||
case "AmazonSnowball":
|
||||
return "snowball", nil
|
||||
case "cloudflare":
|
||||
return "us-east-1", nil
|
||||
}
|
||||
case "AuthorizationHeaderMalformed":
|
||||
case AuthorizationHeaderMalformed:
|
||||
fallthrough
|
||||
case "InvalidRegion":
|
||||
case InvalidRegion:
|
||||
fallthrough
|
||||
case "AccessDenied":
|
||||
case AccessDenied:
|
||||
if errResp.Region == "" {
|
||||
return "us-east-1", nil
|
||||
}
|
||||
|
|
|
|||
308
vendor/github.com/minio/minio-go/v7/functional_tests.go
generated
vendored
|
|
@ -31,6 +31,7 @@ import (
|
|||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"iter"
|
||||
"log/slog"
|
||||
"math/rand"
|
||||
"mime/multipart"
|
||||
|
|
@ -259,7 +260,7 @@ func cleanupVersionedBucket(bucketName string, c *minio.Client) error {
|
|||
}
|
||||
|
||||
func isErrNotImplemented(err error) bool {
|
||||
return minio.ToErrorResponse(err).Code == "NotImplemented"
|
||||
return minio.ToErrorResponse(err).Code == minio.NotImplemented
|
||||
}
|
||||
|
||||
func isRunOnFail() bool {
|
||||
|
|
@ -465,8 +466,8 @@ func testMakeBucketError() {
|
|||
return
|
||||
}
|
||||
// Verify valid error response from server.
|
||||
if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
|
||||
minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
|
||||
if minio.ToErrorResponse(err).Code != minio.BucketAlreadyExists &&
|
||||
minio.ToErrorResponse(err).Code != minio.BucketAlreadyOwnedByYou {
|
||||
logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
|
||||
return
|
||||
}
|
||||
|
|
@ -1073,7 +1074,7 @@ func testPutObjectWithVersioning() {
|
|||
var results []minio.ObjectInfo
|
||||
for info := range objectsInfo {
|
||||
if info.Err != nil {
|
||||
logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
|
||||
logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
|
||||
return
|
||||
}
|
||||
results = append(results, info)
|
||||
|
|
@ -3204,7 +3205,7 @@ func testGetObjectAttributesErrorCases() {
|
|||
}
|
||||
|
||||
errorResponse := err.(minio.ErrorResponse)
|
||||
if errorResponse.Code != "NoSuchBucket" {
|
||||
if errorResponse.Code != minio.NoSuchBucket {
|
||||
logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchBucket but got "+errorResponse.Code, nil)
|
||||
return
|
||||
}
|
||||
|
|
@ -3247,8 +3248,8 @@ func testGetObjectAttributesErrorCases() {
|
|||
}
|
||||
|
||||
errorResponse = err.(minio.ErrorResponse)
|
||||
if errorResponse.Code != "NoSuchKey" {
|
||||
logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchKey but got "+errorResponse.Code, nil)
|
||||
if errorResponse.Code != minio.NoSuchKey {
|
||||
logError(testName, function, args, startTime, "", "Invalid error code, expected "+minio.NoSuchKey+" but got "+errorResponse.Code, nil)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -3272,8 +3273,8 @@ func testGetObjectAttributesErrorCases() {
|
|||
return
|
||||
}
|
||||
errorResponse = err.(minio.ErrorResponse)
|
||||
if errorResponse.Code != "NoSuchVersion" {
|
||||
logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchVersion but got "+errorResponse.Code, nil)
|
||||
if errorResponse.Code != minio.NoSuchVersion {
|
||||
logError(testName, function, args, startTime, "", "Invalid error code, expected "+minio.NoSuchVersion+" but got "+errorResponse.Code, nil)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -3928,10 +3929,10 @@ func testRemoveMultipleObjects() {
|
|||
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
|
||||
r := bytes.NewReader(bytes.Repeat([]byte("a"), 1))
|
||||
|
||||
// Multi remove of 1100 objects
|
||||
nrObjects := 200
|
||||
nrObjects := 1100
|
||||
|
||||
objectsCh := make(chan minio.ObjectInfo)
|
||||
|
||||
|
|
@ -3940,7 +3941,7 @@ func testRemoveMultipleObjects() {
|
|||
// Upload objects and send them to objectsCh
|
||||
for i := 0; i < nrObjects; i++ {
|
||||
objectName := "sample" + strconv.Itoa(i) + ".txt"
|
||||
info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
|
||||
info, err := c.PutObject(context.Background(), bucketName, objectName, r, 1,
|
||||
minio.PutObjectOptions{ContentType: "application/octet-stream"})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "PutObject failed", err)
|
||||
|
|
@ -3968,6 +3969,78 @@ func testRemoveMultipleObjects() {
|
|||
logSuccess(testName, function, args, startTime)
|
||||
}
|
||||
|
||||
// Test removing multiple objects with Remove API as iterator
|
||||
func testRemoveMultipleObjectsIter() {
|
||||
// initialize logging params
|
||||
startTime := time.Now()
|
||||
testName := getFuncName()
|
||||
function := "RemoveObjects(bucketName, objectsCh)"
|
||||
args := map[string]interface{}{
|
||||
"bucketName": "",
|
||||
}
|
||||
|
||||
c, err := NewClient(ClientConfig{})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Generate a new random bucket name.
|
||||
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
|
||||
args["bucketName"] = bucketName
|
||||
|
||||
// Make a new bucket.
|
||||
err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "MakeBucket failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
buf := []byte("a")
|
||||
|
||||
// Multi remove of 1100 objects
|
||||
nrObjects := 1100
|
||||
|
||||
objectsIter := func() iter.Seq[minio.ObjectInfo] {
|
||||
return func(yield func(minio.ObjectInfo) bool) {
|
||||
// Upload objects and send them to objectsCh
|
||||
for i := 0; i < nrObjects; i++ {
|
||||
objectName := "sample" + strconv.Itoa(i) + ".txt"
|
||||
info, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), 1,
|
||||
minio.PutObjectOptions{ContentType: "application/octet-stream"})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "PutObject failed", err)
|
||||
continue
|
||||
}
|
||||
if !yield(minio.ObjectInfo{
|
||||
Key: info.Key,
|
||||
VersionID: info.VersionID,
|
||||
}) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Call RemoveObjects API
|
||||
results, err := c.RemoveObjectsWithIter(context.Background(), bucketName, objectsIter(), minio.RemoveObjectsOptions{})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "Unexpected error", err)
|
||||
return
|
||||
}
|
||||
|
||||
for result := range results {
|
||||
if result.Err != nil {
|
||||
logError(testName, function, args, startTime, "", "Unexpected error", result.Err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
logSuccess(testName, function, args, startTime)
|
||||
}
|
||||
|
||||
// Test removing multiple objects and check for results
|
||||
func testRemoveMultipleObjectsWithResult() {
|
||||
// initialize logging params
|
||||
|
|
@ -3997,7 +4070,7 @@ func testRemoveMultipleObjectsWithResult() {
|
|||
|
||||
defer cleanupVersionedBucket(bucketName, c)
|
||||
|
||||
r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
|
||||
buf := []byte("a")
|
||||
|
||||
nrObjects := 10
|
||||
nrLockedObjects := 5
|
||||
|
|
@ -4009,7 +4082,7 @@ func testRemoveMultipleObjectsWithResult() {
|
|||
// Upload objects and send them to objectsCh
|
||||
for i := 0; i < nrObjects; i++ {
|
||||
objectName := "sample" + strconv.Itoa(i) + ".txt"
|
||||
info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
|
||||
info, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), 1,
|
||||
minio.PutObjectOptions{ContentType: "application/octet-stream"})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "PutObject failed", err)
|
||||
|
|
@ -7589,7 +7662,7 @@ func testGetObjectModified() {
|
|||
|
||||
// Confirm that a Stat() call in between doesn't change the Object's cached etag.
|
||||
_, err = reader.Stat()
|
||||
expectedError := "At least one of the pre-conditions you specified did not hold"
|
||||
expectedError := "At least one of the pre-conditions you specified did not hold."
|
||||
if err.Error() != expectedError {
|
||||
logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err)
|
||||
return
|
||||
|
|
@ -7751,8 +7824,8 @@ func testMakeBucketErrorV2() {
|
|||
return
|
||||
}
|
||||
// Verify valid error response from server.
|
||||
if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
|
||||
minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
|
||||
if minio.ToErrorResponse(err).Code != minio.BucketAlreadyExists &&
|
||||
minio.ToErrorResponse(err).Code != minio.BucketAlreadyOwnedByYou {
|
||||
logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
|
||||
return
|
||||
}
|
||||
|
|
@ -11415,6 +11488,87 @@ func testPutObject0ByteV2() {
|
|||
logSuccess(testName, function, args, startTime)
|
||||
}
|
||||
|
||||
// Test put object with 0 byte object with non-US-ASCII characters.
|
||||
func testPutObjectMetadataNonUSASCIIV2() {
|
||||
// initialize logging params
|
||||
startTime := time.Now()
|
||||
testName := getFuncName()
|
||||
function := "PutObject(bucketName, objectName, reader, size, opts)"
|
||||
args := map[string]interface{}{
|
||||
"bucketName": "",
|
||||
"objectName": "",
|
||||
"size": 0,
|
||||
"opts": "",
|
||||
}
|
||||
metadata := map[string]string{
|
||||
"test-zh": "你好",
|
||||
"test-ja": "こんにちは",
|
||||
"test-ko": "안녕하세요",
|
||||
"test-ru": "Здравствуй",
|
||||
"test-de": "Hallo",
|
||||
"test-it": "Ciao",
|
||||
"test-pt": "Olá",
|
||||
"test-ar": "مرحبا",
|
||||
"test-hi": "नमस्ते",
|
||||
"test-hu": "Helló",
|
||||
"test-ro": "Bună",
|
||||
"test-be": "Прывiтанне",
|
||||
"test-sl": "Pozdravljen",
|
||||
"test-sr": "Здраво",
|
||||
"test-bg": "Здравейте",
|
||||
"test-uk": "Привіт",
|
||||
}
|
||||
c, err := NewClient(ClientConfig{CredsV2: true})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Generate a new random bucket name.
|
||||
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
|
||||
args["bucketName"] = bucketName
|
||||
|
||||
// Make a new bucket.
|
||||
err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "MakeBucket failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
objectName := bucketName + "unique"
|
||||
args["objectName"] = objectName
|
||||
args["opts"] = minio.PutObjectOptions{}
|
||||
|
||||
// Upload an object.
|
||||
_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{
|
||||
UserMetadata: metadata,
|
||||
})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
|
||||
return
|
||||
}
|
||||
st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err)
|
||||
return
|
||||
}
|
||||
if st.Size != 0 {
|
||||
logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(st.Size), err)
|
||||
return
|
||||
}
|
||||
|
||||
for k, v := range metadata {
|
||||
if st.Metadata.Get(http.CanonicalHeaderKey("X-Amz-Meta-"+k)) != v {
|
||||
logError(testName, function, args, startTime, "", "Expected upload object metadata "+k+": "+v+" but got "+st.Metadata.Get("X-Amz-Meta-"+k), err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
logSuccess(testName, function, args, startTime)
|
||||
}
|
||||
|
||||
// Test expected error cases
|
||||
func testComposeObjectErrorCases() {
|
||||
// initialize logging params
|
||||
|
|
@ -13557,6 +13711,115 @@ func testRemoveObjects() {
|
|||
logSuccess(testName, function, args, startTime)
|
||||
}
|
||||
|
||||
// Test deleting multiple objects with object retention set in Governance mode, via iterators
|
||||
func testRemoveObjectsIter() {
|
||||
// initialize logging params
|
||||
startTime := time.Now()
|
||||
testName := getFuncName()
|
||||
function := "RemoveObjects(bucketName, objectsCh, opts)"
|
||||
args := map[string]interface{}{
|
||||
"bucketName": "",
|
||||
"objectPrefix": "",
|
||||
"recursive": "true",
|
||||
}
|
||||
|
||||
c, err := NewClient(ClientConfig{})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Generate a new random bucket name.
|
||||
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
|
||||
args["bucketName"] = bucketName
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
args["objectName"] = objectName
|
||||
|
||||
// Make a new bucket.
|
||||
err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "MakeBucket failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
bufSize := dataFileMap["datafile-129-MB"]
|
||||
reader := getDataReader("datafile-129-MB")
|
||||
defer reader.Close()
|
||||
|
||||
_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "Error uploading object", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Replace with smaller...
|
||||
bufSize = dataFileMap["datafile-10-kB"]
|
||||
reader = getDataReader("datafile-10-kB")
|
||||
defer reader.Close()
|
||||
|
||||
_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "Error uploading object", err)
|
||||
}
|
||||
|
||||
t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
|
||||
m := minio.RetentionMode(minio.Governance)
|
||||
opts := minio.PutObjectRetentionOptions{
|
||||
GovernanceBypass: false,
|
||||
RetainUntilDate: &t,
|
||||
Mode: &m,
|
||||
}
|
||||
err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "Error setting retention", err)
|
||||
return
|
||||
}
|
||||
|
||||
objectsIter := c.ListObjectsIter(context.Background(), bucketName, minio.ListObjectsOptions{
|
||||
WithVersions: true,
|
||||
Recursive: true,
|
||||
})
|
||||
results, err := c.RemoveObjectsWithIter(context.Background(), bucketName, objectsIter, minio.RemoveObjectsOptions{})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "Error sending delete request", err)
|
||||
return
|
||||
}
|
||||
for result := range results {
|
||||
if result.Err != nil {
|
||||
// Error is expected here because Retention is set on the object
|
||||
// and RemoveObjects is called without Bypass Governance
|
||||
break
|
||||
}
|
||||
logError(testName, function, args, startTime, "", "Expected error during deletion", nil)
|
||||
return
|
||||
}
|
||||
|
||||
objectsIter = c.ListObjectsIter(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true})
|
||||
results, err = c.RemoveObjectsWithIter(context.Background(), bucketName, objectsIter, minio.RemoveObjectsOptions{
|
||||
GovernanceBypass: true,
|
||||
})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "Error sending delete request", err)
|
||||
return
|
||||
}
|
||||
for result := range results {
|
||||
if result.Err != nil {
|
||||
// Error is not expected here because Retention is set on the object
|
||||
// and RemoveObjects is called with Bypass Governance
|
||||
logError(testName, function, args, startTime, "", "Error detected during deletion", result.Err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Delete all objects and buckets
|
||||
if err = cleanupVersionedBucket(bucketName, c); err != nil {
|
||||
logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
logSuccess(testName, function, args, startTime)
|
||||
}
|
||||
|
||||
// Test get bucket tags
|
||||
func testGetBucketTagging() {
|
||||
// initialize logging params
|
||||
|
|
@ -13585,7 +13848,7 @@ func testGetBucketTagging() {
|
|||
}
|
||||
|
||||
_, err = c.GetBucketTagging(context.Background(), bucketName)
|
||||
if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
|
||||
if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet {
|
||||
logError(testName, function, args, startTime, "", "Invalid error from server failed", err)
|
||||
return
|
||||
}
|
||||
|
|
@ -13627,7 +13890,7 @@ func testSetBucketTagging() {
|
|||
}
|
||||
|
||||
_, err = c.GetBucketTagging(context.Background(), bucketName)
|
||||
if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
|
||||
if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet {
|
||||
logError(testName, function, args, startTime, "", "Invalid error from server", err)
|
||||
return
|
||||
}
|
||||
|
|
@ -13699,7 +13962,7 @@ func testRemoveBucketTagging() {
|
|||
}
|
||||
|
||||
_, err = c.GetBucketTagging(context.Background(), bucketName)
|
||||
if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
|
||||
if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet {
|
||||
logError(testName, function, args, startTime, "", "Invalid error from server", err)
|
||||
return
|
||||
}
|
||||
|
|
@ -13740,7 +14003,7 @@ func testRemoveBucketTagging() {
|
|||
}
|
||||
|
||||
_, err = c.GetBucketTagging(context.Background(), bucketName)
|
||||
if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
|
||||
if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet {
|
||||
logError(testName, function, args, startTime, "", "Invalid error from server", err)
|
||||
return
|
||||
}
|
||||
|
|
@ -13809,6 +14072,7 @@ func main() {
|
|||
testPutMultipartObjectWithChecksums(false)
|
||||
testPutMultipartObjectWithChecksums(true)
|
||||
testPutObject0ByteV2()
|
||||
testPutObjectMetadataNonUSASCIIV2()
|
||||
testPutObjectNoLengthV2()
|
||||
testPutObjectsUnknownV2()
|
||||
testGetObjectContextV2()
|
||||
|
|
@ -13826,6 +14090,7 @@ func main() {
|
|||
testGetObjectS3Zip()
|
||||
testRemoveMultipleObjects()
|
||||
testRemoveMultipleObjectsWithResult()
|
||||
testRemoveMultipleObjectsIter()
|
||||
testFPutObjectMultipart()
|
||||
testFPutObject()
|
||||
testGetObjectReadSeekFunctional()
|
||||
|
|
@ -13852,6 +14117,7 @@ func main() {
|
|||
testPutObjectWithContentLanguage()
|
||||
testListObjects()
|
||||
testRemoveObjects()
|
||||
testRemoveObjectsIter()
|
||||
testListObjectVersions()
|
||||
testStatObjectWithVersioning()
|
||||
testGetObjectWithVersioning()
|
||||
|
|
|
|||
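testRemoveObjectsIter above depends on object-lock semantics: deleting a retained version fails unless the request bypasses Governance retention. A condensed sketch of that flow; the helper name and the 24h retention window are placeholders:

package s3util

import (
	"context"
	"time"

	"github.com/minio/minio-go/v7"
)

// protectThenDelete puts a Governance retention on an object and then
// removes the given version with GovernanceBypass. Hypothetical helper;
// the bucket must have been created with object locking enabled.
func protectThenDelete(ctx context.Context, c *minio.Client, bucket, object, versionID string) error {
	until := time.Now().Add(24 * time.Hour)
	mode := minio.RetentionMode(minio.Governance)
	if err := c.PutObjectRetention(ctx, bucket, object, minio.PutObjectRetentionOptions{
		Mode:            &mode,
		RetainUntilDate: &until,
	}); err != nil {
		return err
	}
	// Deleting the retained version without GovernanceBypass would be rejected.
	return c.RemoveObject(ctx, bucket, object, minio.RemoveObjectOptions{
		VersionID:        versionID,
		GovernanceBypass: true,
	})
}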
45
vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
generated
vendored
|
|
@ -730,6 +730,8 @@ type Metrics struct {
|
|||
Errors TimedErrStats `json:"failed,omitempty"`
|
||||
// Total number of entries that are queued for replication
|
||||
QStats InQueueMetric `json:"queued"`
|
||||
// Total number of entries that have replication in progress
|
||||
InProgress InProgressMetric `json:"inProgress"`
|
||||
// Deprecated fields
|
||||
// Total Pending size in bytes across targets
|
||||
PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
|
||||
|
|
@ -830,6 +832,9 @@ type InQueueMetric struct {
|
|||
Max QStat `json:"peak" msg:"pq"`
|
||||
}
|
||||
|
||||
// InProgressMetric holds stats for objects with replication in progress
|
||||
type InProgressMetric InQueueMetric
|
||||
|
||||
// MetricName name of replication metric
|
||||
type MetricName string
|
||||
|
||||
|
|
@ -849,6 +854,14 @@ type WorkerStat struct {
|
|||
Max int32 `json:"max"`
|
||||
}
|
||||
|
||||
// TgtHealth holds health status of a target
|
||||
type TgtHealth struct {
|
||||
Online bool `json:"online"`
|
||||
LastOnline time.Time `json:"lastOnline"`
|
||||
TotalDowntime time.Duration `json:"totalDowntime"`
|
||||
OfflineCount int64 `json:"offlineCount"`
|
||||
}
|
||||
|
||||
// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes
|
||||
// and number of entries that failed replication after 3 retries
|
||||
type ReplMRFStats struct {
|
||||
|
|
@ -863,15 +876,18 @@ type ReplMRFStats struct {
|
|||
type ReplQNodeStats struct {
|
||||
NodeName string `json:"nodeName"`
|
||||
Uptime int64 `json:"uptime"`
|
||||
Workers WorkerStat `json:"activeWorkers"`
|
||||
Workers WorkerStat `json:"workers"`
|
||||
|
||||
XferStats map[MetricName]XferStats `json:"transferSummary"`
|
||||
TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"`
|
||||
|
||||
QStats InQueueMetric `json:"queueStats"`
|
||||
MRFStats ReplMRFStats `json:"mrfStats"`
|
||||
Retries CounterSummary `json:"retries"`
|
||||
Errors CounterSummary `json:"errors"`
|
||||
QStats InQueueMetric `json:"queueStats"`
|
||||
InProgressStats InProgressMetric `json:"progressStats"`
|
||||
|
||||
MRFStats ReplMRFStats `json:"mrfStats"`
|
||||
Retries CounterSummary `json:"retries"`
|
||||
Errors CounterSummary `json:"errors"`
|
||||
TgtHealth map[string]TgtHealth `json:"tgtHealth,omitempty"`
|
||||
}
|
||||
|
||||
// CounterSummary denotes the stats counter summary
|
||||
|
|
@ -918,6 +934,19 @@ func (q ReplQueueStats) qStatSummary() InQueueMetric {
|
|||
return m
|
||||
}
|
||||
|
||||
// inProgressSummary returns cluster level stats for objects with replication in progress
|
||||
func (q ReplQueueStats) inProgressSummary() InProgressMetric {
|
||||
m := InProgressMetric{}
|
||||
for _, v := range q.Nodes {
|
||||
m.Avg.Add(v.InProgressStats.Avg)
|
||||
m.Curr.Add(v.InProgressStats.Curr)
|
||||
if m.Max.Count < v.InProgressStats.Max.Count {
|
||||
m.Max.Add(v.InProgressStats.Max)
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// ReplQStats holds stats for objects in replication queue
|
||||
type ReplQStats struct {
|
||||
Uptime int64 `json:"uptime"`
|
||||
|
|
@ -926,7 +955,9 @@ type ReplQStats struct {
|
|||
XferStats map[MetricName]XferStats `json:"xferStats"`
|
||||
TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`
|
||||
|
||||
QStats InQueueMetric `json:"qStats"`
|
||||
QStats InQueueMetric `json:"qStats"`
|
||||
InProgressStats InProgressMetric `json:"progressStats"`
|
||||
|
||||
MRFStats ReplMRFStats `json:"mrfStats"`
|
||||
Retries CounterSummary `json:"retries"`
|
||||
Errors CounterSummary `json:"errors"`
|
||||
|
|
@ -935,10 +966,10 @@ type ReplQStats struct {
|
|||
// QStats returns cluster level stats for objects in replication queue
|
||||
func (q ReplQueueStats) QStats() (r ReplQStats) {
|
||||
r.QStats = q.qStatSummary()
|
||||
r.InProgressStats = q.inProgressSummary()
|
||||
r.XferStats = make(map[MetricName]XferStats)
|
||||
r.TgtXferStats = make(map[string]map[MetricName]XferStats)
|
||||
r.Workers = q.Workers()
|
||||
|
||||
for _, node := range q.Nodes {
|
||||
for arn := range node.TgtXferStats {
|
||||
xmap, ok := node.TgtXferStats[arn]
|
||||
|
|
|
|||
73
vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,73 @@
|
|||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2025 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// PeekReadCloser offers a way to peek a ReadCloser stream and then
|
||||
// return the exact stream of the underlying ReadCloser
|
||||
type PeekReadCloser struct {
|
||||
io.ReadCloser
|
||||
|
||||
recordMode bool
|
||||
recordMaxBuf int
|
||||
recordBuf *bytes.Buffer
|
||||
}
|
||||
|
||||
// ReplayFromStart ensures next Read() will restart to stream the
|
||||
// underlying ReadCloser stream from the beginning
|
||||
func (prc *PeekReadCloser) ReplayFromStart() {
|
||||
prc.recordMode = false
|
||||
}
|
||||
|
||||
func (prc *PeekReadCloser) Read(p []byte) (int, error) {
|
||||
if prc.recordMode {
|
||||
if prc.recordBuf.Len() > prc.recordMaxBuf {
|
||||
return 0, errors.New("maximum peek buffer exceeded")
|
||||
}
|
||||
n, err := prc.ReadCloser.Read(p)
|
||||
prc.recordBuf.Write(p[:n])
|
||||
return n, err
|
||||
}
|
||||
// Replay mode
|
||||
if prc.recordBuf.Len() > 0 {
|
||||
pn, _ := prc.recordBuf.Read(p)
|
||||
return pn, nil
|
||||
}
|
||||
return prc.ReadCloser.Read(p)
|
||||
}
|
||||
|
||||
// Close releases the record buffer memory and closes the underlying ReadCloser
|
||||
func (prc *PeekReadCloser) Close() error {
|
||||
prc.recordBuf.Reset()
|
||||
return prc.ReadCloser.Close()
|
||||
}
|
||||
|
||||
// NewPeekReadCloser returns a new peek reader
|
||||
func NewPeekReadCloser(rc io.ReadCloser, maxBufSize int) *PeekReadCloser {
|
||||
return &PeekReadCloser{
|
||||
ReadCloser: rc,
|
||||
recordMode: true, // recording mode by default
|
||||
recordBuf: bytes.NewBuffer(make([]byte, 0, 1024)),
|
||||
recordMaxBuf: maxBufSize,
|
||||
}
|
||||
}
|
||||
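The new PeekReadCloser records whatever is read until ReplayFromStart is called and then replays it before continuing with the underlying stream. A small sketch of that behaviour via the exported constructor; importing the pkg/utils path directly is shown only for illustration:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/minio/minio-go/v7/pkg/utils"
)

func main() {
	body := io.NopCloser(strings.NewReader("<Error><Code>NoSuchKey</Code></Error>"))
	prc := utils.NewPeekReadCloser(body, 1024)

	// Peek the first few bytes while the reader is still recording.
	peek := make([]byte, 7)
	n, _ := prc.Read(peek)
	fmt.Printf("peeked: %q\n", peek[:n])

	// Rewind: the next reads replay the recorded bytes, then continue
	// with the rest of the underlying stream.
	prc.ReplayFromStart()
	if _, err := io.Copy(os.Stdout, prc); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Println()
	prc.Close()
}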
2
vendor/github.com/minio/minio-go/v7/post-policy.go
generated
vendored
|
|
@ -161,7 +161,7 @@ func (p *PostPolicy) SetTagging(tagging string) error {
|
|||
}
|
||||
_, err := tags.ParseObjectXML(strings.NewReader(tagging))
|
||||
if err != nil {
|
||||
return errors.New("The XML you provided was not well-formed or did not validate against our published schema.") //nolint
|
||||
return errors.New(s3ErrorResponseMap[MalformedXML]) //nolint
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
matchType: "eq",
|
||||
|
|
|
|||
2
vendor/github.com/minio/minio-go/v7/retry.go
generated
vendored
|
|
@ -104,6 +104,8 @@ var retryableS3Codes = map[string]struct{}{
|
|||
"ExpiredToken": {},
|
||||
"ExpiredTokenException": {},
|
||||
"SlowDown": {},
|
||||
"SlowDownWrite": {},
|
||||
"SlowDownRead": {},
|
||||
// Add more AWS S3 codes here.
|
||||
}
|
||||
|
||||
|
|
|
|||
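retry.go above now also treats SlowDownWrite and SlowDownRead as retryable. The client retries these internally; the sketch below shows the equivalent application-level pattern with exponential backoff, using placeholder limits:

package s3util

import (
	"context"
	"time"

	"github.com/minio/minio-go/v7"
)

// withThrottleRetry retries op with exponential backoff while the server
// answers with a SlowDown-style throttling code. Illustrative only: the
// client already retries these codes internally.
func withThrottleRetry(ctx context.Context, op func() error) error {
	backoff := 100 * time.Millisecond
	for attempt := 0; attempt < 5; attempt++ {
		err := op()
		if err == nil {
			return nil
		}
		switch minio.ToErrorResponse(err).Code {
		case "SlowDown", "SlowDownWrite", "SlowDownRead":
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(backoff):
				backoff *= 2
			}
		default:
			return err
		}
	}
	return op()
}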
130
vendor/github.com/minio/minio-go/v7/s3-error.go
generated
vendored
|
|
@ -17,46 +17,100 @@
|
|||
|
||||
package minio
|
||||
|
||||
// Constants for error keys
|
||||
const (
|
||||
NoSuchBucket = "NoSuchBucket"
|
||||
NoSuchKey = "NoSuchKey"
|
||||
NoSuchUpload = "NoSuchUpload"
|
||||
AccessDenied = "AccessDenied"
|
||||
Conflict = "Conflict"
|
||||
PreconditionFailed = "PreconditionFailed"
|
||||
InvalidArgument = "InvalidArgument"
|
||||
EntityTooLarge = "EntityTooLarge"
|
||||
EntityTooSmall = "EntityTooSmall"
|
||||
UnexpectedEOF = "UnexpectedEOF"
|
||||
APINotSupported = "APINotSupported"
|
||||
InvalidRegion = "InvalidRegion"
|
||||
NoSuchBucketPolicy = "NoSuchBucketPolicy"
|
||||
BadDigest = "BadDigest"
|
||||
IncompleteBody = "IncompleteBody"
|
||||
InternalError = "InternalError"
|
||||
InvalidAccessKeyID = "InvalidAccessKeyId"
|
||||
InvalidBucketName = "InvalidBucketName"
|
||||
InvalidDigest = "InvalidDigest"
|
||||
InvalidRange = "InvalidRange"
|
||||
MalformedXML = "MalformedXML"
|
||||
MissingContentLength = "MissingContentLength"
|
||||
MissingContentMD5 = "MissingContentMD5"
|
||||
MissingRequestBodyError = "MissingRequestBodyError"
|
||||
NotImplemented = "NotImplemented"
|
||||
RequestTimeTooSkewed = "RequestTimeTooSkewed"
|
||||
SignatureDoesNotMatch = "SignatureDoesNotMatch"
|
||||
MethodNotAllowed = "MethodNotAllowed"
|
||||
InvalidPart = "InvalidPart"
|
||||
InvalidPartOrder = "InvalidPartOrder"
|
||||
InvalidObjectState = "InvalidObjectState"
|
||||
AuthorizationHeaderMalformed = "AuthorizationHeaderMalformed"
|
||||
MalformedPOSTRequest = "MalformedPOSTRequest"
|
||||
BucketNotEmpty = "BucketNotEmpty"
|
||||
AllAccessDisabled = "AllAccessDisabled"
|
||||
MalformedPolicy = "MalformedPolicy"
|
||||
MissingFields = "MissingFields"
|
||||
AuthorizationQueryParametersError = "AuthorizationQueryParametersError"
|
||||
MalformedDate = "MalformedDate"
|
||||
BucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
|
||||
InvalidDuration = "InvalidDuration"
|
||||
XAmzContentSHA256Mismatch = "XAmzContentSHA256Mismatch"
|
||||
XMinioInvalidObjectName = "XMinioInvalidObjectName"
|
||||
NoSuchCORSConfiguration = "NoSuchCORSConfiguration"
|
||||
BucketAlreadyExists = "BucketAlreadyExists"
|
||||
NoSuchVersion = "NoSuchVersion"
|
||||
NoSuchTagSet = "NoSuchTagSet"
|
||||
Testing = "Testing"
|
||||
Success = "Success"
|
||||
)
|
||||
|
||||
// Non exhaustive list of AWS S3 standard error responses -
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
|
||||
var s3ErrorResponseMap = map[string]string{
|
||||
"AccessDenied": "Access Denied.",
|
||||
"BadDigest": "The Content-Md5 you specified did not match what we received.",
|
||||
"EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.",
|
||||
"EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
|
||||
"IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
|
||||
"InternalError": "We encountered an internal error, please try again.",
|
||||
"InvalidAccessKeyId": "The access key ID you provided does not exist in our records.",
|
||||
"InvalidBucketName": "The specified bucket is not valid.",
|
||||
"InvalidDigest": "The Content-Md5 you specified is not valid.",
|
||||
"InvalidRange": "The requested range is not satisfiable",
|
||||
"MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.",
|
||||
"MissingContentLength": "You must provide the Content-Length HTTP header.",
|
||||
"MissingContentMD5": "Missing required header for this request: Content-Md5.",
|
||||
"MissingRequestBodyError": "Request body is empty.",
|
||||
"NoSuchBucket": "The specified bucket does not exist.",
|
||||
"NoSuchBucketPolicy": "The bucket policy does not exist",
|
||||
"NoSuchKey": "The specified key does not exist.",
|
||||
"NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
|
||||
"NotImplemented": "A header you provided implies functionality that is not implemented",
|
||||
"PreconditionFailed": "At least one of the pre-conditions you specified did not hold",
|
||||
"RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.",
|
||||
"SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
|
||||
"MethodNotAllowed": "The specified method is not allowed against this resource.",
|
||||
"InvalidPart": "One or more of the specified parts could not be found.",
|
||||
"InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
|
||||
"InvalidObjectState": "The operation is not valid for the current state of the object.",
|
||||
"AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.",
|
||||
"MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.",
|
||||
"BucketNotEmpty": "The bucket you tried to delete is not empty",
|
||||
"AllAccessDisabled": "All access to this bucket has been disabled.",
|
||||
"MalformedPolicy": "Policy has invalid resource.",
|
||||
"MissingFields": "Missing fields in request.",
|
||||
"AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
|
||||
"MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
|
||||
"BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.",
|
||||
"InvalidDuration": "Duration provided in the request is invalid.",
|
||||
"XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.",
|
||||
"NoSuchCORSConfiguration": "The specified bucket does not have a CORS configuration.",
|
||||
AccessDenied: "Access Denied.",
|
||||
BadDigest: "The Content-Md5 you specified did not match what we received.",
|
||||
EntityTooSmall: "Your proposed upload is smaller than the minimum allowed object size.",
|
||||
EntityTooLarge: "Your proposed upload exceeds the maximum allowed object size.",
|
||||
IncompleteBody: "You did not provide the number of bytes specified by the Content-Length HTTP header.",
|
||||
InternalError: "We encountered an internal error, please try again.",
|
||||
InvalidAccessKeyID: "The access key ID you provided does not exist in our records.",
|
||||
InvalidBucketName: "The specified bucket is not valid.",
|
||||
InvalidDigest: "The Content-Md5 you specified is not valid.",
|
||||
InvalidRange: "The requested range is not satisfiable.",
|
||||
MalformedXML: "The XML you provided was not well-formed or did not validate against our published schema.",
|
||||
MissingContentLength: "You must provide the Content-Length HTTP header.",
|
||||
MissingContentMD5: "Missing required header for this request: Content-Md5.",
|
||||
MissingRequestBodyError: "Request body is empty.",
|
||||
NoSuchBucket: "The specified bucket does not exist.",
|
||||
NoSuchBucketPolicy: "The bucket policy does not exist.",
|
||||
NoSuchKey: "The specified key does not exist.",
|
||||
NoSuchUpload: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
|
||||
NotImplemented: "A header you provided implies functionality that is not implemented.",
|
||||
PreconditionFailed: "At least one of the pre-conditions you specified did not hold.",
|
||||
RequestTimeTooSkewed: "The difference between the request time and the server's time is too large.",
|
||||
SignatureDoesNotMatch: "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
|
||||
MethodNotAllowed: "The specified method is not allowed against this resource.",
|
||||
InvalidPart: "One or more of the specified parts could not be found.",
|
||||
InvalidPartOrder: "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
|
||||
InvalidObjectState: "The operation is not valid for the current state of the object.",
|
||||
AuthorizationHeaderMalformed: "The authorization header is malformed; the region is wrong.",
|
||||
MalformedPOSTRequest: "The body of your POST request is not well-formed multipart/form-data.",
|
||||
BucketNotEmpty: "The bucket you tried to delete is not empty.",
|
||||
AllAccessDisabled: "All access to this bucket has been disabled.",
|
||||
MalformedPolicy: "Policy has invalid resource.",
|
||||
MissingFields: "Missing fields in request.",
|
||||
AuthorizationQueryParametersError: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
|
||||
MalformedDate: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
|
||||
BucketAlreadyOwnedByYou: "Your previous request to create the named bucket succeeded and you already own it.",
|
||||
InvalidDuration: "Duration provided in the request is invalid.",
|
||||
XAmzContentSHA256Mismatch: "The provided 'x-amz-content-sha256' header does not match what was computed.",
|
||||
NoSuchCORSConfiguration: "The specified bucket does not have a CORS configuration.",
|
||||
Conflict: "Bucket not empty.",
|
||||
// Add new API errors here.
|
||||
}
|
||||
|
|
|
|||
22
vendor/github.com/minio/minio-go/v7/utils.go
generated
vendored
|
|
@ -30,6 +30,7 @@ import (
|
|||
"hash"
|
||||
"io"
|
||||
"math/rand"
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
|
@ -210,6 +211,7 @@ func extractObjMetadata(header http.Header) http.Header {
|
|||
"X-Amz-Server-Side-Encryption",
|
||||
"X-Amz-Tagging-Count",
|
||||
"X-Amz-Meta-",
|
||||
"X-Minio-Meta-",
|
||||
// Add new headers to be preserved.
|
||||
// if you add new headers here, please extend
|
||||
// PutObjectOptions{} to preserve them
|
||||
|
|
@ -223,6 +225,16 @@ func extractObjMetadata(header http.Header) http.Header {
|
|||
continue
|
||||
}
|
||||
found = true
|
||||
if prefix == "X-Amz-Meta-" || prefix == "X-Minio-Meta-" {
|
||||
for index, val := range v {
|
||||
if strings.HasPrefix(val, "=?") {
|
||||
decoder := mime.WordDecoder{}
|
||||
if decoded, err := decoder.DecodeHeader(val); err == nil {
|
||||
v[index] = decoded
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
if found {
|
||||
|
|
@ -268,7 +280,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
|
|||
if err != nil {
|
||||
// Content-Length is not valid
|
||||
return ObjectInfo{}, ErrorResponse{
|
||||
Code: "InternalError",
|
||||
Code: InternalError,
|
||||
Message: fmt.Sprintf("Content-Length is not an integer, failed with %v", err),
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
|
|
@ -283,7 +295,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
|
|||
mtime, err := parseRFC7231Time(h.Get("Last-Modified"))
|
||||
if err != nil {
|
||||
return ObjectInfo{}, ErrorResponse{
|
||||
Code: "InternalError",
|
||||
Code: InternalError,
|
||||
Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err),
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
|
|
@ -305,7 +317,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
|
|||
expiry, err = parseRFC7231Time(expiryStr)
if err != nil {
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Code: InternalError,
Message: fmt.Sprintf("'Expiry' is not in supported format: %v", err),
BucketName: bucketName,
Key: objectName,
|
|
@ -327,7 +339,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
|
|||
userTags, err := tags.ParseObjectTags(h.Get(amzTaggingHeader))
if err != nil {
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Code: InternalError,
}
}
|
|
@ -336,7 +348,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
|
|||
tagCount, err = strconv.Atoi(count)
if err != nil {
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Code: InternalError,
Message: fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err),
BucketName: bucketName,
Key: objectName,
|
||||
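The extractObjMetadata hunk above now decodes RFC 2047 encoded-words in X-Amz-Meta-* and X-Minio-Meta-* values. A minimal standalone sketch of that decoding step using only the standard library; the header value below is a made-up example, not taken from this diff:

```go
package main

import (
	"fmt"
	"mime"
	"net/http"
)

func main() {
	// Hypothetical response headers: the encoded-word stands in for what a
	// server might return for non-ASCII user metadata.
	h := http.Header{}
	h.Set("X-Amz-Meta-Title", "=?UTF-8?Q?caf=C3=A9?=")

	val := h.Get("X-Amz-Meta-Title")
	dec := mime.WordDecoder{}
	if decoded, err := dec.DecodeHeader(val); err == nil {
		val = decoded
	}
	fmt.Println(val) // café
}
```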
|
|
|
|||
2
vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml
generated
vendored
|
|
@ -113,7 +113,7 @@ dockers:
|
|||
checksum:
name_template: 'sha256sums.txt'
snapshot:
name_template: "{{ incpatch .Version }}-next"
version_template: "{{ incpatch .Version }}-next"
release:
github:
owner: pelletier
|
||||
|
|
|
|||
2
vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
generated
vendored
|
|
@ -59,7 +59,7 @@ func (d *Decoder) DisallowUnknownFields() *Decoder {
|
|||
//
// With this feature enabled, types implementing the unstable/Unmarshaler
// interface can be decoded from any structure of the document. It allows types
// that don't have a straightfoward TOML representation to provide their own
// that don't have a straightforward TOML representation to provide their own
// decoding logic.
//
// Currently, types can only decode from a single value. Tables and array tables
|
||||
|
|
|
|||
25
vendor/github.com/prometheus/common/model/time.go
generated
vendored
|
|
@ -201,6 +201,7 @@ var unitMap = map[string]struct {
|
|||
|
||||
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
// Negative durations are not supported.
func ParseDuration(s string) (Duration, error) {
switch s {
case "0":
|
|
@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) {
|
|||
return 0, errors.New("duration out of range")
}
}

return Duration(dur), nil
}

// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations.
func ParseDurationAllowNegative(s string) (Duration, error) {
if s == "" || s[0] != '-' {
return ParseDuration(s)
}

d, err := ParseDuration(s[1:])

return -d, err
}

func (d Duration) String() string {
var (
ms = int64(time.Duration(d) / time.Millisecond)
r = ""
ms = int64(time.Duration(d) / time.Millisecond)
r = ""
sign = ""
)

if ms == 0 {
return "0s"
}

if ms < 0 {
sign, ms = "-", -ms
}

f := func(unit string, mult int64, exact bool) {
if exact && ms%mult != 0 {
return
|
|
@ -286,7 +305,7 @@ func (d Duration) String() string {
|
|||
f("s", 1000, false)
|
||||
f("ms", 1, false)
|
||||
|
||||
return r
|
||||
return sign + r
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
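The hunks above introduce ParseDurationAllowNegative and thread a sign through Duration.String. A small usage sketch, assuming this version of github.com/prometheus/common/model is the one on the import path:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// ParseDuration still rejects a leading minus sign...
	if _, err := model.ParseDuration("-90m"); err != nil {
		fmt.Println("ParseDuration:", err)
	}

	// ...while the new helper accepts it, and String() now carries the sign.
	d, err := model.ParseDurationAllowNegative("-90m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // -1h30m
}
```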
|
|
|
|||
69
vendor/github.com/spf13/cast/alias.go
generated
vendored
Normal file
|
|
@ -0,0 +1,69 @@
|
|||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
package cast
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"slices"
|
||||
)
|
||||
|
||||
var kindNames = []string{
|
||||
reflect.String: "string",
|
||||
reflect.Bool: "bool",
|
||||
reflect.Int: "int",
|
||||
reflect.Int8: "int8",
|
||||
reflect.Int16: "int16",
|
||||
reflect.Int32: "int32",
|
||||
reflect.Int64: "int64",
|
||||
reflect.Uint: "uint",
|
||||
reflect.Uint8: "uint8",
|
||||
reflect.Uint16: "uint16",
|
||||
reflect.Uint32: "uint32",
|
||||
reflect.Uint64: "uint64",
|
||||
reflect.Float32: "float32",
|
||||
reflect.Float64: "float64",
|
||||
}
|
||||
|
||||
var kinds = map[reflect.Kind]func(reflect.Value) any{
|
||||
reflect.String: func(v reflect.Value) any { return v.String() },
|
||||
reflect.Bool: func(v reflect.Value) any { return v.Bool() },
|
||||
reflect.Int: func(v reflect.Value) any { return int(v.Int()) },
|
||||
reflect.Int8: func(v reflect.Value) any { return int8(v.Int()) },
|
||||
reflect.Int16: func(v reflect.Value) any { return int16(v.Int()) },
|
||||
reflect.Int32: func(v reflect.Value) any { return int32(v.Int()) },
|
||||
reflect.Int64: func(v reflect.Value) any { return v.Int() },
|
||||
reflect.Uint: func(v reflect.Value) any { return uint(v.Uint()) },
|
||||
reflect.Uint8: func(v reflect.Value) any { return uint8(v.Uint()) },
|
||||
reflect.Uint16: func(v reflect.Value) any { return uint16(v.Uint()) },
|
||||
reflect.Uint32: func(v reflect.Value) any { return uint32(v.Uint()) },
|
||||
reflect.Uint64: func(v reflect.Value) any { return v.Uint() },
|
||||
reflect.Float32: func(v reflect.Value) any { return float32(v.Float()) },
|
||||
reflect.Float64: func(v reflect.Value) any { return v.Float() },
|
||||
}
|
||||
|
||||
// resolveAlias attempts to resolve a named type to its underlying basic type (if possible).
|
||||
//
|
||||
// Pointers are expected to be indirected by this point.
|
||||
func resolveAlias(i any) (any, bool) {
|
||||
if i == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
t := reflect.TypeOf(i)
|
||||
|
||||
// Not a named type
|
||||
if t.Name() == "" || slices.Contains(kindNames, t.Name()) {
|
||||
return i, false
|
||||
}
|
||||
|
||||
resolve, ok := kinds[t.Kind()]
|
||||
if !ok { // Not a supported kind
|
||||
return i, false
|
||||
}
|
||||
|
||||
v := reflect.ValueOf(i)
|
||||
|
||||
return resolve(v), true
|
||||
}
|
||||
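alias.go lets the cast helpers unwrap named types whose underlying kind is one of the supported basic types. A short sketch of the effect, assuming the spf13/cast version vendored here; the Port type is a made-up example for illustration:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

// Port is a hypothetical named type; its underlying kind is int, so
// resolveAlias lets the helpers fall back to the plain int path.
type Port int

func main() {
	fmt.Println(cast.ToInt(Port(8080)))   // 8080
	fmt.Println(cast.ToString(Port(443))) // 443
}
```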
131
vendor/github.com/spf13/cast/basic.go
generated
vendored
Normal file
|
|
@ -0,0 +1,131 @@
|
|||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cast
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ToBoolE casts any value to a bool type.
|
||||
func ToBoolE(i any) (bool, error) {
|
||||
i, _ = indirect(i)
|
||||
|
||||
switch b := i.(type) {
|
||||
case bool:
|
||||
return b, nil
|
||||
case nil:
|
||||
return false, nil
|
||||
case int:
|
||||
return b != 0, nil
|
||||
case int8:
|
||||
return b != 0, nil
|
||||
case int16:
|
||||
return b != 0, nil
|
||||
case int32:
|
||||
return b != 0, nil
|
||||
case int64:
|
||||
return b != 0, nil
|
||||
case uint:
|
||||
return b != 0, nil
|
||||
case uint8:
|
||||
return b != 0, nil
|
||||
case uint16:
|
||||
return b != 0, nil
|
||||
case uint32:
|
||||
return b != 0, nil
|
||||
case uint64:
|
||||
return b != 0, nil
|
||||
case float32:
|
||||
return b != 0, nil
|
||||
case float64:
|
||||
return b != 0, nil
|
||||
case time.Duration:
|
||||
return b != 0, nil
|
||||
case string:
|
||||
return strconv.ParseBool(b)
|
||||
case json.Number:
|
||||
v, err := ToInt64E(b)
|
||||
if err == nil {
|
||||
return v != 0, nil
|
||||
}
|
||||
|
||||
return false, fmt.Errorf(errorMsg, i, i, false)
|
||||
default:
|
||||
if i, ok := resolveAlias(i); ok {
|
||||
return ToBoolE(i)
|
||||
}
|
||||
|
||||
return false, fmt.Errorf(errorMsg, i, i, false)
|
||||
}
|
||||
}
|
||||
|
||||
// ToStringE casts any value to a string type.
|
||||
func ToStringE(i any) (string, error) {
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
return s, nil
|
||||
case bool:
|
||||
return strconv.FormatBool(s), nil
|
||||
case float64:
|
||||
return strconv.FormatFloat(s, 'f', -1, 64), nil
|
||||
case float32:
|
||||
return strconv.FormatFloat(float64(s), 'f', -1, 32), nil
|
||||
case int:
|
||||
return strconv.Itoa(s), nil
|
||||
case int8:
|
||||
return strconv.FormatInt(int64(s), 10), nil
|
||||
case int16:
|
||||
return strconv.FormatInt(int64(s), 10), nil
|
||||
case int32:
|
||||
return strconv.FormatInt(int64(s), 10), nil
|
||||
case int64:
|
||||
return strconv.FormatInt(s, 10), nil
|
||||
case uint:
|
||||
return strconv.FormatUint(uint64(s), 10), nil
|
||||
case uint8:
|
||||
return strconv.FormatUint(uint64(s), 10), nil
|
||||
case uint16:
|
||||
return strconv.FormatUint(uint64(s), 10), nil
|
||||
case uint32:
|
||||
return strconv.FormatUint(uint64(s), 10), nil
|
||||
case uint64:
|
||||
return strconv.FormatUint(s, 10), nil
|
||||
case json.Number:
|
||||
return s.String(), nil
|
||||
case []byte:
|
||||
return string(s), nil
|
||||
case template.HTML:
|
||||
return string(s), nil
|
||||
case template.URL:
|
||||
return string(s), nil
|
||||
case template.JS:
|
||||
return string(s), nil
|
||||
case template.CSS:
|
||||
return string(s), nil
|
||||
case template.HTMLAttr:
|
||||
return string(s), nil
|
||||
case nil:
|
||||
return "", nil
|
||||
case fmt.Stringer:
|
||||
return s.String(), nil
|
||||
case error:
|
||||
return s.Error(), nil
|
||||
default:
|
||||
if i, ok := indirect(i); ok {
|
||||
return ToStringE(i)
|
||||
}
|
||||
|
||||
if i, ok := resolveAlias(i); ok {
|
||||
return ToStringE(i)
|
||||
}
|
||||
|
||||
return "", fmt.Errorf(errorMsg, i, i, "")
|
||||
}
|
||||
}
|
||||
240
vendor/github.com/spf13/cast/cast.go
generated
vendored
|
|
@ -8,187 +8,77 @@ package cast
|
|||
|
||||
import "time"
|
||||
|
||||
// ToBool casts an interface to a bool type.
|
||||
func ToBool(i interface{}) bool {
|
||||
v, _ := ToBoolE(i)
|
||||
return v
|
||||
const errorMsg = "unable to cast %#v of type %T to %T"
|
||||
const errorMsgWith = "unable to cast %#v of type %T to %T: %w"
|
||||
|
||||
// Basic is a type parameter constraint for functions accepting basic types.
|
||||
//
|
||||
// It represents the supported basic types this package can cast to.
|
||||
type Basic interface {
|
||||
string | bool | Number | time.Time | time.Duration
|
||||
}
|
||||
|
||||
// ToTime casts an interface to a time.Time type.
|
||||
func ToTime(i interface{}) time.Time {
|
||||
v, _ := ToTimeE(i)
|
||||
return v
|
||||
// ToE casts any value to a [Basic] type.
|
||||
func ToE[T Basic](i any) (T, error) {
|
||||
var t T
|
||||
|
||||
var v any
|
||||
var err error
|
||||
|
||||
switch any(t).(type) {
|
||||
case string:
|
||||
v, err = ToStringE(i)
|
||||
case bool:
|
||||
v, err = ToBoolE(i)
|
||||
case int:
|
||||
v, err = toNumberE[int](i, parseInt[int])
|
||||
case int8:
|
||||
v, err = toNumberE[int8](i, parseInt[int8])
|
||||
case int16:
|
||||
v, err = toNumberE[int16](i, parseInt[int16])
|
||||
case int32:
|
||||
v, err = toNumberE[int32](i, parseInt[int32])
|
||||
case int64:
|
||||
v, err = toNumberE[int64](i, parseInt[int64])
|
||||
case uint:
|
||||
v, err = toUnsignedNumberE[uint](i, parseUint[uint])
|
||||
case uint8:
|
||||
v, err = toUnsignedNumberE[uint8](i, parseUint[uint8])
|
||||
case uint16:
|
||||
v, err = toUnsignedNumberE[uint16](i, parseUint[uint16])
|
||||
case uint32:
|
||||
v, err = toUnsignedNumberE[uint32](i, parseUint[uint32])
|
||||
case uint64:
|
||||
v, err = toUnsignedNumberE[uint64](i, parseUint[uint64])
|
||||
case float32:
|
||||
v, err = toNumberE[float32](i, parseFloat[float32])
|
||||
case float64:
|
||||
v, err = toNumberE[float64](i, parseFloat[float64])
|
||||
case time.Time:
|
||||
v, err = ToTimeE(i)
|
||||
case time.Duration:
|
||||
v, err = ToDurationE(i)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
|
||||
return v.(T), nil
|
||||
}
|
||||
|
||||
func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time {
|
||||
v, _ := ToTimeInDefaultLocationE(i, location)
|
||||
return v
|
||||
// Must is a helper that wraps a call to a cast function and panics if the error is non-nil.
|
||||
func Must[T any](i any, err error) T {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return i.(T)
|
||||
}
|
||||
|
||||
// ToDuration casts an interface to a time.Duration type.
|
||||
func ToDuration(i interface{}) time.Duration {
|
||||
v, _ := ToDurationE(i)
|
||||
return v
|
||||
}
|
||||
// To casts any value to a [Basic] type.
|
||||
func To[T Basic](i any) T {
|
||||
v, _ := ToE[T](i)
|
||||
|
||||
// ToFloat64 casts an interface to a float64 type.
|
||||
func ToFloat64(i interface{}) float64 {
|
||||
v, _ := ToFloat64E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToFloat32 casts an interface to a float32 type.
|
||||
func ToFloat32(i interface{}) float32 {
|
||||
v, _ := ToFloat32E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt64 casts an interface to an int64 type.
|
||||
func ToInt64(i interface{}) int64 {
|
||||
v, _ := ToInt64E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt32 casts an interface to an int32 type.
|
||||
func ToInt32(i interface{}) int32 {
|
||||
v, _ := ToInt32E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt16 casts an interface to an int16 type.
|
||||
func ToInt16(i interface{}) int16 {
|
||||
v, _ := ToInt16E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt8 casts an interface to an int8 type.
|
||||
func ToInt8(i interface{}) int8 {
|
||||
v, _ := ToInt8E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt casts an interface to an int type.
|
||||
func ToInt(i interface{}) int {
|
||||
v, _ := ToIntE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToUint casts an interface to a uint type.
|
||||
func ToUint(i interface{}) uint {
|
||||
v, _ := ToUintE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToUint64 casts an interface to a uint64 type.
|
||||
func ToUint64(i interface{}) uint64 {
|
||||
v, _ := ToUint64E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToUint32 casts an interface to a uint32 type.
|
||||
func ToUint32(i interface{}) uint32 {
|
||||
v, _ := ToUint32E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToUint16 casts an interface to a uint16 type.
|
||||
func ToUint16(i interface{}) uint16 {
|
||||
v, _ := ToUint16E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToUint8 casts an interface to a uint8 type.
|
||||
func ToUint8(i interface{}) uint8 {
|
||||
v, _ := ToUint8E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToString casts an interface to a string type.
|
||||
func ToString(i interface{}) string {
|
||||
v, _ := ToStringE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringMapString casts an interface to a map[string]string type.
|
||||
func ToStringMapString(i interface{}) map[string]string {
|
||||
v, _ := ToStringMapStringE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringMapStringSlice casts an interface to a map[string][]string type.
|
||||
func ToStringMapStringSlice(i interface{}) map[string][]string {
|
||||
v, _ := ToStringMapStringSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringMapBool casts an interface to a map[string]bool type.
|
||||
func ToStringMapBool(i interface{}) map[string]bool {
|
||||
v, _ := ToStringMapBoolE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringMapInt casts an interface to a map[string]int type.
|
||||
func ToStringMapInt(i interface{}) map[string]int {
|
||||
v, _ := ToStringMapIntE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringMapInt64 casts an interface to a map[string]int64 type.
|
||||
func ToStringMapInt64(i interface{}) map[string]int64 {
|
||||
v, _ := ToStringMapInt64E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringMap casts an interface to a map[string]interface{} type.
|
||||
func ToStringMap(i interface{}) map[string]interface{} {
|
||||
v, _ := ToStringMapE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToSlice casts an interface to a []interface{} type.
|
||||
func ToSlice(i interface{}) []interface{} {
|
||||
v, _ := ToSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToBoolSlice casts an interface to a []bool type.
|
||||
func ToBoolSlice(i interface{}) []bool {
|
||||
v, _ := ToBoolSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringSlice casts an interface to a []string type.
|
||||
func ToStringSlice(i interface{}) []string {
|
||||
v, _ := ToStringSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToIntSlice casts an interface to a []int type.
|
||||
func ToIntSlice(i interface{}) []int {
|
||||
v, _ := ToIntSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt64Slice casts an interface to a []int64 type.
|
||||
func ToInt64Slice(i interface{}) []int64 {
|
||||
v, _ := ToInt64SliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToUintSlice casts an interface to a []uint type.
|
||||
func ToUintSlice(i interface{}) []uint {
|
||||
v, _ := ToUintSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToFloat64Slice casts an interface to a []float64 type.
|
||||
func ToFloat64Slice(i interface{}) []float64 {
|
||||
v, _ := ToFloat64SliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToDurationSlice casts an interface to a []time.Duration type.
|
||||
func ToDurationSlice(i interface{}) []time.Duration {
|
||||
v, _ := ToDurationSliceE(i)
|
||||
return v
|
||||
}
|
||||
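cast.go replaces the hand-written per-type wrappers with the generic ToE/To helpers plus Must. A brief usage sketch, assuming the vendored v1.9.x API shown above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// ToE reports conversion errors; To swallows them like the old helpers.
	n, err := cast.ToE[int]("42")
	fmt.Println(n, err) // 42 <nil>

	fmt.Println(cast.To[time.Duration]("1.5s")) // 1.5s

	// Must panics if the wrapped call returned an error.
	f := cast.Must[float64](cast.ToE[float64]("3.14"))
	fmt.Println(f) // 3.14
}
```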
|
|
|
|||
1472
vendor/github.com/spf13/cast/caste.go
generated
vendored
File diff suppressed because it is too large
37
vendor/github.com/spf13/cast/indirect.go
generated
vendored
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cast
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// From html/template/content.go
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// indirect returns the value, after dereferencing as many times
|
||||
// as necessary to reach the base type (or nil).
|
||||
func indirect(i any) (any, bool) {
|
||||
if i == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if t := reflect.TypeOf(i); t.Kind() != reflect.Ptr {
|
||||
// Avoid creating a reflect.Value if it's not a pointer.
|
||||
return i, false
|
||||
}
|
||||
|
||||
v := reflect.ValueOf(i)
|
||||
|
||||
for v.Kind() == reflect.Ptr || (v.Kind() == reflect.Interface && v.Elem().Kind() == reflect.Ptr) {
|
||||
if v.IsNil() {
|
||||
return nil, true
|
||||
}
|
||||
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
return v.Interface(), true
|
||||
}
|
||||
79
vendor/github.com/spf13/cast/internal/time.go
generated
vendored
Normal file
|
|
@ -0,0 +1,79 @@
|
|||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
//go:generate stringer -type=TimeFormatType
|
||||
|
||||
type TimeFormatType int
|
||||
|
||||
const (
|
||||
TimeFormatNoTimezone TimeFormatType = iota
|
||||
TimeFormatNamedTimezone
|
||||
TimeFormatNumericTimezone
|
||||
TimeFormatNumericAndNamedTimezone
|
||||
TimeFormatTimeOnly
|
||||
)
|
||||
|
||||
type TimeFormat struct {
|
||||
Format string
|
||||
Typ TimeFormatType
|
||||
}
|
||||
|
||||
func (f TimeFormat) HasTimezone() bool {
|
||||
// We don't include the formats with only named timezones, see
|
||||
// https://github.com/golang/go/issues/19694#issuecomment-289103522
|
||||
return f.Typ >= TimeFormatNumericTimezone && f.Typ <= TimeFormatNumericAndNamedTimezone
|
||||
}
|
||||
|
||||
var TimeFormats = []TimeFormat{
|
||||
// Keep common formats at the top.
|
||||
{"2006-01-02", TimeFormatNoTimezone},
|
||||
{time.RFC3339, TimeFormatNumericTimezone},
|
||||
{"2006-01-02T15:04:05", TimeFormatNoTimezone}, // iso8601 without timezone
|
||||
{time.RFC1123Z, TimeFormatNumericTimezone},
|
||||
{time.RFC1123, TimeFormatNamedTimezone},
|
||||
{time.RFC822Z, TimeFormatNumericTimezone},
|
||||
{time.RFC822, TimeFormatNamedTimezone},
|
||||
{time.RFC850, TimeFormatNamedTimezone},
|
||||
{"2006-01-02 15:04:05.999999999 -0700 MST", TimeFormatNumericAndNamedTimezone}, // Time.String()
|
||||
{"2006-01-02T15:04:05-0700", TimeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon
|
||||
{"2006-01-02 15:04:05Z0700", TimeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon
|
||||
{"2006-01-02 15:04:05", TimeFormatNoTimezone},
|
||||
{time.ANSIC, TimeFormatNoTimezone},
|
||||
{time.UnixDate, TimeFormatNamedTimezone},
|
||||
{time.RubyDate, TimeFormatNumericTimezone},
|
||||
{"2006-01-02 15:04:05Z07:00", TimeFormatNumericTimezone},
|
||||
{"02 Jan 2006", TimeFormatNoTimezone},
|
||||
{"2006-01-02 15:04:05 -07:00", TimeFormatNumericTimezone},
|
||||
{"2006-01-02 15:04:05 -0700", TimeFormatNumericTimezone},
|
||||
{time.Kitchen, TimeFormatTimeOnly},
|
||||
{time.Stamp, TimeFormatTimeOnly},
|
||||
{time.StampMilli, TimeFormatTimeOnly},
|
||||
{time.StampMicro, TimeFormatTimeOnly},
|
||||
{time.StampNano, TimeFormatTimeOnly},
|
||||
}
|
||||
|
||||
func ParseDateWith(s string, location *time.Location, formats []TimeFormat) (d time.Time, e error) {
|
||||
for _, format := range formats {
|
||||
if d, e = time.Parse(format.Format, s); e == nil {
|
||||
|
||||
// Some time formats have a zone name, but no offset, so it gets
|
||||
// put in that zone name (not the default one passed in to us), but
|
||||
// without that zone's offset. So set the location manually.
|
||||
if format.Typ <= TimeFormatNamedTimezone {
|
||||
if location == nil {
|
||||
location = time.Local
|
||||
}
|
||||
year, month, day := d.Date()
|
||||
hour, min, sec := d.Clock()
|
||||
d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
return d, fmt.Errorf("unable to parse date: %s", s)
|
||||
}
|
||||
27
vendor/github.com/spf13/cast/internal/timeformattype_string.go
generated
vendored
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
// Code generated by "stringer -type=TimeFormatType"; DO NOT EDIT.
|
||||
|
||||
package internal
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[TimeFormatNoTimezone-0]
|
||||
_ = x[TimeFormatNamedTimezone-1]
|
||||
_ = x[TimeFormatNumericTimezone-2]
|
||||
_ = x[TimeFormatNumericAndNamedTimezone-3]
|
||||
_ = x[TimeFormatTimeOnly-4]
|
||||
}
|
||||
|
||||
const _TimeFormatType_name = "TimeFormatNoTimezoneTimeFormatNamedTimezoneTimeFormatNumericTimezoneTimeFormatNumericAndNamedTimezoneTimeFormatTimeOnly"
|
||||
|
||||
var _TimeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119}
|
||||
|
||||
func (i TimeFormatType) String() string {
|
||||
if i < 0 || i >= TimeFormatType(len(_TimeFormatType_index)-1) {
|
||||
return "TimeFormatType(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _TimeFormatType_name[_TimeFormatType_index[i]:_TimeFormatType_index[i+1]]
|
||||
}
|
||||
224
vendor/github.com/spf13/cast/map.go
generated
vendored
Normal file
|
|
@ -0,0 +1,224 @@
|
|||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cast
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
func toMapE[K comparable, V any](i any, keyFn func(any) K, valFn func(any) V) (map[K]V, error) {
|
||||
m := map[K]V{}
|
||||
|
||||
if i == nil {
|
||||
return nil, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
|
||||
switch v := i.(type) {
|
||||
case map[K]V:
|
||||
return v, nil
|
||||
|
||||
case map[K]any:
|
||||
for k, val := range v {
|
||||
m[k] = valFn(val)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
case map[any]V:
|
||||
for k, val := range v {
|
||||
m[keyFn(k)] = val
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
case map[any]any:
|
||||
for k, val := range v {
|
||||
m[keyFn(k)] = valFn(val)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
case string:
|
||||
err := jsonStringToObject(v, &m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
}
|
||||
|
||||
func toStringMapE[T any](i any, fn func(any) T) (map[string]T, error) {
|
||||
return toMapE(i, ToString, fn)
|
||||
}
|
||||
|
||||
// ToStringMapStringE casts any value to a map[string]string type.
|
||||
func ToStringMapStringE(i any) (map[string]string, error) {
|
||||
return toStringMapE(i, ToString)
|
||||
}
|
||||
|
||||
// ToStringMapStringSliceE casts any value to a map[string][]string type.
|
||||
func ToStringMapStringSliceE(i any) (map[string][]string, error) {
|
||||
m := map[string][]string{}
|
||||
|
||||
switch v := i.(type) {
|
||||
case map[string][]string:
|
||||
return v, nil
|
||||
case map[string][]any:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = ToStringSlice(val)
|
||||
}
|
||||
return m, nil
|
||||
case map[string]string:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = []string{val}
|
||||
}
|
||||
case map[string]any:
|
||||
for k, val := range v {
|
||||
switch vt := val.(type) {
|
||||
case []any:
|
||||
m[ToString(k)] = ToStringSlice(vt)
|
||||
case []string:
|
||||
m[ToString(k)] = vt
|
||||
default:
|
||||
m[ToString(k)] = []string{ToString(val)}
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
case map[any][]string:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = ToStringSlice(val)
|
||||
}
|
||||
return m, nil
|
||||
case map[any]string:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = ToStringSlice(val)
|
||||
}
|
||||
return m, nil
|
||||
case map[any][]any:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = ToStringSlice(val)
|
||||
}
|
||||
return m, nil
|
||||
case map[any]any:
|
||||
for k, val := range v {
|
||||
key, err := ToStringE(k)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
value, err := ToStringSliceE(val)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
m[key] = value
|
||||
}
|
||||
case string:
|
||||
err := jsonStringToObject(v, &m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m, nil
|
||||
default:
|
||||
return nil, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// ToStringMapBoolE casts any value to a map[string]bool type.
|
||||
func ToStringMapBoolE(i any) (map[string]bool, error) {
|
||||
return toStringMapE(i, ToBool)
|
||||
}
|
||||
|
||||
// ToStringMapE casts any value to a map[string]any type.
|
||||
func ToStringMapE(i any) (map[string]any, error) {
|
||||
fn := func(i any) any { return i }
|
||||
|
||||
return toStringMapE(i, fn)
|
||||
}
|
||||
|
||||
func toStringMapIntE[T int | int64](i any, fn func(any) T, fnE func(any) (T, error)) (map[string]T, error) {
|
||||
m := map[string]T{}
|
||||
|
||||
if i == nil {
|
||||
return nil, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
|
||||
switch v := i.(type) {
|
||||
case map[string]T:
|
||||
return v, nil
|
||||
|
||||
case map[string]any:
|
||||
for k, val := range v {
|
||||
m[k] = fn(val)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
case map[any]T:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = val
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
case map[any]any:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = fn(val)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
case string:
|
||||
err := jsonStringToObject(v, &m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
if reflect.TypeOf(i).Kind() != reflect.Map {
|
||||
return nil, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
|
||||
mVal := reflect.ValueOf(m)
|
||||
v := reflect.ValueOf(i)
|
||||
|
||||
for _, keyVal := range v.MapKeys() {
|
||||
val, err := fnE(v.MapIndex(keyVal).Interface())
|
||||
if err != nil {
|
||||
return m, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
|
||||
mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// ToStringMapIntE casts any value to a map[string]int type.
|
||||
func ToStringMapIntE(i any) (map[string]int, error) {
|
||||
return toStringMapIntE(i, ToInt, ToIntE)
|
||||
}
|
||||
|
||||
// ToStringMapInt64E casts any value to a map[string]int64 type.
|
||||
func ToStringMapInt64E(i any) (map[string]int64, error) {
|
||||
return toStringMapIntE(i, ToInt64, ToInt64E)
|
||||
}
|
||||
|
||||
// jsonStringToObject attempts to unmarshall a string as JSON into
|
||||
// the object passed as pointer.
|
||||
func jsonStringToObject(s string, v any) error {
|
||||
data := []byte(s)
|
||||
return json.Unmarshal(data, v)
|
||||
}
|
||||
549
vendor/github.com/spf13/cast/number.go
generated
vendored
Normal file
|
|
@ -0,0 +1,549 @@
|
|||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cast
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var errNegativeNotAllowed = errors.New("unable to cast negative value")
|
||||
|
||||
type float64EProvider interface {
|
||||
Float64() (float64, error)
|
||||
}
|
||||
|
||||
type float64Provider interface {
|
||||
Float64() float64
|
||||
}
|
||||
|
||||
// Number is a type parameter constraint for functions accepting number types.
|
||||
//
|
||||
// It represents the supported number types this package can cast to.
|
||||
type Number interface {
|
||||
int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | float32 | float64
|
||||
}
|
||||
|
||||
type integer interface {
|
||||
int | int8 | int16 | int32 | int64
|
||||
}
|
||||
|
||||
type unsigned interface {
|
||||
uint | uint8 | uint16 | uint32 | uint64
|
||||
}
|
||||
|
||||
type float interface {
|
||||
float32 | float64
|
||||
}
|
||||
|
||||
// ToNumberE casts any value to a [Number] type.
|
||||
func ToNumberE[T Number](i any) (T, error) {
|
||||
var t T
|
||||
|
||||
switch any(t).(type) {
|
||||
case int:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
case int8:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
case int16:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
case int32:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
case int64:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
case uint:
|
||||
return toUnsignedNumberE[T](i, parseNumber[T])
|
||||
case uint8:
|
||||
return toUnsignedNumberE[T](i, parseNumber[T])
|
||||
case uint16:
|
||||
return toUnsignedNumberE[T](i, parseNumber[T])
|
||||
case uint32:
|
||||
return toUnsignedNumberE[T](i, parseNumber[T])
|
||||
case uint64:
|
||||
return toUnsignedNumberE[T](i, parseNumber[T])
|
||||
case float32:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
case float64:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
default:
|
||||
return 0, fmt.Errorf("unknown number type: %T", t)
|
||||
}
|
||||
}
|
||||
|
||||
// ToNumber casts any value to a [Number] type.
|
||||
func ToNumber[T Number](i any) T {
|
||||
v, _ := ToNumberE[T](i)
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// toNumber's semantics differ from other "to" functions.
|
||||
// It returns false as the second parameter if the conversion fails.
|
||||
// This is to signal other callers that they should proceed with their own conversions.
|
||||
func toNumber[T Number](i any) (T, bool) {
|
||||
i, _ = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case T:
|
||||
return s, true
|
||||
case int:
|
||||
return T(s), true
|
||||
case int8:
|
||||
return T(s), true
|
||||
case int16:
|
||||
return T(s), true
|
||||
case int32:
|
||||
return T(s), true
|
||||
case int64:
|
||||
return T(s), true
|
||||
case uint:
|
||||
return T(s), true
|
||||
case uint8:
|
||||
return T(s), true
|
||||
case uint16:
|
||||
return T(s), true
|
||||
case uint32:
|
||||
return T(s), true
|
||||
case uint64:
|
||||
return T(s), true
|
||||
case float32:
|
||||
return T(s), true
|
||||
case float64:
|
||||
return T(s), true
|
||||
case bool:
|
||||
if s {
|
||||
return 1, true
|
||||
}
|
||||
|
||||
return 0, true
|
||||
case nil:
|
||||
return 0, true
|
||||
case time.Weekday:
|
||||
return T(s), true
|
||||
case time.Month:
|
||||
return T(s), true
|
||||
}
|
||||
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func toNumberE[T Number](i any, parseFn func(string) (T, error)) (T, error) {
|
||||
n, ok := toNumber[T](i)
|
||||
if ok {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
i, _ = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
if s == "" {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
v, err := parseFn(s)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf(errorMsgWith, i, i, n, err)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
case json.Number:
|
||||
if s == "" {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
v, err := parseFn(string(s))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf(errorMsgWith, i, i, n, err)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
case float64EProvider:
|
||||
if _, ok := any(n).(float64); !ok {
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
|
||||
v, err := s.Float64()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
|
||||
return T(v), nil
|
||||
case float64Provider:
|
||||
if _, ok := any(n).(float64); !ok {
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
|
||||
return T(s.Float64()), nil
|
||||
default:
|
||||
if i, ok := resolveAlias(i); ok {
|
||||
return toNumberE(i, parseFn)
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
}
|
||||
|
||||
func toUnsignedNumber[T Number](i any) (T, bool, bool) {
|
||||
i, _ = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case T:
|
||||
return s, true, true
|
||||
case int:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case int8:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case int16:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case int32:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case int64:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case uint:
|
||||
return T(s), true, true
|
||||
case uint8:
|
||||
return T(s), true, true
|
||||
case uint16:
|
||||
return T(s), true, true
|
||||
case uint32:
|
||||
return T(s), true, true
|
||||
case uint64:
|
||||
return T(s), true, true
|
||||
case float32:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case float64:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case bool:
|
||||
if s {
|
||||
return 1, true, true
|
||||
}
|
||||
|
||||
return 0, true, true
|
||||
case nil:
|
||||
return 0, true, true
|
||||
case time.Weekday:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case time.Month:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
}
|
||||
|
||||
return 0, true, false
|
||||
}
|
||||
|
||||
func toUnsignedNumberE[T Number](i any, parseFn func(string) (T, error)) (T, error) {
|
||||
n, valid, ok := toUnsignedNumber[T](i)
|
||||
if ok {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
i, _ = indirect(i)
|
||||
|
||||
if !valid {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
if s == "" {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
v, err := parseFn(s)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf(errorMsgWith, i, i, n, err)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
case json.Number:
|
||||
if s == "" {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
v, err := parseFn(string(s))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf(errorMsgWith, i, i, n, err)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
case float64EProvider:
|
||||
if _, ok := any(n).(float64); !ok {
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
|
||||
v, err := s.Float64()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
|
||||
if v < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
|
||||
return T(v), nil
|
||||
case float64Provider:
|
||||
if _, ok := any(n).(float64); !ok {
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
|
||||
v := s.Float64()
|
||||
|
||||
if v < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
|
||||
return T(v), nil
|
||||
default:
|
||||
if i, ok := resolveAlias(i); ok {
|
||||
return toUnsignedNumberE(i, parseFn)
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
}
|
||||
|
||||
func parseNumber[T Number](s string) (T, error) {
|
||||
var t T
|
||||
|
||||
switch any(t).(type) {
|
||||
case int:
|
||||
v, err := parseInt[int](s)
|
||||
|
||||
return T(v), err
|
||||
case int8:
|
||||
v, err := parseInt[int8](s)
|
||||
|
||||
return T(v), err
|
||||
case int16:
|
||||
v, err := parseInt[int16](s)
|
||||
|
||||
return T(v), err
|
||||
case int32:
|
||||
v, err := parseInt[int32](s)
|
||||
|
||||
return T(v), err
|
||||
case int64:
|
||||
v, err := parseInt[int64](s)
|
||||
|
||||
return T(v), err
|
||||
case uint:
|
||||
v, err := parseUint[uint](s)
|
||||
|
||||
return T(v), err
|
||||
case uint8:
|
||||
v, err := parseUint[uint8](s)
|
||||
|
||||
return T(v), err
|
||||
case uint16:
|
||||
v, err := parseUint[uint16](s)
|
||||
|
||||
return T(v), err
|
||||
case uint32:
|
||||
v, err := parseUint[uint32](s)
|
||||
|
||||
return T(v), err
|
||||
case uint64:
|
||||
v, err := parseUint[uint64](s)
|
||||
|
||||
return T(v), err
|
||||
case float32:
|
||||
v, err := strconv.ParseFloat(s, 32)
|
||||
|
||||
return T(v), err
|
||||
case float64:
|
||||
v, err := strconv.ParseFloat(s, 64)
|
||||
|
||||
return T(v), err
|
||||
|
||||
default:
|
||||
return 0, fmt.Errorf("unknown number type: %T", t)
|
||||
}
|
||||
}
|
||||
|
||||
func parseInt[T integer](s string) (T, error) {
|
||||
v, err := strconv.ParseInt(trimDecimal(s), 0, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return T(v), nil
|
||||
}
|
||||
|
||||
func parseUint[T unsigned](s string) (T, error) {
|
||||
v, err := strconv.ParseUint(strings.TrimLeft(trimDecimal(s), "+"), 0, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return T(v), nil
|
||||
}
|
||||
|
||||
func parseFloat[T float](s string) (T, error) {
|
||||
var t T
|
||||
|
||||
var v any
|
||||
var err error
|
||||
|
||||
switch any(t).(type) {
|
||||
case float32:
|
||||
n, e := strconv.ParseFloat(s, 32)
|
||||
|
||||
v = float32(n)
|
||||
err = e
|
||||
case float64:
|
||||
n, e := strconv.ParseFloat(s, 64)
|
||||
|
||||
v = float64(n)
|
||||
err = e
|
||||
}
|
||||
|
||||
return v.(T), err
|
||||
}
|
||||
|
||||
// ToFloat64E casts an interface to a float64 type.
|
||||
func ToFloat64E(i any) (float64, error) {
|
||||
return toNumberE[float64](i, parseFloat[float64])
|
||||
}
|
||||
|
||||
// ToFloat32E casts an interface to a float32 type.
|
||||
func ToFloat32E(i any) (float32, error) {
|
||||
return toNumberE[float32](i, parseFloat[float32])
|
||||
}
|
||||
|
||||
// ToInt64E casts an interface to an int64 type.
|
||||
func ToInt64E(i any) (int64, error) {
|
||||
return toNumberE[int64](i, parseInt[int64])
|
||||
}
|
||||
|
||||
// ToInt32E casts an interface to an int32 type.
|
||||
func ToInt32E(i any) (int32, error) {
|
||||
return toNumberE[int32](i, parseInt[int32])
|
||||
}
|
||||
|
||||
// ToInt16E casts an interface to an int16 type.
|
||||
func ToInt16E(i any) (int16, error) {
|
||||
return toNumberE[int16](i, parseInt[int16])
|
||||
}
|
||||
|
||||
// ToInt8E casts an interface to an int8 type.
|
||||
func ToInt8E(i any) (int8, error) {
|
||||
return toNumberE[int8](i, parseInt[int8])
|
||||
}
|
||||
|
||||
// ToIntE casts an interface to an int type.
|
||||
func ToIntE(i any) (int, error) {
|
||||
return toNumberE[int](i, parseInt[int])
|
||||
}
|
||||
|
||||
// ToUintE casts an interface to a uint type.
|
||||
func ToUintE(i any) (uint, error) {
|
||||
return toUnsignedNumberE[uint](i, parseUint[uint])
|
||||
}
|
||||
|
||||
// ToUint64E casts an interface to a uint64 type.
|
||||
func ToUint64E(i any) (uint64, error) {
|
||||
return toUnsignedNumberE[uint64](i, parseUint[uint64])
|
||||
}
|
||||
|
||||
// ToUint32E casts an interface to a uint32 type.
|
||||
func ToUint32E(i any) (uint32, error) {
|
||||
return toUnsignedNumberE[uint32](i, parseUint[uint32])
|
||||
}
|
||||
|
||||
// ToUint16E casts an interface to a uint16 type.
|
||||
func ToUint16E(i any) (uint16, error) {
|
||||
return toUnsignedNumberE[uint16](i, parseUint[uint16])
|
||||
}
|
||||
|
||||
// ToUint8E casts an interface to a uint type.
|
||||
func ToUint8E(i any) (uint8, error) {
|
||||
return toUnsignedNumberE[uint8](i, parseUint[uint8])
|
||||
}
|
||||
|
||||
func trimZeroDecimal(s string) string {
|
||||
var foundZero bool
|
||||
for i := len(s); i > 0; i-- {
|
||||
switch s[i-1] {
|
||||
case '.':
|
||||
if foundZero {
|
||||
return s[:i-1]
|
||||
}
|
||||
case '0':
|
||||
foundZero = true
|
||||
default:
|
||||
return s
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var stringNumberRe = regexp.MustCompile(`^([-+]?\d*)(\.\d*)?$`)
|
||||
|
||||
// see [BenchmarkDecimal] for details about the implementation
|
||||
func trimDecimal(s string) string {
|
||||
if !strings.Contains(s, ".") {
|
||||
return s
|
||||
}
|
||||
|
||||
matches := stringNumberRe.FindStringSubmatch(s)
|
||||
if matches != nil {
|
||||
// matches[1] is the captured integer part with sign
|
||||
s = matches[1]
|
||||
|
||||
// handle special cases
|
||||
switch s {
|
||||
case "-", "+":
|
||||
s += "0"
|
||||
case "":
|
||||
s = "0"
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
106
vendor/github.com/spf13/cast/slice.go
generated
vendored
Normal file
|
|
@ -0,0 +1,106 @@
|
|||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ToSliceE casts any value to a []any type.
|
||||
func ToSliceE(i any) ([]any, error) {
|
||||
i, _ = indirect(i)
|
||||
|
||||
var s []any
|
||||
|
||||
switch v := i.(type) {
|
||||
case []any:
|
||||
// TODO: use slices.Clone
|
||||
return append(s, v...), nil
|
||||
case []map[string]any:
|
||||
for _, u := range v {
|
||||
s = append(s, u)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
default:
|
||||
return s, fmt.Errorf(errorMsg, i, i, s)
|
||||
}
|
||||
}
|
||||
|
||||
func toSliceE[T Basic](i any) ([]T, error) {
|
||||
v, ok, err := toSliceEOk[T](i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return nil, fmt.Errorf(errorMsg, i, i, []T{})
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func toSliceEOk[T Basic](i any) ([]T, bool, error) {
|
||||
i, _ = indirect(i)
|
||||
if i == nil {
|
||||
return nil, true, fmt.Errorf(errorMsg, i, i, []T{})
|
||||
}
|
||||
|
||||
switch v := i.(type) {
|
||||
case []T:
|
||||
// TODO: clone slice
|
||||
return v, true, nil
|
||||
}
|
||||
|
||||
kind := reflect.TypeOf(i).Kind()
|
||||
switch kind {
|
||||
case reflect.Slice, reflect.Array:
|
||||
s := reflect.ValueOf(i)
|
||||
a := make([]T, s.Len())
|
||||
|
||||
for j := 0; j < s.Len(); j++ {
|
||||
val, err := ToE[T](s.Index(j).Interface())
|
||||
if err != nil {
|
||||
return nil, true, fmt.Errorf(errorMsg, i, i, []T{})
|
||||
}
|
||||
|
||||
a[j] = val
|
||||
}
|
||||
|
||||
return a, true, nil
|
||||
default:
|
||||
return nil, false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ToStringSliceE casts any value to a []string type.
|
||||
func ToStringSliceE(i any) ([]string, error) {
|
||||
if a, ok, err := toSliceEOk[string](i); ok {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var a []string
|
||||
|
||||
switch v := i.(type) {
|
||||
case string:
|
||||
return strings.Fields(v), nil
|
||||
case any:
|
||||
str, err := ToStringE(v)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(errorMsg, i, i, a)
|
||||
}
|
||||
|
||||
return []string{str}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf(errorMsg, i, i, a)
|
||||
}
|
||||
}
|
||||
116
vendor/github.com/spf13/cast/time.go
generated
vendored
Normal file
|
|
@ -0,0 +1,116 @@
|
|||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cast
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cast/internal"
|
||||
)
|
||||
|
||||
// ToTimeE any value to a [time.Time] type.
|
||||
func ToTimeE(i any) (time.Time, error) {
|
||||
return ToTimeInDefaultLocationE(i, time.UTC)
|
||||
}
|
||||
|
||||
// ToTimeInDefaultLocationE casts an empty interface to [time.Time],
|
||||
// interpreting inputs without a timezone to be in the given location,
|
||||
// or the local timezone if nil.
|
||||
func ToTimeInDefaultLocationE(i any, location *time.Location) (tim time.Time, err error) {
|
||||
i, _ = indirect(i)
|
||||
|
||||
switch v := i.(type) {
|
||||
case time.Time:
|
||||
return v, nil
|
||||
case string:
|
||||
return StringToDateInDefaultLocation(v, location)
|
||||
case json.Number:
|
||||
// Originally this used ToInt64E, but adding string float conversion broke ToTime.
|
||||
// the behavior of ToTime would have changed if we continued using it.
|
||||
// For now, using json.Number's own Int64 method should be good enough to preserve backwards compatibility.
|
||||
v = json.Number(trimZeroDecimal(string(v)))
|
||||
s, err1 := v.Int64()
|
||||
if err1 != nil {
|
||||
return time.Time{}, fmt.Errorf(errorMsg, i, i, time.Time{})
|
||||
}
|
||||
return time.Unix(s, 0), nil
|
||||
case int:
|
||||
return time.Unix(int64(v), 0), nil
|
||||
case int32:
|
||||
return time.Unix(int64(v), 0), nil
|
||||
case int64:
|
||||
return time.Unix(v, 0), nil
|
||||
case uint:
|
||||
return time.Unix(int64(v), 0), nil
|
||||
case uint32:
|
||||
return time.Unix(int64(v), 0), nil
|
||||
case uint64:
|
||||
return time.Unix(int64(v), 0), nil
|
||||
case nil:
|
||||
return time.Time{}, nil
|
||||
default:
|
||||
return time.Time{}, fmt.Errorf(errorMsg, i, i, time.Time{})
|
||||
}
|
||||
}
|
||||
|
||||
// ToDurationE casts any value to a [time.Duration] type.
|
||||
func ToDurationE(i any) (time.Duration, error) {
|
||||
i, _ = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case time.Duration:
|
||||
return s, nil
|
||||
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
|
||||
v, err := ToInt64E(s)
|
||||
if err != nil {
|
||||
// TODO: once there is better error handling, this should be easier
|
||||
return 0, errors.New(strings.ReplaceAll(err.Error(), " int64", "time.Duration"))
|
||||
}
|
||||
|
||||
return time.Duration(v), nil
|
||||
case float32, float64, float64EProvider, float64Provider:
|
||||
v, err := ToFloat64E(s)
|
||||
if err != nil {
|
||||
// TODO: once there is better error handling, this should be easier
|
||||
return 0, errors.New(strings.ReplaceAll(err.Error(), " float64", "time.Duration"))
|
||||
}
|
||||
|
||||
return time.Duration(v), nil
|
||||
case string:
|
||||
if !strings.ContainsAny(s, "nsuµmh") {
|
||||
return time.ParseDuration(s + "ns")
|
||||
}
|
||||
|
||||
return time.ParseDuration(s)
|
||||
case nil:
|
||||
return time.Duration(0), nil
|
||||
default:
|
||||
if i, ok := resolveAlias(i); ok {
|
||||
return ToDurationE(i)
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf(errorMsg, i, i, time.Duration(0))
|
||||
}
|
||||
}
|
||||
|
||||
// StringToDate attempts to parse a string into a [time.Time] type using a
|
||||
// predefined list of formats.
|
||||
//
|
||||
// If no suitable format is found, an error is returned.
|
||||
func StringToDate(s string) (time.Time, error) {
|
||||
return internal.ParseDateWith(s, time.UTC, internal.TimeFormats)
|
||||
}
|
||||
|
||||
// StringToDateInDefaultLocation casts an empty interface to a [time.Time],
|
||||
// interpreting inputs without a timezone to be in the given location,
|
||||
// or the local timezone if nil.
|
||||
func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) {
|
||||
return internal.ParseDateWith(s, location, internal.TimeFormats)
|
||||
}
|
||||
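ToDurationE above parses bare numeric strings as nanoseconds and defers to time.ParseDuration when a unit rune is present. A small sketch of that behavior, assuming the vendored package:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// A bare number has no unit rune, so it is parsed as nanoseconds.
	fmt.Println(cast.ToDuration("1500")) // 1.5µs

	// With a unit present the string goes straight to time.ParseDuration.
	fmt.Println(cast.ToDuration("90m")) // 1h30m0s
}
```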
27
vendor/github.com/spf13/cast/timeformattype_string.go
generated
vendored
|
|
@ -1,27 +0,0 @@
|
|||
// Code generated by "stringer -type timeFormatType"; DO NOT EDIT.
|
||||
|
||||
package cast
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[timeFormatNoTimezone-0]
|
||||
_ = x[timeFormatNamedTimezone-1]
|
||||
_ = x[timeFormatNumericTimezone-2]
|
||||
_ = x[timeFormatNumericAndNamedTimezone-3]
|
||||
_ = x[timeFormatTimeOnly-4]
|
||||
}
|
||||
|
||||
const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly"
|
||||
|
||||
var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119}
|
||||
|
||||
func (i timeFormatType) String() string {
|
||||
if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) {
|
||||
return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]]
|
||||
}
|
||||
261
vendor/github.com/spf13/cast/zz_generated.go
generated
vendored
Normal file
|
|
@ -0,0 +1,261 @@
|
|||
// Code generated by cast generator. DO NOT EDIT.
|
||||
|
||||
package cast
|
||||
|
||||
import "time"
|
||||
|
||||
// ToBool casts any value to a(n) bool type.
|
||||
func ToBool(i any) bool {
|
||||
v, _ := ToBoolE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToString casts any value to a(n) string type.
|
||||
func ToString(i any) string {
|
||||
v, _ := ToStringE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToTime casts any value to a(n) time.Time type.
|
||||
func ToTime(i any) time.Time {
|
||||
v, _ := ToTimeE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToTimeInDefaultLocation casts any value to a(n) time.Time type.
|
||||
func ToTimeInDefaultLocation(i any, location *time.Location) time.Time {
|
||||
v, _ := ToTimeInDefaultLocationE(i, location)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToDuration casts any value to a(n) time.Duration type.
|
||||
func ToDuration(i any) time.Duration {
|
||||
v, _ := ToDurationE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt casts any value to a(n) int type.
|
||||
func ToInt(i any) int {
|
||||
v, _ := ToIntE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt8 casts any value to a(n) int8 type.
|
||||
func ToInt8(i any) int8 {
|
||||
v, _ := ToInt8E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt16 casts any value to a(n) int16 type.
|
||||
func ToInt16(i any) int16 {
|
||||
v, _ := ToInt16E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt32 casts any value to a(n) int32 type.
|
||||
func ToInt32(i any) int32 {
    v, _ := ToInt32E(i)
    return v
}

// ToInt64 casts any value to a(n) int64 type.
func ToInt64(i any) int64 {
    v, _ := ToInt64E(i)
    return v
}

// ToUint casts any value to a(n) uint type.
func ToUint(i any) uint {
    v, _ := ToUintE(i)
    return v
}

// ToUint8 casts any value to a(n) uint8 type.
func ToUint8(i any) uint8 {
    v, _ := ToUint8E(i)
    return v
}

// ToUint16 casts any value to a(n) uint16 type.
func ToUint16(i any) uint16 {
    v, _ := ToUint16E(i)
    return v
}

// ToUint32 casts any value to a(n) uint32 type.
func ToUint32(i any) uint32 {
    v, _ := ToUint32E(i)
    return v
}

// ToUint64 casts any value to a(n) uint64 type.
func ToUint64(i any) uint64 {
    v, _ := ToUint64E(i)
    return v
}

// ToFloat32 casts any value to a(n) float32 type.
func ToFloat32(i any) float32 {
    v, _ := ToFloat32E(i)
    return v
}

// ToFloat64 casts any value to a(n) float64 type.
func ToFloat64(i any) float64 {
    v, _ := ToFloat64E(i)
    return v
}

// ToStringMapString casts any value to a(n) map[string]string type.
func ToStringMapString(i any) map[string]string {
    v, _ := ToStringMapStringE(i)
    return v
}

// ToStringMapStringSlice casts any value to a(n) map[string][]string type.
func ToStringMapStringSlice(i any) map[string][]string {
    v, _ := ToStringMapStringSliceE(i)
    return v
}

// ToStringMapBool casts any value to a(n) map[string]bool type.
func ToStringMapBool(i any) map[string]bool {
    v, _ := ToStringMapBoolE(i)
    return v
}

// ToStringMapInt casts any value to a(n) map[string]int type.
func ToStringMapInt(i any) map[string]int {
    v, _ := ToStringMapIntE(i)
    return v
}

// ToStringMapInt64 casts any value to a(n) map[string]int64 type.
func ToStringMapInt64(i any) map[string]int64 {
    v, _ := ToStringMapInt64E(i)
    return v
}

// ToStringMap casts any value to a(n) map[string]any type.
func ToStringMap(i any) map[string]any {
    v, _ := ToStringMapE(i)
    return v
}

// ToSlice casts any value to a(n) []any type.
func ToSlice(i any) []any {
    v, _ := ToSliceE(i)
    return v
}

// ToBoolSlice casts any value to a(n) []bool type.
func ToBoolSlice(i any) []bool {
    v, _ := ToBoolSliceE(i)
    return v
}

// ToStringSlice casts any value to a(n) []string type.
func ToStringSlice(i any) []string {
    v, _ := ToStringSliceE(i)
    return v
}

// ToIntSlice casts any value to a(n) []int type.
func ToIntSlice(i any) []int {
    v, _ := ToIntSliceE(i)
    return v
}

// ToInt64Slice casts any value to a(n) []int64 type.
func ToInt64Slice(i any) []int64 {
    v, _ := ToInt64SliceE(i)
    return v
}

// ToUintSlice casts any value to a(n) []uint type.
func ToUintSlice(i any) []uint {
    v, _ := ToUintSliceE(i)
    return v
}

// ToFloat64Slice casts any value to a(n) []float64 type.
func ToFloat64Slice(i any) []float64 {
    v, _ := ToFloat64SliceE(i)
    return v
}

// ToDurationSlice casts any value to a(n) []time.Duration type.
func ToDurationSlice(i any) []time.Duration {
    v, _ := ToDurationSliceE(i)
    return v
}

// ToBoolSliceE casts any value to a(n) []bool type.
func ToBoolSliceE(i any) ([]bool, error) {
    return toSliceE[bool](i)
}

// ToDurationSliceE casts any value to a(n) []time.Duration type.
func ToDurationSliceE(i any) ([]time.Duration, error) {
    return toSliceE[time.Duration](i)
}

// ToIntSliceE casts any value to a(n) []int type.
func ToIntSliceE(i any) ([]int, error) {
    return toSliceE[int](i)
}

// ToInt8SliceE casts any value to a(n) []int8 type.
func ToInt8SliceE(i any) ([]int8, error) {
    return toSliceE[int8](i)
}

// ToInt16SliceE casts any value to a(n) []int16 type.
func ToInt16SliceE(i any) ([]int16, error) {
    return toSliceE[int16](i)
}

// ToInt32SliceE casts any value to a(n) []int32 type.
func ToInt32SliceE(i any) ([]int32, error) {
    return toSliceE[int32](i)
}

// ToInt64SliceE casts any value to a(n) []int64 type.
func ToInt64SliceE(i any) ([]int64, error) {
    return toSliceE[int64](i)
}

// ToUintSliceE casts any value to a(n) []uint type.
func ToUintSliceE(i any) ([]uint, error) {
    return toSliceE[uint](i)
}

// ToUint8SliceE casts any value to a(n) []uint8 type.
func ToUint8SliceE(i any) ([]uint8, error) {
    return toSliceE[uint8](i)
}

// ToUint16SliceE casts any value to a(n) []uint16 type.
func ToUint16SliceE(i any) ([]uint16, error) {
    return toSliceE[uint16](i)
}

// ToUint32SliceE casts any value to a(n) []uint32 type.
func ToUint32SliceE(i any) ([]uint32, error) {
    return toSliceE[uint32](i)
}

// ToUint64SliceE casts any value to a(n) []uint64 type.
func ToUint64SliceE(i any) ([]uint64, error) {
    return toSliceE[uint64](i)
}

// ToFloat32SliceE casts any value to a(n) []float32 type.
func ToFloat32SliceE(i any) ([]float32, error) {
    return toSliceE[float32](i)
}

// ToFloat64SliceE casts any value to a(n) []float64 type.
func ToFloat64SliceE(i any) ([]float64, error) {
    return toSliceE[float64](i)
}
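Each plain ToXxx helper above simply discards the error from its ToXxxE counterpart and returns the zero value on failure. A minimal usage sketch (the import path is an assumption about where this vendored file lives, commonly github.com/spf13/cast; the values are illustrative):

```
package main

import (
    "fmt"

    "github.com/spf13/cast" // assumed import path for the package shown above
)

func main() {
    // The plain ToXxx helpers swallow conversion errors and return the zero value.
    fmt.Println(cast.ToInt64("42"))           // 42
    fmt.Println(cast.ToInt64("not a number")) // 0

    // The ToXxxE variants report the error instead.
    if _, err := cast.ToInt64E("not a number"); err != nil {
        fmt.Println("conversion failed:", err)
    }

    // The slice helpers follow the same pattern.
    fmt.Println(cast.ToStringSlice([]any{"a", "b"})) // [a b]
}
```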
32
vendor/github.com/ugorji/go/codec/README.md
generated
vendored
@ -80,6 +80,32 @@ Rich Feature Set includes:
rpc server/client codec to support msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md

# Supported build tags

We gain performance by code-generating fast-paths for slices and maps of built-in types,
and monomorphizing generic code explicitly so we gain inlining and de-virtualization benefits.

The results are 20-40% performance improvements.

Building and running is configured using build tags as below.

At runtime:

- codec.safe: run in safe mode (not using unsafe optimizations)
- codec.notmono: use generics code (bypassing performance-boosting monomorphized code)
- codec.notfastpath: skip fast path code for slices and maps of built-in types (number, bool, string, bytes)

Each of these "runtime" tags have a convenience synonym i.e. safe, notmono, notfastpath.
Pls use these mostly during development - use codec.XXX in your go files.

Build only:

- codec.build: used to generate fastpath and monomorphization code

Test only:

- codec.notmammoth: skip the mammoth generated tests

# Extension Support

Users can register a function to handle the encoding or decoding of their custom

@ -219,6 +245,12 @@ You can run the tag 'codec.safe' to run tests or build in safe mode. e.g.
go test -tags "alltests codec.safe" -run Suite
```

You can run the tag 'codec.notmono' to build bypassing the monomorphized code e.g.

```
go test -tags codec.notmono -run Json
```

# Running Benchmarks

```
259
vendor/github.com/ugorji/go/codec/base.fastpath.generated.go
generated
vendored
Normal file
@ -0,0 +1,259 @@
//go:build !notfastpath && !codec.notfastpath

// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

// Code generated from fastpath.go.tmpl - DO NOT EDIT.

package codec

// Fast path functions try to create a fast path encode or decode implementation
// for common maps and slices.
//
// We define the functions and register them in this single file
// so as not to pollute the encode.go and decode.go, and create a dependency in there.
// This file can be omitted without causing a build failure.
//
// The advantage of fast paths is:
//   - Many calls bypass reflection altogether
//
// Currently support
//   - slice of all builtin types (numeric, bool, string, []byte)
//   - maps of builtin types to builtin or interface{} type, EXCEPT FOR
//     keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{}
//     AND values of type type int8/16/32, uint16/32
// This should provide adequate "typical" implementations.
//
// Note that fast track decode functions must handle values for which an address cannot be obtained.
// For example:
//   m2 := map[string]int{}
//   p2 := []interface{}{m2}
//   // decoding into p2 will bomb if fast track functions do not treat like unaddressable.
//

import (
    "reflect"
    "slices"
    "sort"
)

const fastpathEnabled = true

type fastpathARtid [56]uintptr

type fastpathRtRtid struct {
    rtid uintptr
    rt   reflect.Type
}
type fastpathARtRtid [56]fastpathRtRtid

var (
    fastpathAvRtidArr   fastpathARtid
    fastpathAvRtRtidArr fastpathARtRtid
    fastpathAvRtid      = fastpathAvRtidArr[:]
    fastpathAvRtRtid    = fastpathAvRtRtidArr[:]
)

func fastpathAvIndex(rtid uintptr) (i uint, ok bool) {
    return searchRtids(fastpathAvRtid, rtid)
}
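The registration scheme used here (an array of type ids that init() sorts once, with fastpathAvIndex presumably doing a binary search via searchRtids) can be illustrated in isolation. A standalone sketch of the same idea, not the package's actual internals (searchRtids and rt2id are internal helpers not shown in this hunk):

```
package main

import (
    "fmt"
    "reflect"
    "slices"
)

func main() {
    // Collect a "type id" per registered type; the reflect.Type pointer
    // value stands in for the library's rtid.
    types := []reflect.Type{
        reflect.TypeOf([]string(nil)),
        reflect.TypeOf(map[string]int(nil)),
        reflect.TypeOf([]bool(nil)),
    }
    ids := make([]uintptr, len(types))
    for i, t := range types {
        ids[i] = reflect.ValueOf(t).Pointer()
    }
    slices.Sort(ids) // sort once at startup, like the generated init() above

    // Lookup is then a binary search over a small array rather than a map access.
    want := reflect.ValueOf(reflect.TypeOf([]bool(nil))).Pointer()
    if i, ok := slices.BinarySearch(ids, want); ok {
        fmt.Println("fast path registered at index", i)
    }
}
```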
|
||||
func init() {
|
||||
var i uint = 0
|
||||
fn := func(v interface{}) {
|
||||
xrt := reflect.TypeOf(v)
|
||||
xrtid := rt2id(xrt)
|
||||
xptrtid := rt2id(reflect.PointerTo(xrt))
|
||||
fastpathAvRtid[i] = xrtid
|
||||
fastpathAvRtRtid[i] = fastpathRtRtid{rtid: xrtid, rt: xrt}
|
||||
encBuiltinRtids = append(encBuiltinRtids, xrtid, xptrtid)
|
||||
decBuiltinRtids = append(decBuiltinRtids, xrtid, xptrtid)
|
||||
i++
|
||||
}
|
||||
|
||||
fn([]interface{}(nil))
|
||||
fn([]string(nil))
|
||||
fn([][]byte(nil))
|
||||
fn([]float32(nil))
|
||||
fn([]float64(nil))
|
||||
fn([]uint8(nil))
|
||||
fn([]uint64(nil))
|
||||
fn([]int(nil))
|
||||
fn([]int32(nil))
|
||||
fn([]int64(nil))
|
||||
fn([]bool(nil))
|
||||
|
||||
fn(map[string]interface{}(nil))
|
||||
fn(map[string]string(nil))
|
||||
fn(map[string][]byte(nil))
|
||||
fn(map[string]uint8(nil))
|
||||
fn(map[string]uint64(nil))
|
||||
fn(map[string]int(nil))
|
||||
fn(map[string]int32(nil))
|
||||
fn(map[string]float64(nil))
|
||||
fn(map[string]bool(nil))
|
||||
fn(map[uint8]interface{}(nil))
|
||||
fn(map[uint8]string(nil))
|
||||
fn(map[uint8][]byte(nil))
|
||||
fn(map[uint8]uint8(nil))
|
||||
fn(map[uint8]uint64(nil))
|
||||
fn(map[uint8]int(nil))
|
||||
fn(map[uint8]int32(nil))
|
||||
fn(map[uint8]float64(nil))
|
||||
fn(map[uint8]bool(nil))
|
||||
fn(map[uint64]interface{}(nil))
|
||||
fn(map[uint64]string(nil))
|
||||
fn(map[uint64][]byte(nil))
|
||||
fn(map[uint64]uint8(nil))
|
||||
fn(map[uint64]uint64(nil))
|
||||
fn(map[uint64]int(nil))
|
||||
fn(map[uint64]int32(nil))
|
||||
fn(map[uint64]float64(nil))
|
||||
fn(map[uint64]bool(nil))
|
||||
fn(map[int]interface{}(nil))
|
||||
fn(map[int]string(nil))
|
||||
fn(map[int][]byte(nil))
|
||||
fn(map[int]uint8(nil))
|
||||
fn(map[int]uint64(nil))
|
||||
fn(map[int]int(nil))
|
||||
fn(map[int]int32(nil))
|
||||
fn(map[int]float64(nil))
|
||||
fn(map[int]bool(nil))
|
||||
fn(map[int32]interface{}(nil))
|
||||
fn(map[int32]string(nil))
|
||||
fn(map[int32][]byte(nil))
|
||||
fn(map[int32]uint8(nil))
|
||||
fn(map[int32]uint64(nil))
|
||||
fn(map[int32]int(nil))
|
||||
fn(map[int32]int32(nil))
|
||||
fn(map[int32]float64(nil))
|
||||
fn(map[int32]bool(nil))
|
||||
|
||||
sort.Slice(fastpathAvRtid, func(i, j int) bool { return fastpathAvRtid[i] < fastpathAvRtid[j] })
|
||||
sort.Slice(fastpathAvRtRtid, func(i, j int) bool { return fastpathAvRtRtid[i].rtid < fastpathAvRtRtid[j].rtid })
|
||||
slices.Sort(encBuiltinRtids)
|
||||
slices.Sort(decBuiltinRtids)
|
||||
}
|
||||
|
||||
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
|
||||
switch v := iv.(type) {
|
||||
case *[]interface{}:
|
||||
*v = nil
|
||||
case *[]string:
|
||||
*v = nil
|
||||
case *[][]byte:
|
||||
*v = nil
|
||||
case *[]float32:
|
||||
*v = nil
|
||||
case *[]float64:
|
||||
*v = nil
|
||||
case *[]uint8:
|
||||
*v = nil
|
||||
case *[]uint64:
|
||||
*v = nil
|
||||
case *[]int:
|
||||
*v = nil
|
||||
case *[]int32:
|
||||
*v = nil
|
||||
case *[]int64:
|
||||
*v = nil
|
||||
case *[]bool:
|
||||
*v = nil
|
||||
|
||||
case *map[string]interface{}:
|
||||
*v = nil
|
||||
case *map[string]string:
|
||||
*v = nil
|
||||
case *map[string][]byte:
|
||||
*v = nil
|
||||
case *map[string]uint8:
|
||||
*v = nil
|
||||
case *map[string]uint64:
|
||||
*v = nil
|
||||
case *map[string]int:
|
||||
*v = nil
|
||||
case *map[string]int32:
|
||||
*v = nil
|
||||
case *map[string]float64:
|
||||
*v = nil
|
||||
case *map[string]bool:
|
||||
*v = nil
|
||||
case *map[uint8]interface{}:
|
||||
*v = nil
|
||||
case *map[uint8]string:
|
||||
*v = nil
|
||||
case *map[uint8][]byte:
|
||||
*v = nil
|
||||
case *map[uint8]uint8:
|
||||
*v = nil
|
||||
case *map[uint8]uint64:
|
||||
*v = nil
|
||||
case *map[uint8]int:
|
||||
*v = nil
|
||||
case *map[uint8]int32:
|
||||
*v = nil
|
||||
case *map[uint8]float64:
|
||||
*v = nil
|
||||
case *map[uint8]bool:
|
||||
*v = nil
|
||||
case *map[uint64]interface{}:
|
||||
*v = nil
|
||||
case *map[uint64]string:
|
||||
*v = nil
|
||||
case *map[uint64][]byte:
|
||||
*v = nil
|
||||
case *map[uint64]uint8:
|
||||
*v = nil
|
||||
case *map[uint64]uint64:
|
||||
*v = nil
|
||||
case *map[uint64]int:
|
||||
*v = nil
|
||||
case *map[uint64]int32:
|
||||
*v = nil
|
||||
case *map[uint64]float64:
|
||||
*v = nil
|
||||
case *map[uint64]bool:
|
||||
*v = nil
|
||||
case *map[int]interface{}:
|
||||
*v = nil
|
||||
case *map[int]string:
|
||||
*v = nil
|
||||
case *map[int][]byte:
|
||||
*v = nil
|
||||
case *map[int]uint8:
|
||||
*v = nil
|
||||
case *map[int]uint64:
|
||||
*v = nil
|
||||
case *map[int]int:
|
||||
*v = nil
|
||||
case *map[int]int32:
|
||||
*v = nil
|
||||
case *map[int]float64:
|
||||
*v = nil
|
||||
case *map[int]bool:
|
||||
*v = nil
|
||||
case *map[int32]interface{}:
|
||||
*v = nil
|
||||
case *map[int32]string:
|
||||
*v = nil
|
||||
case *map[int32][]byte:
|
||||
*v = nil
|
||||
case *map[int32]uint8:
|
||||
*v = nil
|
||||
case *map[int32]uint64:
|
||||
*v = nil
|
||||
case *map[int32]int:
|
||||
*v = nil
|
||||
case *map[int32]int32:
|
||||
*v = nil
|
||||
case *map[int32]float64:
|
||||
*v = nil
|
||||
case *map[int32]bool:
|
||||
*v = nil
|
||||
|
||||
default:
|
||||
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
6259
vendor/github.com/ugorji/go/codec/base.fastpath.notmono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
@ -2,7 +2,6 @@
// Use of this source code is governed by a MIT license found in the LICENSE file.

//go:build notfastpath || codec.notfastpath
// +build notfastpath codec.notfastpath

package codec

@ -18,24 +17,18 @@ const fastpathEnabled = false
// This tag disables fastpath during build, allowing for faster build, test execution,
// short-program runs, etc.

func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }

// func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
// func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }

func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false }

type fastpathT struct{}
type fastpathE struct {
    rtid  uintptr
    rt    reflect.Type
    encfn func(*Encoder, *codecFnInfo, reflect.Value)
    decfn func(*Decoder, *codecFnInfo, reflect.Value)
func fastpathAvIndex(rtid uintptr) (uint, bool) { return 0, false }

type fastpathRtRtid struct {
    rtid uintptr
    rt   reflect.Type
}
type fastpathA [0]fastpathE

func fastpathAvIndex(rtid uintptr) int { return -1 }
type fastpathARtRtid [0]fastpathRtRtid

var fastpathAv fastpathA
var fastpathTV fastpathT
var fastpathAvRtRtid fastpathARtRtid
26
vendor/github.com/ugorji/go/codec/base.notfastpath.notmono.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
//go:build notfastpath || (codec.notfastpath && (notmono || codec.notmono))

// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

package codec

import "reflect"

// type fastpathT struct{}
type fastpathE[T encDriver] struct {
    rt    reflect.Type
    encfn func(*encoder[T], *encFnInfo, reflect.Value)
}
type fastpathD[T decDriver] struct {
    rt    reflect.Type
    decfn func(*decoder[T], *decFnInfo, reflect.Value)
}
type fastpathEs[T encDriver] [0]fastpathE[T]
type fastpathDs[T decDriver] [0]fastpathD[T]

func (helperEncDriver[T]) fastpathEncodeTypeSwitch(iv interface{}, e *encoder[T]) bool { return false }
func (helperDecDriver[T]) fastpathDecodeTypeSwitch(iv interface{}, d *decoder[T]) bool { return false }

func (helperEncDriver[T]) fastpathEList() (v *fastpathEs[T]) { return }
func (helperDecDriver[T]) fastpathDList() (v *fastpathDs[T]) { return }
194
vendor/github.com/ugorji/go/codec/binc.base.go
generated
vendored
Normal file
@ -0,0 +1,194 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Symbol management:
|
||||
// - symbols are stored in a symbol map during encoding and decoding.
|
||||
// - the symbols persist until the (En|De)coder ResetXXX method is called.
|
||||
|
||||
const bincDoPrune = true
|
||||
|
||||
// vd as low 4 bits (there are 16 slots)
|
||||
const (
|
||||
bincVdSpecial byte = iota
|
||||
bincVdPosInt
|
||||
bincVdNegInt
|
||||
bincVdFloat
|
||||
|
||||
bincVdString
|
||||
bincVdByteArray
|
||||
bincVdArray
|
||||
bincVdMap
|
||||
|
||||
bincVdTimestamp
|
||||
bincVdSmallInt
|
||||
_ // bincVdUnicodeOther
|
||||
bincVdSymbol
|
||||
|
||||
_ // bincVdDecimal
|
||||
_ // open slot
|
||||
_ // open slot
|
||||
bincVdCustomExt = 0x0f
|
||||
)
|
||||
|
||||
const (
|
||||
bincSpNil byte = iota
|
||||
bincSpFalse
|
||||
bincSpTrue
|
||||
bincSpNan
|
||||
bincSpPosInf
|
||||
bincSpNegInf
|
||||
bincSpZeroFloat
|
||||
bincSpZero
|
||||
bincSpNegOne
|
||||
)
|
||||
|
||||
const (
|
||||
_ byte = iota // bincFlBin16
|
||||
bincFlBin32
|
||||
_ // bincFlBin32e
|
||||
bincFlBin64
|
||||
_ // bincFlBin64e
|
||||
// others not currently supported
|
||||
)
|
||||
|
||||
const bincBdNil = 0 // bincVdSpecial<<4 | bincSpNil // staticcheck barfs on this (SA4016)
|
||||
|
||||
var (
|
||||
bincdescSpecialVsNames = map[byte]string{
|
||||
bincSpNil: "nil",
|
||||
bincSpFalse: "false",
|
||||
bincSpTrue: "true",
|
||||
bincSpNan: "float",
|
||||
bincSpPosInf: "float",
|
||||
bincSpNegInf: "float",
|
||||
bincSpZeroFloat: "float",
|
||||
bincSpZero: "uint",
|
||||
bincSpNegOne: "int",
|
||||
}
|
||||
bincdescVdNames = map[byte]string{
|
||||
bincVdSpecial: "special",
|
||||
bincVdSmallInt: "uint",
|
||||
bincVdPosInt: "uint",
|
||||
bincVdFloat: "float",
|
||||
bincVdSymbol: "string",
|
||||
bincVdString: "string",
|
||||
bincVdByteArray: "bytes",
|
||||
bincVdTimestamp: "time",
|
||||
bincVdCustomExt: "ext",
|
||||
bincVdArray: "array",
|
||||
bincVdMap: "map",
|
||||
}
|
||||
)
|
||||
|
||||
func bincdescbd(bd byte) (s string) {
|
||||
return bincdesc(bd>>4, bd&0x0f)
|
||||
}
|
||||
|
||||
func bincdesc(vd, vs byte) (s string) {
|
||||
if vd == bincVdSpecial {
|
||||
s = bincdescSpecialVsNames[vs]
|
||||
} else {
|
||||
s = bincdescVdNames[vd]
|
||||
}
|
||||
if s == "" {
|
||||
s = "unknown"
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type bincEncState struct {
|
||||
m map[string]uint16 // symbols
|
||||
}
|
||||
|
||||
// func (e *bincEncState) restoreState(v interface{}) { e.m = v.(map[string]uint16) }
|
||||
// func (e bincEncState) captureState() interface{} { return e.m }
|
||||
// func (e *bincEncState) resetState() { e.m = nil }
|
||||
// func (e *bincEncState) reset() { e.resetState() }
|
||||
func (e *bincEncState) reset() { e.m = nil }
|
||||
|
||||
type bincDecState struct {
|
||||
bdRead bool
|
||||
bd byte
|
||||
vd byte
|
||||
vs byte
|
||||
|
||||
_ bool
|
||||
// MARKER: consider using binary search here instead of a map (ie bincDecSymbol)
|
||||
s map[uint16][]byte
|
||||
}
|
||||
|
||||
// func (x bincDecState) captureState() interface{} { return x }
|
||||
// func (x *bincDecState) resetState() { *x = bincDecState{} }
|
||||
// func (x *bincDecState) reset() { x.resetState() }
|
||||
// func (x *bincDecState) restoreState(v interface{}) { *x = v.(bincDecState) }
|
||||
func (x *bincDecState) reset() { *x = bincDecState{} }
|
||||
|
||||
//------------------------------------

// BincHandle is a Handle for the Binc Schema-Free Encoding Format
// defined at https://github.com/ugorji/binc .
//
// BincHandle currently supports all Binc features with the following EXCEPTIONS:
//   - only integers up to 64 bits of precision are supported.
//     big integers are unsupported.
//   - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types).
//     extended precision and decimal IEEE 754 floats are unsupported.
//   - Only UTF-8 strings supported.
//     Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
//
// Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
type BincHandle struct {
    binaryEncodingType
    notJsonType
    // noElemSeparators
    BasicHandle

    // AsSymbols defines what should be encoded as symbols.
    //
    // Encoding as symbols can reduce the encoded size significantly.
    //
    // However, during decoding, each string to be encoded as a symbol must
    // be checked to see if it has been seen before. Consequently, encoding time
    // will increase if using symbols, because string comparisons has a clear cost.
    //
    // Values:
    //   - 0: default: library uses best judgement
    //   - 1: use symbols
    //   - 2: do not use symbols
    AsSymbols uint8

    // AsSymbols: may later on introduce more options ...
    // - m: map keys
    // - s: struct fields
    // - n: none
    // - a: all: same as m, s, ...

    // _ [7]uint64 // padding (cache-aligned)
}

// Name returns the name of the handle: binc
func (h *BincHandle) Name() string { return "binc" }

func (h *BincHandle) desc(bd byte) string { return bincdesc(bd>>4, bd&0x0f) }

// SetBytesExt sets an extension
func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
    return h.SetExt(rt, tag, makeExt(ext))
}
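As a rough usage sketch for the handle documented above (not part of this diff: the import path and the NewEncoderBytes/NewDecoderBytes constructors are assumptions about the ugorji/go/codec public API), a BincHandle is typically configured once and reused:

```
package main

import (
    "fmt"

    "github.com/ugorji/go/codec" // assumed import path for the vendored package above
)

func main() {
    var h codec.BincHandle
    h.AsSymbols = 1 // 1 = use symbols, per the field documentation above

    var buf []byte
    // NewEncoderBytes/NewDecoderBytes are assumed to be the package's
    // standard byte-slice constructors.
    enc := codec.NewEncoderBytes(&buf, &h)
    if err := enc.Encode(map[string]int{"a": 1, "b": 2}); err != nil {
        panic(err)
    }

    var out map[string]int
    dec := codec.NewDecoderBytes(buf, &h)
    if err := dec.Decode(&out); err != nil {
        panic(err)
    }
    fmt.Println(out)
}
```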
|
||||
|
||||
// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
|
||||
|
||||
func bincEncodeTime(t time.Time) []byte {
|
||||
return customEncodeTime(t)
|
||||
}
|
||||
|
||||
func bincDecodeTime(bs []byte) (tt time.Time, err error) {
|
||||
return customDecodeTime(bs)
|
||||
}
|
||||
12482
vendor/github.com/ugorji/go/codec/binc.fastpath.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
1009
vendor/github.com/ugorji/go/codec/binc.go
generated
vendored
File diff suppressed because it is too large
8158
vendor/github.com/ugorji/go/codec/binc.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
52
vendor/github.com/ugorji/go/codec/binc.notfastpath.mono.generated.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath)
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type fastpathEBincBytes struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderBincBytes, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDBincBytes struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderBincBytes, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsBincBytes [0]fastpathEBincBytes
|
||||
type fastpathDsBincBytes [0]fastpathDBincBytes
|
||||
|
||||
func (helperEncDriverBincBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderBincBytes) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverBincBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderBincBytes) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverBincBytes) fastpathEList() (v *fastpathEsBincBytes) { return }
|
||||
func (helperDecDriverBincBytes) fastpathDList() (v *fastpathDsBincBytes) { return }
|
||||
|
||||
type fastpathEBincIO struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderBincIO, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDBincIO struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderBincIO, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsBincIO [0]fastpathEBincIO
|
||||
type fastpathDsBincIO [0]fastpathDBincIO
|
||||
|
||||
func (helperEncDriverBincIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderBincIO) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverBincIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderBincIO) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverBincIO) fastpathEList() (v *fastpathEsBincIO) { return }
|
||||
func (helperDecDriverBincIO) fastpathDList() (v *fastpathDsBincIO) { return }
|
||||
355
vendor/github.com/ugorji/go/codec/build.sh
generated
vendored
@ -1,232 +1,61 @@
#!/bin/bash
|
||||
|
||||
# Run all the different permutations of all the tests and other things
|
||||
# This helps ensure that nothing gets broken.
|
||||
# Build and Run the different test permutations.
|
||||
# This helps validate that nothing gets broken.
|
||||
|
||||
_tests() {
|
||||
local vet="" # TODO: make it off
|
||||
local gover=$( ${gocmd} version | cut -f 3 -d ' ' )
|
||||
[[ $( ${gocmd} version ) == *"gccgo"* ]] && zcover=0
|
||||
[[ $( ${gocmd} version ) == *"gollvm"* ]] && zcover=0
|
||||
case $gover in
|
||||
go1.[7-9]*|go1.1[0-9]*|go2.*|devel*) true ;;
|
||||
*) return 1
|
||||
esac
|
||||
# note that codecgen requires fastpath, so you cannot do "codecgen codec.notfastpath"
|
||||
# we test the following permutations wnich all execute different code paths as below.
|
||||
echo "TestCodecSuite: (fastpath/unsafe), (!fastpath/unsafe), (fastpath/!unsafe), (!fastpath/!unsafe), (codecgen/unsafe)"
|
||||
local echo=1
|
||||
local nc=2 # count
|
||||
local cpus="1,$(nproc)"
|
||||
# if using the race detector, then set nc to
|
||||
if [[ " ${zargs[@]} " =~ "-race" ]]; then
|
||||
cpus="$(nproc)"
|
||||
fi
|
||||
local a=( "" "codec.notfastpath" "codec.safe" "codec.notfastpath codec.safe" "codecgen" )
|
||||
local b=()
|
||||
local c=()
|
||||
for i in "${a[@]}"
|
||||
do
|
||||
local i2=${i:-default}
|
||||
[[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'alltests $i'; RUN: 'TestCodecSuite'"
|
||||
[[ "$zcover" == "1" ]] && c=( -coverprofile "${i2// /-}.cov.out" )
|
||||
true &&
|
||||
${gocmd} vet -printfuncs "errorf" "$@" &&
|
||||
if [[ "$echo" == 1 ]]; then set -o xtrace; fi &&
|
||||
${gocmd} test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "alltests $i" -count $nc -cpu $cpus -run "TestCodecSuite" "${c[@]}" "$@" &
|
||||
if [[ "$echo" == 1 ]]; then set +o xtrace; fi
|
||||
b+=("${i2// /-}.cov.out")
|
||||
[[ "$zwait" == "1" ]] && wait
|
||||
|
||||
# if [[ "$?" != 0 ]]; then return 1; fi
|
||||
_build_proceed() {
|
||||
# return success (0) if we should, and 1 (fail) if not
|
||||
if [[ "${zforce}" ]]; then return 0; fi
|
||||
for a in "fastpath.generated.go" "json.mono.generated.go"; do
|
||||
if [[ ! -e "$a" ]]; then return 0; fi
|
||||
for b in `ls -1 *.go.tmpl gen.go gen_mono.go values_test.go`; do
|
||||
if [[ "$a" -ot "$b" ]]; then return 0; fi
|
||||
done
|
||||
done
|
||||
if [[ "$zextra" == "1" ]]; then
|
||||
[[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'codec.notfastpath x'; RUN: 'Test.*X$'"
|
||||
[[ "$zcover" == "1" ]] && c=( -coverprofile "x.cov.out" )
|
||||
${gocmd} test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "codec.notfastpath x" -count $nc -run 'Test.*X$' "${c[@]}" &
|
||||
b+=("x.cov.out")
|
||||
[[ "$zwait" == "1" ]] && wait
|
||||
fi
|
||||
wait
|
||||
# go tool cover is not supported for gccgo, gollvm, other non-standard go compilers
|
||||
[[ "$zcover" == "1" ]] &&
|
||||
command -v gocovmerge &&
|
||||
gocovmerge "${b[@]}" > __merge.cov.out &&
|
||||
${gocmd} tool cover -html=__merge.cov.out
|
||||
return 1
|
||||
}
|
||||
|
||||
# is a generation needed?
|
||||
_ng() {
|
||||
local a="$1"
|
||||
if [[ ! -e "$a" ]]; then echo 1; return; fi
|
||||
for i in `ls -1 *.go.tmpl gen.go values_test.go`
|
||||
do
|
||||
if [[ "$a" -ot "$i" ]]; then echo 1; return; fi
|
||||
done
|
||||
}
|
||||
|
||||
_prependbt() {
|
||||
cat > ${2} <<EOF
|
||||
// +build generated
|
||||
|
||||
EOF
|
||||
cat ${1} >> ${2}
|
||||
rm -f ${1}
|
||||
}
|
||||
|
||||
# _build generates fast-path.go and gen-helper.go.
|
||||
# _build generates fastpath.go
|
||||
_build() {
|
||||
if ! [[ "${zforce}" || $(_ng "fast-path.generated.go") || $(_ng "gen-helper.generated.go") || $(_ng "gen.generated.go") ]]; then return 0; fi
|
||||
|
||||
# if ! [[ "${zforce}" || $(_ng "fastpath.generated.go") || $(_ng "json.mono.generated.go") ]]; then return 0; fi
|
||||
_build_proceed
|
||||
if [ $? -eq 1 ]; then return 0; fi
|
||||
if [ "${zbak}" ]; then
|
||||
_zts=`date '+%m%d%Y_%H%M%S'`
|
||||
_gg=".generated.go"
|
||||
[ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak
|
||||
[ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak
|
||||
[ -e "fastpath${_gg}" ] && mv fastpath${_gg} fastpath${_gg}__${_zts}.bak
|
||||
[ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak
|
||||
fi
|
||||
rm -f gen-helper.generated.go fast-path.generated.go gen.generated.go \
|
||||
*safe.generated.go *_generated_test.go *.generated_ffjson_expose.go
|
||||
fi
|
||||
|
||||
rm -f fast*path.generated.go *mono*generated.go *_generated_test.go gen-from-tmpl*.generated.go
|
||||
|
||||
cat > gen.generated.go <<EOF
|
||||
// +build codecgen.exec
|
||||
local btags="codec.build codec.notmono codec.safe codec.notfastpath"
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
|
||||
|
||||
const genDecMapTmpl = \`
|
||||
EOF
|
||||
cat >> gen.generated.go < gen-dec-map.go.tmpl
|
||||
cat >> gen.generated.go <<EOF
|
||||
\`
|
||||
|
||||
const genDecListTmpl = \`
|
||||
EOF
|
||||
cat >> gen.generated.go < gen-dec-array.go.tmpl
|
||||
cat >> gen.generated.go <<EOF
|
||||
\`
|
||||
|
||||
const genEncChanTmpl = \`
|
||||
EOF
|
||||
cat >> gen.generated.go < gen-enc-chan.go.tmpl
|
||||
cat >> gen.generated.go <<EOF
|
||||
\`
|
||||
EOF
|
||||
cat > gen-from-tmpl.codec.generated.go <<EOF
|
||||
package codec
|
||||
func GenRunTmpl2Go(in, out string) { genRunTmpl2Go(in, out) }
|
||||
func GenRunSortTmpl2Go(in, out string) { genRunSortTmpl2Go(in, out) }
|
||||
EOF
|
||||
|
||||
# stub xxxRv and xxxRvSlice creation, before you create it
|
||||
cat > gen-from-tmpl.sort-slice-stubs.generated.go <<EOF
|
||||
// +build codecgen.sort_slice
|
||||
|
||||
package codec
|
||||
|
||||
import "reflect"
|
||||
import "time"
|
||||
|
||||
func GenTmplRun2Go(in, out string) { genTmplRun2Go(in, out) }
|
||||
func GenMonoAll() { genMonoAll() }
|
||||
EOF
|
||||
|
||||
for i in string bool uint64 int64 float64 bytes time; do
|
||||
local i2=$i
|
||||
case $i in
|
||||
'time' ) i2="time.Time";;
|
||||
'bytes' ) i2="[]byte";;
|
||||
esac
|
||||
|
||||
cat >> gen-from-tmpl.sort-slice-stubs.generated.go <<EOF
|
||||
type ${i}Rv struct { v ${i2}; r reflect.Value }
|
||||
|
||||
type ${i}RvSlice []${i}Rv
|
||||
|
||||
func (${i}RvSlice) Len() int { return 0 }
|
||||
func (${i}RvSlice) Less(i, j int) bool { return false }
|
||||
func (${i}RvSlice) Swap(i, j int) {}
|
||||
|
||||
type ${i}Intf struct { v ${i2}; i interface{} }
|
||||
|
||||
type ${i}IntfSlice []${i}Intf
|
||||
|
||||
func (${i}IntfSlice) Len() int { return 0 }
|
||||
func (${i}IntfSlice) Less(i, j int) bool { return false }
|
||||
func (${i}IntfSlice) Swap(i, j int) {}
|
||||
|
||||
cat > gen-from-tmpl.generated.go <<EOF
|
||||
//go:build ignore
|
||||
package main
|
||||
import "${zpkg}"
|
||||
func main() {
|
||||
codec.GenTmplRun2Go("fastpath.go.tmpl", "base.fastpath.generated.go")
|
||||
codec.GenTmplRun2Go("fastpath.notmono.go.tmpl", "base.fastpath.notmono.generated.go")
|
||||
codec.GenTmplRun2Go("mammoth_test.go.tmpl", "mammoth_generated_test.go")
|
||||
codec.GenMonoAll()
|
||||
}
|
||||
EOF
|
||||
done
|
||||
|
||||
sed -e 's+// __DO_NOT_REMOVE__NEEDED_FOR_REPLACING__IMPORT_PATH__FOR_CODEC_BENCH__+import . "github.com/ugorji/go/codec"+' \
|
||||
shared_test.go > bench/shared_test.go
|
||||
|
||||
# explicitly return 0 if this passes, else return 1
|
||||
local btags="codec.notfastpath codec.safe codecgen.exec"
|
||||
rm -f sort-slice.generated.go fast-path.generated.go gen-helper.generated.go mammoth_generated_test.go mammoth2_generated_test.go
|
||||
|
||||
cat > gen-from-tmpl.sort-slice.generated.go <<EOF
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import "${zpkg}"
|
||||
|
||||
func main() {
|
||||
codec.GenRunSortTmpl2Go("sort-slice.go.tmpl", "sort-slice.generated.go")
|
||||
}
|
||||
EOF
|
||||
|
||||
${gocmd} run -tags "$btags codecgen.sort_slice" gen-from-tmpl.sort-slice.generated.go || return 1
|
||||
rm -f gen-from-tmpl.sort-slice.generated.go
|
||||
|
||||
cat > gen-from-tmpl.generated.go <<EOF
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import "${zpkg}"
|
||||
|
||||
func main() {
|
||||
codec.GenRunTmpl2Go("fast-path.go.tmpl", "fast-path.generated.go")
|
||||
codec.GenRunTmpl2Go("gen-helper.go.tmpl", "gen-helper.generated.go")
|
||||
codec.GenRunTmpl2Go("mammoth-test.go.tmpl", "mammoth_generated_test.go")
|
||||
codec.GenRunTmpl2Go("mammoth2-test.go.tmpl", "mammoth2_generated_test.go")
|
||||
}
|
||||
EOF
|
||||
|
||||
${gocmd} run -tags "$btags" gen-from-tmpl.generated.go || return 1
|
||||
rm -f gen-from-tmpl.generated.go
|
||||
|
||||
rm -f gen-from-tmpl.*generated.go
|
||||
rm -f gen-from-tmpl*.generated.go
|
||||
return 0
|
||||
}
|
||||
|
||||
_codegenerators() {
|
||||
local c5="_generated_test.go"
|
||||
local c7="$PWD/codecgen"
|
||||
local c8="$c7/__codecgen"
|
||||
local c9="codecgen-scratch.go"
|
||||
|
||||
if ! [[ $zforce || $(_ng "values_codecgen${c5}") ]]; then return 0; fi
|
||||
|
||||
# Note: ensure you run the codecgen for this codebase/directory i.e. ./codecgen/codecgen
|
||||
true &&
|
||||
echo "codecgen ... " &&
|
||||
if [[ $zforce || ! -f "$c8" || "$c7/gen.go" -nt "$c8" ]]; then
|
||||
echo "rebuilding codecgen ... " && ( cd codecgen && ${gocmd} build -o $c8 ${zargs[*]} . )
|
||||
fi &&
|
||||
$c8 -rt 'codecgen' -t 'codecgen generated' -o "values_codecgen${c5}" -d 19780 "$zfin" "$zfin2" &&
|
||||
cp mammoth2_generated_test.go $c9 &&
|
||||
$c8 -t 'codecgen,!codec.notfastpath,!codec.notmammoth generated,!codec.notfastpath,!codec.notmammoth' -o "mammoth2_codecgen${c5}" -d 19781 "mammoth2_generated_test.go" &&
|
||||
rm -f $c9 &&
|
||||
echo "generators done!"
|
||||
}
|
||||
|
||||
_prebuild() {
|
||||
echo "prebuild: zforce: $zforce"
|
||||
local d="$PWD"
|
||||
local zfin="test_values.generated.go"
|
||||
local zfin2="test_values_flex.generated.go"
|
||||
|
|
@ -236,13 +65,12 @@ _prebuild() {
|
|||
# zpkg=${d##*/src/}
|
||||
# zgobase=${d%%/src/*}
|
||||
# rm -f *_generated_test.go
|
||||
rm -f codecgen-*.go &&
|
||||
# if [[ $zforce ]]; then ${gocmd} install ${zargs[*]} .; fi &&
|
||||
true &&
|
||||
_build &&
|
||||
cp $d/values_test.go $d/$zfin &&
|
||||
cp $d/values_flex_test.go $d/$zfin2 &&
|
||||
_codegenerators &&
|
||||
if [[ "$(type -t _codegenerators_external )" = "function" ]]; then _codegenerators_external ; fi &&
|
||||
if [[ $zforce ]]; then ${gocmd} install ${zargs[*]} .; fi &&
|
||||
returncode=0 &&
|
||||
echo "prebuild done successfully"
|
||||
rm -f $d/$zfin $d/$zfin2
|
||||
|
|
@ -251,54 +79,67 @@ _prebuild() {
|
|||
}
|
||||
|
||||
_make() {
|
||||
local makeforce=${zforce}
|
||||
zforce=1
|
||||
(cd codecgen && ${gocmd} install ${zargs[*]} .) && _prebuild && ${gocmd} install ${zargs[*]} .
|
||||
zforce=${makeforce}
|
||||
_prebuild && ${gocmd} install ${zargs[*]} .
|
||||
}
|
||||
|
||||
_clean() {
|
||||
rm -f \
|
||||
gen-from-tmpl.*generated.go \
|
||||
codecgen-*.go \
|
||||
test_values.generated.go test_values_flex.generated.go
|
||||
}
|
||||
|
||||
_release() {
|
||||
local reply
|
||||
read -p "Pre-release validation takes a few minutes and MUST be run from within GOPATH/src. Confirm y/n? " -n 1 -r reply
|
||||
echo
|
||||
if [[ ! $reply =~ ^[Yy]$ ]]; then return 1; fi
|
||||
_tests_run_one() {
|
||||
local tt="alltests $i"
|
||||
local rr="TestCodecSuite"
|
||||
if [[ "x$i" == "xx" ]]; then tt="codec.notmono codec.notfastpath x"; rr='Test.*X$'; fi
|
||||
local g=( ${zargs[*]} ${ztestargs[*]} -count $nc -cpu $cpus -vet "$vet" -tags "$tt" -run "$rr" )
|
||||
[[ "$zcover" == "1" ]] && g+=( -cover )
|
||||
# g+=( -ti "$k" )
|
||||
g+=( -tdiff )
|
||||
[[ "$zcover" == "1" ]] && g+=( -test.gocoverdir $covdir )
|
||||
local -
|
||||
set -x
|
||||
${gocmd} test "${g[@]}" &
|
||||
}
|
||||
|
||||
# expects GOROOT, GOROOT_BOOTSTRAP to have been set.
|
||||
if [[ -z "${GOROOT// }" || -z "${GOROOT_BOOTSTRAP// }" ]]; then return 1; fi
|
||||
# (cd $GOROOT && git checkout -f master && git pull && git reset --hard)
|
||||
(cd $GOROOT && git pull)
|
||||
local f=`pwd`/make.release.out
|
||||
cat > $f <<EOF
|
||||
========== `date` ===========
|
||||
EOF
|
||||
# # go 1.6 and below kept giving memory errors on Mac OS X during SDK build or go run execution,
|
||||
# # that is fine, as we only explicitly test the last 3 releases and tip (2 years).
|
||||
local makeforce=${zforce}
|
||||
zforce=1
|
||||
for i in 1.10 1.11 1.12 master
|
||||
do
|
||||
echo "*********** $i ***********" >>$f
|
||||
if [[ "$i" != "master" ]]; then i="release-branch.go$i"; fi
|
||||
(false ||
|
||||
(echo "===== BUILDING GO SDK for branch: $i ... =====" &&
|
||||
cd $GOROOT &&
|
||||
git checkout -f $i && git reset --hard && git clean -f . &&
|
||||
cd src && ./make.bash >>$f 2>&1 && sleep 1 ) ) &&
|
||||
echo "===== GO SDK BUILD DONE =====" &&
|
||||
_prebuild &&
|
||||
echo "===== PREBUILD DONE with exit: $? =====" &&
|
||||
_tests "$@"
|
||||
if [[ "$?" != 0 ]]; then return 1; fi
|
||||
_tests() {
|
||||
local vet="" # TODO: make it off
|
||||
local gover=$( ${gocmd} version | cut -f 3 -d ' ' )
|
||||
# go tool cover is not supported for gccgo, gollvm, other non-standard go compilers
|
||||
[[ $( ${gocmd} version ) == *"gccgo"* ]] && zcover=0
|
||||
[[ $( ${gocmd} version ) == *"gollvm"* ]] && zcover=0
|
||||
case $gover in
|
||||
go1.2[0-9]*|go2.*|devel*) true ;;
|
||||
*) return 1
|
||||
esac
|
||||
# we test the following permutations wnich all execute different code paths as below.
|
||||
echo "TestCodecSuite: (fastpath/unsafe), (!fastpath/unsafe), (fastpath/!unsafe), (!fastpath/!unsafe)"
|
||||
local nc=2 # count
|
||||
local cpus="1,$(nproc)"
|
||||
# if using the race detector, then set nc to
|
||||
if [[ " ${zargs[@]} " =~ "-race" ]]; then
|
||||
cpus="$(nproc)"
|
||||
fi
|
||||
local covdir=""
|
||||
local a=( "" "codec.safe" "codec.notfastpath" "codec.safe codec.notfastpath"
|
||||
"codec.notmono" "codec.notmono codec.safe"
|
||||
"codec.notmono codec.notfastpath" "codec.notmono codec.safe codec.notfastpath" )
|
||||
[[ "$zextra" == "1" ]] && a+=( "x" )
|
||||
[[ "$zcover" == "1" ]] && covdir=`mktemp -d`
|
||||
${gocmd} vet -printfuncs "errorf" "$@" || return 1
|
||||
for i in "${a[@]}"; do
|
||||
local j=${i:-default}; j="${j// /-}"; j="${j//codec./}"
|
||||
[[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'alltests $i'; RUN: 'TestCodecSuite'"
|
||||
_tests_run_one
|
||||
[[ "$zwait" == "1" ]] && wait
|
||||
# if [[ "$?" != 0 ]]; then return 1; fi
|
||||
done
|
||||
zforce=${makeforce}
|
||||
echo "++++++++ RELEASE TEST SUITES ALL PASSED ++++++++"
|
||||
wait
|
||||
[[ "$zcover" == "1" ]] &&
|
||||
echo "go tool covdata output" &&
|
||||
${gocmd} tool covdata percent -i $covdir &&
|
||||
${gocmd} tool covdata textfmt -i $covdir -o __cov.out &&
|
||||
${gocmd} tool cover -html=__cov.out
|
||||
}
|
||||
|
||||
_usage() {
|
||||
|
|
@ -306,11 +147,10 @@ _usage() {
|
|||
# -pf [p=prebuild (f=force)]
|
||||
|
||||
cat <<EOF
|
||||
primary usage: $0
|
||||
primary usage: $0
|
||||
-t[esow] -> t=tests [e=extra, s=short, o=cover, w=wait]
|
||||
-[md] -> [m=make, d=race detector]
|
||||
-[n l i] -> [n=inlining diagnostics, l=mid-stack inlining, i=check inlining for path (path)]
|
||||
-v -> v=verbose
|
||||
-v -> v=verbose (more v's to increase verbose level)
|
||||
EOF
|
||||
if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi
|
||||
}
|
||||
|
|
@ -331,15 +171,15 @@ _main() {
|
|||
local gocmd=${MYGOCMD:-go}
|
||||
|
||||
OPTIND=1
|
||||
while getopts ":cetmnrgpfvldsowkxyzi" flag
|
||||
while getopts ":cetmnrgpfvldsowikxyz" flag
|
||||
do
|
||||
case "x$flag" in
|
||||
'xw') zwait=1 ;;
|
||||
'xv') zverbose+=(1) ;;
|
||||
'xo') zcover=1 ;;
|
||||
'xe') zextra=1 ;;
|
||||
'xw') zwait=1 ;;
|
||||
'xf') zforce=1 ;;
|
||||
'xs') ztestargs+=("-short") ;;
|
||||
'xv') zverbose+=(1) ;;
|
||||
'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;;
|
||||
'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;;
|
||||
'xd') zargs+=("-race") ;;
|
||||
|
|
@ -357,14 +197,23 @@ _main() {
|
|||
'xg') _go ;;
|
||||
'xp') _prebuild "$@" ;;
|
||||
'xc') _clean "$@" ;;
|
||||
esac
|
||||
|
||||
# handle from local run.sh
|
||||
case "x$x" in
|
||||
'xi') _check_inlining_one "$@" ;;
|
||||
'xk') _go_compiler_validation_suite ;;
|
||||
'xx') _analyze_checks "$@" ;;
|
||||
'xy') _analyze_debug_types "$@" ;;
|
||||
'xz') _analyze_do_inlining_and_more "$@" ;;
|
||||
'xk') _go_compiler_validation_suite ;;
|
||||
'xi') _check_inlining_one "$@" ;;
|
||||
esac
|
||||
# unset zforce zargs zbenchflags
|
||||
}
|
||||
|
||||
[ "." = `dirname $0` ] && _main "$@"
|
||||
|
||||
# _xtrace() {
|
||||
# local -
|
||||
# set -x
|
||||
# "${@}"
|
||||
# }
|
||||
|
|
|
|||
160
vendor/github.com/ugorji/go/codec/cbor.base.go
generated
vendored
Normal file
@ -0,0 +1,160 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// major
|
||||
const (
|
||||
cborMajorUint byte = iota
|
||||
cborMajorNegInt
|
||||
cborMajorBytes
|
||||
cborMajorString
|
||||
cborMajorArray
|
||||
cborMajorMap
|
||||
cborMajorTag
|
||||
cborMajorSimpleOrFloat
|
||||
)
|
||||
|
||||
// simple
|
||||
const (
|
||||
cborBdFalse byte = 0xf4 + iota
|
||||
cborBdTrue
|
||||
cborBdNil
|
||||
cborBdUndefined
|
||||
cborBdExt
|
||||
cborBdFloat16
|
||||
cborBdFloat32
|
||||
cborBdFloat64
|
||||
)
|
||||
|
||||
// indefinite
|
||||
const (
|
||||
cborBdIndefiniteBytes byte = 0x5f
|
||||
cborBdIndefiniteString byte = 0x7f
|
||||
cborBdIndefiniteArray byte = 0x9f
|
||||
cborBdIndefiniteMap byte = 0xbf
|
||||
cborBdBreak byte = 0xff
|
||||
)
|
||||
|
||||
// These define some in-stream descriptors for
|
||||
// manual encoding e.g. when doing explicit indefinite-length
|
||||
const (
|
||||
CborStreamBytes byte = 0x5f
|
||||
CborStreamString byte = 0x7f
|
||||
CborStreamArray byte = 0x9f
|
||||
CborStreamMap byte = 0xbf
|
||||
CborStreamBreak byte = 0xff
|
||||
)
|
||||
|
||||
// base values
|
||||
const (
|
||||
cborBaseUint byte = 0x00
|
||||
cborBaseNegInt byte = 0x20
|
||||
cborBaseBytes byte = 0x40
|
||||
cborBaseString byte = 0x60
|
||||
cborBaseArray byte = 0x80
|
||||
cborBaseMap byte = 0xa0
|
||||
cborBaseTag byte = 0xc0
|
||||
cborBaseSimple byte = 0xe0
|
||||
)
|
||||
|
||||
// const (
|
||||
// cborSelfDesrTag byte = 0xd9
|
||||
// cborSelfDesrTag2 byte = 0xd9
|
||||
// cborSelfDesrTag3 byte = 0xf7
|
||||
// )
|
||||
|
||||
var (
|
||||
cbordescSimpleNames = map[byte]string{
|
||||
cborBdNil: "nil",
|
||||
cborBdFalse: "false",
|
||||
cborBdTrue: "true",
|
||||
cborBdFloat16: "float",
|
||||
cborBdFloat32: "float",
|
||||
cborBdFloat64: "float",
|
||||
cborBdBreak: "break",
|
||||
}
|
||||
cbordescIndefNames = map[byte]string{
|
||||
cborBdIndefiniteBytes: "bytes*",
|
||||
cborBdIndefiniteString: "string*",
|
||||
cborBdIndefiniteArray: "array*",
|
||||
cborBdIndefiniteMap: "map*",
|
||||
}
|
||||
cbordescMajorNames = map[byte]string{
|
||||
cborMajorUint: "(u)int",
|
||||
cborMajorNegInt: "int",
|
||||
cborMajorBytes: "bytes",
|
||||
cborMajorString: "string",
|
||||
cborMajorArray: "array",
|
||||
cborMajorMap: "map",
|
||||
cborMajorTag: "tag",
|
||||
cborMajorSimpleOrFloat: "simple",
|
||||
}
|
||||
)
|
||||
|
||||
func cbordesc(bd byte) (s string) {
|
||||
bm := bd >> 5
|
||||
if bm == cborMajorSimpleOrFloat {
|
||||
s = cbordescSimpleNames[bd]
|
||||
} else {
|
||||
s = cbordescMajorNames[bm]
|
||||
if s == "" {
|
||||
s = cbordescIndefNames[bd]
|
||||
}
|
||||
}
|
||||
if s == "" {
|
||||
s = "unknown"
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// -------------------------

// CborHandle is a Handle for the CBOR encoding format,
// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
//
// CBOR is comprehensively supported, including support for:
//   - indefinite-length arrays/maps/bytes/strings
//   - (extension) tags in range 0..0xffff (0 .. 65535)
//   - half, single and double-precision floats
//   - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
//   - nil, true, false, ...
//   - arrays and maps, bytes and text strings
//
// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
// Users can implement them as needed (using SetExt), including spec-documented ones:
//   - timestamp, BigNum, BigFloat, Decimals,
//   - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
type CborHandle struct {
    binaryEncodingType
    notJsonType
    // noElemSeparators
    BasicHandle

    // IndefiniteLength=true, means that we encode using indefinitelength
    IndefiniteLength bool

    // TimeRFC3339 says to encode time.Time using RFC3339 format.
    // If unset, we encode time.Time using seconds past epoch.
    TimeRFC3339 bool

    // SkipUnexpectedTags says to skip over any tags for which extensions are
    // not defined. This is in keeping with the cbor spec on "Optional Tagging of Items".
    //
    // Furthermore, this allows the skipping over of the Self Describing Tag 0xd9d9f7.
    SkipUnexpectedTags bool
}

// Name returns the name of the handle: cbor
func (h *CborHandle) Name() string { return "cbor" }

func (h *CborHandle) desc(bd byte) string { return cbordesc(bd) }

// SetInterfaceExt sets an extension
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
    return h.SetExt(rt, tag, makeExt(ext))
}
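As with BincHandle earlier, a CborHandle is configured once and then shared. A minimal sketch (the import path and the NewEncoderBytes/NewDecoderBytes constructors are assumptions about the package's public API, not part of this hunk; only the field names come from the struct above):

```
package main

import (
    "fmt"
    "time"

    "github.com/ugorji/go/codec" // assumed import path for the vendored package
)

func main() {
    h := codec.CborHandle{
        TimeRFC3339:        true, // time.Time as RFC3339 text instead of seconds past epoch
        SkipUnexpectedTags: true, // ignore tags with no registered extension
    }

    var buf []byte
    if err := codec.NewEncoderBytes(&buf, &h).Encode(map[string]any{"when": time.Unix(0, 0).UTC()}); err != nil {
        panic(err)
    }

    var out map[string]any
    if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
        panic(err)
    }
    fmt.Println(out)
}
```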
12482
vendor/github.com/ugorji/go/codec/cbor.fastpath.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
953
vendor/github.com/ugorji/go/codec/cbor.go
generated
vendored
File diff suppressed because it is too large
7985
vendor/github.com/ugorji/go/codec/cbor.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
52
vendor/github.com/ugorji/go/codec/cbor.notfastpath.mono.generated.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath)
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type fastpathECborBytes struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderCborBytes, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDCborBytes struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderCborBytes, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsCborBytes [0]fastpathECborBytes
|
||||
type fastpathDsCborBytes [0]fastpathDCborBytes
|
||||
|
||||
func (helperEncDriverCborBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderCborBytes) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverCborBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderCborBytes) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverCborBytes) fastpathEList() (v *fastpathEsCborBytes) { return }
|
||||
func (helperDecDriverCborBytes) fastpathDList() (v *fastpathDsCborBytes) { return }
|
||||
|
||||
type fastpathECborIO struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderCborIO, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDCborIO struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderCborIO, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsCborIO [0]fastpathECborIO
|
||||
type fastpathDsCborIO [0]fastpathDCborIO
|
||||
|
||||
func (helperEncDriverCborIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderCborIO) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverCborIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderCborIO) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverCborIO) fastpathEList() (v *fastpathEsCborIO) { return }
|
||||
func (helperDecDriverCborIO) fastpathDList() (v *fastpathDsCborIO) { return }
|
||||
17
vendor/github.com/ugorji/go/codec/codecgen.go
generated
vendored
@ -1,17 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

//go:build codecgen || generated
// +build codecgen generated

package codec

// this file sets the codecgen variable to true
// when the build tag codecgen is set.
//
// some tests depend on knowing whether in the context of codecgen or not.
// For example, some tests should be skipped during codecgen e.g. missing fields tests.

func init() {
    codecgen = true
}
191
vendor/github.com/ugorji/go/codec/custom_time.go
generated
vendored
Normal file
@ -0,0 +1,191 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

package codec

import (
    "math"
    "time"
)

// EncodeTime encodes a time.Time as a []byte, including
// information on the instant in time and UTC offset.
//
// Format Description
//
// A timestamp is composed of 3 components:
//
//   - secs: signed integer representing seconds since unix epoch
//   - nsces: unsigned integer representing fractional seconds as a
//     nanosecond offset within secs, in the range 0 <= nsecs < 1e9
//   - tz: signed integer representing timezone offset in minutes east of UTC,
//     and a dst (daylight savings time) flag
//
// When encoding a timestamp, the first byte is the descriptor, which
// defines which components are encoded and how many bytes are used to
// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
// is not encoded in the byte array explicitly*.
//
// Descriptor 8 bits are of the form `A B C DDD EE`:
//   A: Is secs component encoded? 1 = true
//   B: Is nsecs component encoded? 1 = true
//   C: Is tz component encoded? 1 = true
//   DDD: Number of extra bytes for secs (range 0-7).
//     If A = 1, secs encoded in DDD+1 bytes.
//     If A = 0, secs is not encoded, and is assumed to be 0.
//     If A = 1, then we need at least 1 byte to encode secs.
//     DDD says the number of extra bytes beyond that 1.
//     E.g. if DDD=0, then secs is represented in 1 byte.
//     if DDD=2, then secs is represented in 3 bytes.
//   EE: Number of extra bytes for nsecs (range 0-3).
//     If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
//
// Following the descriptor bytes, subsequent bytes are:
//
//   secs component encoded in `DDD + 1` bytes (if A == 1)
//   nsecs component encoded in `EE + 1` bytes (if B == 1)
//   tz component encoded in 2 bytes (if C == 1)
//
// secs and nsecs components are integers encoded in a BigEndian
// 2-complement encoding format.
//
// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
// Least significant bit 0 are described below:
//
// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
// Bit 15 = have\_dst: set to 1 if we set the dst flag.
// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not.
// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
func customEncodeTime(t time.Time) []byte {
|
||||
// t := rv2i(rv).(time.Time)
|
||||
tsecs, tnsecs := t.Unix(), t.Nanosecond()
|
||||
var (
|
||||
bd byte
|
||||
bs [16]byte
|
||||
i int = 1
|
||||
)
|
||||
l := t.Location()
|
||||
if l == time.UTC {
|
||||
l = nil
|
||||
}
|
||||
if tsecs != 0 {
|
||||
bd = bd | 0x80
|
||||
btmp := bigen.PutUint64(uint64(tsecs))
|
||||
f := pruneSignExt(btmp[:], tsecs >= 0)
|
||||
bd = bd | (byte(7-f) << 2)
|
||||
copy(bs[i:], btmp[f:])
|
||||
i = i + (8 - f)
|
||||
}
|
||||
if tnsecs != 0 {
|
||||
bd = bd | 0x40
|
||||
btmp := bigen.PutUint32(uint32(tnsecs))
|
||||
f := pruneSignExt(btmp[:4], true)
|
||||
bd = bd | byte(3-f)
|
||||
copy(bs[i:], btmp[f:4])
|
||||
i = i + (4 - f)
|
||||
}
|
||||
if l != nil {
|
||||
bd = bd | 0x20
|
||||
// Note that Go Libs do not give access to dst flag.
|
||||
_, zoneOffset := t.Zone()
|
||||
// zoneName, zoneOffset := t.Zone()
|
||||
zoneOffset /= 60
|
||||
z := uint16(zoneOffset)
|
||||
btmp0, btmp1 := bigen.PutUint16(z)
|
||||
// clear dst flags
|
||||
bs[i] = btmp0 & 0x3f
|
||||
bs[i+1] = btmp1
|
||||
i = i + 2
|
||||
}
|
||||
bs[0] = bd
|
||||
return bs[0:i]
|
||||
}
|
||||
|
||||
// customDecodeTime decodes a []byte into a time.Time.
|
||||
func customDecodeTime(bs []byte) (tt time.Time, err error) {
|
||||
bd := bs[0]
|
||||
var (
|
||||
tsec int64
|
||||
tnsec uint32
|
||||
tz uint16
|
||||
i byte = 1
|
||||
i2 byte
|
||||
n byte
|
||||
)
|
||||
if bd&(1<<7) != 0 {
|
||||
var btmp [8]byte
|
||||
n = ((bd >> 2) & 0x7) + 1
|
||||
i2 = i + n
|
||||
copy(btmp[8-n:], bs[i:i2])
|
||||
// if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
|
||||
if bs[i]&(1<<7) != 0 {
|
||||
copy(btmp[0:8-n], bsAll0xff)
|
||||
}
|
||||
i = i2
|
||||
tsec = int64(bigen.Uint64(btmp))
|
||||
}
|
||||
if bd&(1<<6) != 0 {
|
||||
var btmp [4]byte
|
||||
n = (bd & 0x3) + 1
|
||||
i2 = i + n
|
||||
copy(btmp[4-n:], bs[i:i2])
|
||||
i = i2
|
||||
tnsec = bigen.Uint32(btmp)
|
||||
}
|
||||
if bd&(1<<5) == 0 {
|
||||
tt = time.Unix(tsec, int64(tnsec)).UTC()
|
||||
return
|
||||
}
|
||||
// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
|
||||
// However, we need name here, so it can be shown when time is printf.d.
|
||||
// Zone name is in form: UTC-08:00.
|
||||
// Note that Go Libs do not give access to dst flag, so we ignore dst bits
|
||||
|
||||
tz = bigen.Uint16([2]byte{bs[i], bs[i+1]})
|
||||
// sign extend sign bit into top 2 MSB (which were dst bits):
|
||||
if tz&(1<<13) == 0 { // positive
|
||||
tz = tz & 0x3fff //clear 2 MSBs: dst bits
|
||||
} else { // negative
|
||||
tz = tz | 0xc000 //set 2 MSBs: dst bits
|
||||
}
|
||||
tzint := int16(tz)
|
||||
if tzint == 0 {
|
||||
tt = time.Unix(tsec, int64(tnsec)).UTC()
|
||||
} else {
|
||||
// For Go Time, do not use a descriptive timezone.
|
||||
// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
|
||||
// The Offset already tells what the offset should be, if not on UTC and unknown zone name.
|
||||
// var zoneName = timeLocUTCName(tzint)
|
||||
tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// customEncodeTimeAsNum encodes time.Time exactly as cbor does.
|
||||
func customEncodeTimeAsNum(t time.Time) (r interface{}) {
|
||||
t = t.UTC().Round(time.Microsecond)
|
||||
sec, nsec := t.Unix(), uint64(t.Nanosecond())
|
||||
if nsec == 0 {
|
||||
r = sec
|
||||
} else {
|
||||
r = float64(sec) + float64(nsec)/1e9
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// customDecodeTimeAsNum decodes time.Time exactly as cbor does.
|
||||
func customDecodeTimeAsNum(v interface{}) (t time.Time) {
|
||||
switch vv := v.(type) {
|
||||
case int64:
|
||||
t = time.Unix(vv, 0)
|
||||
case uint64:
|
||||
t = time.Unix((int64)(vv), 0)
|
||||
case float64:
|
||||
f1, f2 := math.Modf(vv)
|
||||
t = time.Unix(int64(f1), int64(f2*1e9))
|
||||
default:
|
||||
halt.errorf("expect int64/float64 for time.Time ext: got %T", v)
|
||||
}
|
||||
t = t.UTC().Round(time.Microsecond)
|
||||
return
|
||||
}
|
||||
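A minimal illustrative sketch (not from the vendored source) of how the two byte-format helpers above could be exercised from inside the package, assuming the internal helpers they rely on (bigen, pruneSignExt, bsAll0xff, halt) are available as shown. The descriptor byte bd sets 0x80 when seconds are present, 0x40 for nanoseconds and 0x20 for a zone offset, which the decoder checks via bd&(1<<7), bd&(1<<6) and bd&(1<<5):

func exampleCustomTimeRoundTrip() {
	// non-zero seconds, non-zero nanoseconds, and a -8h fixed zone,
	// so bd carries 0x80|0x40|0x20 plus the length bits.
	in := time.Date(2021, 1, 2, 3, 4, 5, 6000, time.FixedZone("", -8*60*60))
	bs := customEncodeTime(in)
	out, err := customDecodeTime(bs)
	if err != nil || !out.Equal(in) {
		panic("custom time round-trip failed")
	}
	if _, offset := out.Zone(); offset != -8*60*60 {
		panic("zone offset not preserved") // offset survives with minute precision
	}
}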
42
vendor/github.com/ugorji/go/codec/decimal.go
generated
vendored
@ -8,6 +8,19 @@ import (
	"strconv"
)

type readFloatResult struct {
	mantissa uint64
	exp      int8
	neg      bool
	trunc    bool
	bad      bool // bad decimal string
	hardexp  bool // exponent is hard to handle (> 2 digits, etc)
	ok       bool
	// sawdot bool
	// sawexp bool
	//_ [2]bool // padding
}

// Per go spec, floats are represented in memory as
// IEEE single or double precision floating point values.
//
@ -234,6 +247,10 @@ func parseFloat64_custom(b []byte) (f float64, err error) {
}

func parseUint64_simple(b []byte) (n uint64, ok bool) {
	if len(b) > 1 && b[0] == '0' { // punt on numbers with leading zeros
		return
	}

	var i int
	var n1 uint64
	var c uint8
@ -356,19 +373,6 @@ func parseNumber(b []byte, z *fauxUnion, preferSignedInt bool) (err error) {
	return
}

type readFloatResult struct {
	mantissa uint64
	exp      int8
	neg      bool
	trunc    bool
	bad      bool // bad decimal string
	hardexp  bool // exponent is hard to handle (> 2 digits, etc)
	ok       bool
	// sawdot bool
	// sawexp bool
	//_ [2]bool // padding
}

func readFloat(s []byte, y floatinfo) (r readFloatResult) {
	var i uint // uint, so that we eliminate bounds checking
	var slen = uint(len(s))
@ -384,13 +388,23 @@ func readFloat(s []byte, y floatinfo) (r readFloatResult) {
		i++
	}

	// we considered punting early if string has length > maxMantDigits, but this doesn't account
	// considered punting early if string has length > maxMantDigits, but doesn't account
	// for trailing 0's e.g. 700000000000000000000 can be encoded exactly as it is 7e20

	var nd, ndMant, dp int8
	var sawdot, sawexp bool
	var xu uint64

	if i+1 < slen && s[i] == '0' {
		switch s[i+1] {
		case '.', 'e', 'E':
			// ok
		default:
			r.bad = true
			return
		}
	}

LOOP:
	for ; i < slen; i++ {
		switch s[i] {
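A brief, hedged illustration of the leading-zero guard visible in parseUint64_simple above; the rest of the function body is outside this hunk, so the success case is assumed from the function's stated contract:

// parseUint64_simple([]byte("0123")) // ok == false: leading zero, caller falls back to the slower parse path
// parseUint64_simple([]byte("123"))  // expected: n == 123, ok == true (assumes the unshown remainder of the function)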
944
vendor/github.com/ugorji/go/codec/decode.base.go
generated
vendored
Normal file
@ -0,0 +1,944 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

package codec

import (
	"errors"
	"io"
	"math"
	"reflect"
	"slices"
	"sync"
	"time"
)

func init() {
	for _, v := range []interface{}{
		(*string)(nil),
		(*bool)(nil),
		(*int)(nil),
		(*int8)(nil),
		(*int16)(nil),
		(*int32)(nil),
		(*int64)(nil),
		(*uint)(nil),
		(*uint8)(nil),
		(*uint16)(nil),
		(*uint32)(nil),
		(*uint64)(nil),
		(*uintptr)(nil),
		(*float32)(nil),
		(*float64)(nil),
		(*complex64)(nil),
		(*complex128)(nil),
		(*[]byte)(nil),
		([]byte)(nil),
		(*time.Time)(nil),
		(*Raw)(nil),
		(*interface{})(nil),
	} {
		decBuiltinRtids = append(decBuiltinRtids, i2rtid(v))
	}
	slices.Sort(decBuiltinRtids)
}

const msgBadDesc = "unrecognized descriptor byte"

var decBuiltinRtids []uintptr

// decDriver calls (DecodeBytes and DecodeStringAsBytes) return a state
// of the view they return, allowing consumers to handle appropriately.
//
// sequencing of this is intentional:
// - mutable if <= dBytesAttachBuffer (buf | view | invalid)
// - noCopy if >= dBytesAttachViewZerocopy
type dBytesAttachState uint8

const (
	dBytesAttachInvalid      dBytesAttachState = iota
	dBytesAttachView         // (bytes && !zerocopy && !buf)
	dBytesAttachBuffer       // (buf)
	dBytesAttachViewZerocopy // (bytes && zerocopy && !buf)
	dBytesDetach             // (!bytes && !buf)
)

type dBytesIntoState uint8

const (
	dBytesIntoNoChange dBytesIntoState = iota
	dBytesIntoParamOut
	dBytesIntoParamOutSlice
	dBytesIntoNew
)

func (x dBytesAttachState) String() string {
	switch x {
	case dBytesAttachInvalid:
		return "invalid"
	case dBytesAttachView:
		return "view"
	case dBytesAttachBuffer:
		return "buffer"
	case dBytesAttachViewZerocopy:
		return "view-zerocopy"
	case dBytesDetach:
		return "detach"
	}
	return "unknown"
}

const (
	decDefMaxDepth         = 1024        // maximum depth
	decDefChanCap          = 64          // should be large, as cap cannot be expanded
	decScratchByteArrayLen = (4 + 3) * 8 // around cacheLineSize ie ~64, depending on Decoder size

	// MARKER: massage decScratchByteArrayLen to ensure xxxDecDriver structs fit within cacheLine*N

	// decFailNonEmptyIntf configures whether we error
	// when decoding naked into a non-empty interface.
	//
	// Typically, we cannot decode non-nil stream value into
	// nil interface with methods (e.g. io.Reader).
	// However, in some scenarios, this should be allowed:
	// - MapType
	// - SliceType
	// - Extensions
	//
	// Consequently, we should relax this. Put it behind a const flag for now.
	decFailNonEmptyIntf = false

	// decUseTransient says whether we should use the transient optimization.
	//
	// There's potential for GC corruption or memory overwrites if transient isn't
	// used carefully, so this flag helps turn it off quickly if needed.
	//
	// Use it everywhere needed so we can completely remove unused code blocks.
	decUseTransient = true
)
var (
	errNeedMapOrArrayDecodeToStruct = errors.New("only encoded map or array can decode into struct")
	errCannotDecodeIntoNil          = errors.New("cannot decode into nil")

	errExpandSliceCannotChange = errors.New("expand slice: cannot change")

	errDecoderNotInitialized = errors.New("Decoder not initialized")

	errDecUnreadByteNothingToRead   = errors.New("cannot unread - nothing has been read")
	errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read")
	errDecUnreadByteUnknown         = errors.New("cannot unread - reason unknown")
	errMaxDepthExceeded             = errors.New("maximum decoding depth exceeded")
)

type decNotDecodeableReason uint8

const (
	decNotDecodeableReasonUnknown decNotDecodeableReason = iota
	decNotDecodeableReasonBadKind
	decNotDecodeableReasonNonAddrValue
	decNotDecodeableReasonNilReference
)

type decDriverI interface {

	// CheckBreak will check if the next token is a break.
	CheckBreak() bool

	// TryNil tries to decode as nil.
	// If a nil is in the stream, it consumes it and returns true.
	//
	// Note: if TryNil returns true, that must be handled.
	TryNil() bool

	// ContainerType returns one of: Bytes, String, Nil, Slice or Map.
	//
	// Return unSet if not known.
	//
	// Note: Implementations MUST fully consume sentinel container types, specifically Nil.
	ContainerType() (vt valueType)

	// DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
	// For maps and arrays, it will not do the decoding in-band, but will signal
	// the decoder, so that is done later, by setting the fauxUnion.valueType field.
	//
	// Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
	// for extensions, DecodeNaked must read the tag and the []byte if it exists.
	// if the []byte is not read, then kInterfaceNaked will treat it as a Handle
	// that stores the subsequent value in-band, and complete reading the RawExt.
	//
	// extensions should also use readx to decode them, for efficiency.
	// kInterface will extract the detached byte slice if it has to pass it outside its realm.
	DecodeNaked()

	DecodeInt64() (i int64)
	DecodeUint64() (ui uint64)

	DecodeFloat32() (f float32)
	DecodeFloat64() (f float64)

	DecodeBool() (b bool)

	// DecodeStringAsBytes returns the bytes representing a string.
	// It will return a view into scratch buffer or input []byte (if applicable).
	//
	// Note: This can also decode symbols, if supported.
	//
	// Users should consume it right away and not store it for later use.
	DecodeStringAsBytes() (v []byte, state dBytesAttachState)

	// DecodeBytes returns the bytes representing a binary value.
	// It will return a view into scratch buffer or input []byte (if applicable).
	DecodeBytes() (out []byte, state dBytesAttachState)
	// DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)

	// DecodeExt will decode into an extension.
	// ext is never nil.
	DecodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext)
	// decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)

	// DecodeRawExt will decode into a *RawExt
	DecodeRawExt(re *RawExt)

	DecodeTime() (t time.Time)

	// ReadArrayStart will return the length of the array.
	// If the format doesn't prefix the length, it returns containerLenUnknown.
	// If the expected array was a nil in the stream, it returns containerLenNil.
	ReadArrayStart() int

	// ReadMapStart will return the length of the map.
	// If the format doesn't prefix the length, it returns containerLenUnknown.
	// If the expected map was a nil in the stream, it returns containerLenNil.
	ReadMapStart() int

	decDriverContainerTracker

	reset()

	// atEndOfDecode()

	// nextValueBytes will return the bytes representing the next value in the stream.
	// It generally will include the last byte read, as that is a part of the next value
	// in the stream.
	nextValueBytes() []byte

	// descBd will describe the token descriptor that signifies what type was decoded
	descBd() string

	// isBytes() bool

	resetInBytes(in []byte)
	resetInIO(r io.Reader)

	NumBytesRead() int

	init(h Handle, shared *decoderBase, dec decoderI) (fp interface{})

	// driverStateManager
	decNegintPosintFloatNumber
}

type decInit2er struct{}

func (decInit2er) init2(dec decoderI) {}

type decDriverContainerTracker interface {
	ReadArrayElem(firstTime bool)
	ReadMapElemKey(firstTime bool)
	ReadMapElemValue()
	ReadArrayEnd()
	ReadMapEnd()
}

type decNegintPosintFloatNumber interface {
	decInteger() (ui uint64, neg, ok bool)
	decFloat() (f float64, ok bool)
}
type decDriverNoopNumberHelper struct{}

func (x decDriverNoopNumberHelper) decInteger() (ui uint64, neg, ok bool) {
	panic("decInteger unsupported")
}
func (x decDriverNoopNumberHelper) decFloat() (f float64, ok bool) { panic("decFloat unsupported") }

type decDriverNoopContainerReader struct{}

func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { panic("ReadArrayStart unsupported") }
func (x decDriverNoopContainerReader) ReadMapStart() (v int)   { panic("ReadMapStart unsupported") }
func (x decDriverNoopContainerReader) ReadArrayEnd()           {}
func (x decDriverNoopContainerReader) ReadMapEnd()             {}
func (x decDriverNoopContainerReader) ReadArrayElem(firstTime bool) {}
func (x decDriverNoopContainerReader) ReadMapElemKey(firstTime bool) {}
func (x decDriverNoopContainerReader) ReadMapElemValue()       {}
func (x decDriverNoopContainerReader) CheckBreak() (v bool)    { return }

// ----

type decFnInfo struct {
	ti     *typeInfo
	xfFn   Ext
	xfTag  uint64
	addrD  bool // decoding into a pointer is preferred
	addrDf bool // force: if addrD, then decode function MUST take a ptr
}
// DecodeOptions captures configuration options during decode.
type DecodeOptions struct {
	// MapType specifies type to use during schema-less decoding of a map in the stream.
	// If nil (unset), we default to map[string]interface{} iff json handle and MapKeyAsString=true,
	// else map[interface{}]interface{}.
	MapType reflect.Type

	// SliceType specifies type to use during schema-less decoding of an array in the stream.
	// If nil (unset), we default to []interface{} for all formats.
	SliceType reflect.Type

	// MaxInitLen defines the maximum initial length that we "make" a collection
	// (string, slice, map, chan). If 0 or negative, we default to a sensible value
	// based on the size of an element in the collection.
	//
	// For example, when decoding, a stream may say that it has 2^64 elements.
	// We should not automatically provision a slice of that size, to prevent Out-Of-Memory crash.
	// Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
	MaxInitLen int

	// ReaderBufferSize is the size of the buffer used when reading.
	//
	// if > 0, we use a smart buffer internally for performance purposes.
	ReaderBufferSize int

	// MaxDepth defines the maximum depth when decoding nested
	// maps and slices. If 0 or negative, we default to a suitably large number (currently 1024).
	MaxDepth int16

	// If ErrorIfNoField, return an error when decoding a map
	// from a codec stream into a struct, and no matching struct field is found.
	ErrorIfNoField bool

	// If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
	// For example, the stream contains an array of 8 items, but you are decoding into a [4]T array,
	// or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set).
	ErrorIfNoArrayExpand bool

	// If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64).
	SignedInteger bool

	// MapValueReset controls how we decode into a map value.
	//
	// By default, we MAY retrieve the mapping for a key, and then decode into that.
	// However, especially with big maps, that retrieval may be expensive and unnecessary
	// if the stream already contains all that is necessary to recreate the value.
	//
	// If true, we will never retrieve the previous mapping,
	// but rather decode into a new value and set that in the map.
	//
	// If false, we will retrieve the previous mapping if necessary e.g.
	// the previous mapping is a pointer, or is a struct or array with pre-set state,
	// or is an interface.
	MapValueReset bool

	// SliceElementReset: on decoding a slice, reset the element to a zero value first.
	//
	// concern: if the slice already contained some garbage, we will decode into that garbage.
	SliceElementReset bool

	// InterfaceReset controls how we decode into an interface.
	//
	// By default, when we see a field that is an interface{...},
	// or a map with interface{...} value, we will attempt decoding into the
	// "contained" value.
	//
	// However, this prevents us from reading a string into an interface{}
	// that formerly contained a number.
	//
	// If true, we will decode into a new "blank" value, and set that in the interface.
	// If false, we will decode into whatever is contained in the interface.
	InterfaceReset bool

	// InternString controls interning of strings during decoding.
	//
	// Some handles, e.g. json, typically will read map keys as strings.
	// If the set of keys are finite, it may help reduce allocation to
	// look them up from a map (than to allocate them afresh).
	//
	// Note: Handles will be smart when using the intern functionality.
	// Every string should not be interned.
	// An excellent use-case for interning is struct field names,
	// or map keys where key type is string.
	InternString bool

	// PreferArrayOverSlice controls whether to decode to an array or a slice.
	//
	// This only impacts decoding into a nil interface{}.
	//
	// Consequently, it has no effect on codecgen.
	//
	// *Note*: This only applies if using go1.5 and above,
	// as it requires reflect.ArrayOf support which was absent before go1.5.
	PreferArrayOverSlice bool

	// DeleteOnNilMapValue controls how to decode a nil value in the stream.
	//
	// If true, we will delete the mapping of the key.
	// Else, just set the mapping to the zero value of the type.
	//
	// Deprecated: This does NOTHING and is left behind for compiling compatibility.
	// This change is necessitated because 'nil' in a stream now consistently
	// means the zero value (ie reset the value to its zero state).
	DeleteOnNilMapValue bool

	// RawToString controls how raw bytes in a stream are decoded into a nil interface{}.
	// By default, they are decoded as []byte, but can be decoded as string (if configured).
	RawToString bool

	// ZeroCopy controls whether decoded values of []byte or string type
	// point into the input []byte parameter passed to a NewDecoderBytes/ResetBytes(...) call.
	//
	// To illustrate, if ZeroCopy and decoding from a []byte (not io.Writer),
	// then a []byte or string in the output result may just be a slice of (point into)
	// the input bytes.
	//
	// This optimization prevents unnecessary copying.
	//
	// However, it is made optional, as the caller MUST ensure that the input parameter []byte is
	// not modified after the Decode() happens, as any changes are mirrored in the decoded result.
	ZeroCopy bool

	// PreferPointerForStructOrArray controls whether a struct or array
	// is stored in a nil interface{}, or a pointer to it.
	//
	// This mostly impacts when we decode registered extensions.
	PreferPointerForStructOrArray bool

	// ValidateUnicode controls whether decoding fails if an expected unicode
	// string is well-formed but includes invalid codepoints.
	//
	// This could have a performance impact.
	ValidateUnicode bool
}
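A hedged caller-side sketch of how these options are typically set: DecodeOptions is embedded in BasicHandle (the h field used throughout this file), so concrete handles expose the fields directly. The handle type (JsonHandle, defined elsewhere in the package, not in this hunk) and the values below are illustrative only.

func exampleDecodeOptions(input []byte) (map[string]interface{}, error) {
	var jh JsonHandle // JsonHandle embeds BasicHandle, which embeds DecodeOptions
	jh.MaxDepth = 64        // cap nesting of maps/slices (default 1024)
	jh.MaxInitLen = 1 << 16 // cap up-front allocation for declared container lengths
	jh.RawToString = true   // decode raw bytes into string when the target is interface{}
	var v map[string]interface{}
	err := NewDecoderBytes(input, &jh).Decode(&v)
	return v, err
}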
// ----------------------------------------

type decoderBase struct {
	perType decPerType

	h *BasicHandle

	rtidFn, rtidFnNoExt *atomicRtidFnSlice

	buf []byte

	// used for interning strings
	is internerMap

	err error

	// sd decoderI

	blist bytesFreeList

	mtr  bool // is maptype a known type?
	str  bool // is slicetype a known type?
	jsms bool // is json handle, and MapKeyAsString

	bytes bool // uses a bytes reader
	bufio bool // uses a ioDecReader with buffer size > 0

	// ---- cpu cache line boundary?
	// ---- writable fields during execution --- *try* to keep in sep cache line
	maxdepth int16
	depth    int16

	// Extensions can call Decode() within a current Decode() call.
	// We need to know when the top level Decode() call returns,
	// so we can decide whether to Release() or not.
	calls uint16 // what depth in mustDecode are we in now.

	c containerState

	// decByteState

	n fauxUnion

	// b is an always-available scratch buffer used by Decoder and decDrivers.
	// By being always-available, it can be used for one-off things without
	// having to get from freelist, use, and return back to freelist.
	//
	// Use it for a narrow set of things e.g.
	// - binc uses it for parsing numbers, represented at 8 or less bytes
	// - uses as potential buffer for struct field names
	b [decScratchByteArrayLen]byte

	hh Handle
	// cache the mapTypeId and sliceTypeId for faster comparisons
	mtid uintptr
	stid uintptr
}

func (d *decoderBase) maxInitLen() uint {
	return uint(max(1024, d.h.MaxInitLen))
}

func (d *decoderBase) naked() *fauxUnion {
	return &d.n
}

func (d *decoderBase) fauxUnionReadRawBytes(dr decDriverI, asString, rawToString bool) { //, handleZeroCopy bool) {
	// fauxUnion is only used within DecodeNaked calls; consequently, we should try to intern.
	d.n.l, d.n.a = dr.DecodeBytes()
	if asString || rawToString {
		d.n.v = valueTypeString
		d.n.s = d.detach2Str(d.n.l, d.n.a)
	} else {
		d.n.v = valueTypeBytes
		d.n.l = d.detach2Bytes(d.n.l, d.n.a)
	}
}

// Return a fixed (detached) string representation of a []byte.
//
// Possibly get an interned version of a string,
// iff InternString=true and decoding a map key.
//
// This should mostly be used for map keys, struct field names, etc
// where the key type is string. This is because keys of a map/struct are
// typically reused across many objects.
func (d *decoderBase) detach2Str(v []byte, state dBytesAttachState) (s string) {
	// note: string([]byte) checks - and optimizes - for len 0 and len 1
	if len(v) <= 1 {
		s = string(v)
	} else if state >= dBytesAttachViewZerocopy { // !scratchBuf && d.bytes && d.h.ZeroCopy
		s = stringView(v)
	} else if d.is == nil || d.c != containerMapKey || len(v) > internMaxStrLen {
		s = string(v)
	} else {
		s = d.is.string(v)
	}
	return
}

func (d *decoderBase) usableStructFieldNameBytes(buf, v []byte, state dBytesAttachState) (out []byte) {
	// In JSON, mapElemValue reads a colon and spaces.
	// In bufio mode of ioDecReader, fillbuf could overwrite the read buffer
	// which readXXX() calls return sub-slices from.
	//
	// Consequently, we detach the bytes in this special case.
	//
	// Note: ioDecReader (non-bufio) and bytesDecReader do not have
	// this issue (as no fillbuf exists where bytes might be returned from).
	if d.bufio && d.h.jsonHandle && state < dBytesAttachViewZerocopy {
		if cap(buf) > len(v) {
			out = buf[:len(v)]
		} else if len(d.b) > len(v) {
			out = d.b[:len(v)]
		} else {
			out = make([]byte, len(v), max(64, len(v)))
		}
		copy(out, v)
		return
	}
	return v
}

func (d *decoderBase) detach2Bytes(in []byte, state dBytesAttachState) (out []byte) {
	if cap(in) == 0 || state >= dBytesAttachViewZerocopy {
		return in
	}
	if len(in) == 0 {
		return zeroByteSlice
	}
	out = make([]byte, len(in))
	copy(out, in)
	return out
}

func (d *decoderBase) attachState(usingBufFromReader bool) (r dBytesAttachState) {
	if usingBufFromReader {
		r = dBytesAttachBuffer
	} else if !d.bytes {
		r = dBytesDetach
	} else if d.h.ZeroCopy {
		r = dBytesAttachViewZerocopy
	} else {
		r = dBytesAttachView
	}
	return
}

func (d *decoderBase) mapStart(v int) int {
	if v != containerLenNil {
		d.depthIncr()
		d.c = containerMapStart
	}
	return v
}

func (d *decoderBase) HandleName() string {
	return d.hh.Name()
}

func (d *decoderBase) isBytes() bool {
	return d.bytes
}
type decoderI interface {
	Decode(v interface{}) (err error)
	HandleName() string
	MustDecode(v interface{})
	NumBytesRead() int
	Release() // deprecated
	Reset(r io.Reader)
	ResetBytes(in []byte)
	ResetString(s string)

	isBytes() bool
	wrapErr(v error, err *error)
	swallow()

	nextValueBytes() []byte // wrapper method, for use in tests
	// getDecDriver() decDriverI

	decode(v interface{})
	decodeAs(v interface{}, t reflect.Type, ext bool)

	interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt)
}

var errDecNoResetBytesWithReader = errors.New("cannot reset an Decoder reading from []byte with a io.Reader")
var errDecNoResetReaderWithBytes = errors.New("cannot reset an Decoder reading from io.Reader with a []byte")

func setZero(iv interface{}) {
	rv, isnil := isNil(iv, false)
	if isnil {
		return
	}
	if !rv.IsValid() {
		rv = reflect.ValueOf(iv)
	}
	if isnilBitset.isset(byte(rv.Kind())) && rvIsNil(rv) {
		return
	}
	// var canDecode bool
	switch v := iv.(type) {
	case *string:
		*v = ""
	case *bool:
		*v = false
	case *int:
		*v = 0
	case *int8:
		*v = 0
	case *int16:
		*v = 0
	case *int32:
		*v = 0
	case *int64:
		*v = 0
	case *uint:
		*v = 0
	case *uint8:
		*v = 0
	case *uint16:
		*v = 0
	case *uint32:
		*v = 0
	case *uint64:
		*v = 0
	case *float32:
		*v = 0
	case *float64:
		*v = 0
	case *complex64:
		*v = 0
	case *complex128:
		*v = 0
	case *[]byte:
		*v = nil
	case *Raw:
		*v = nil
	case *time.Time:
		*v = time.Time{}
	case reflect.Value:
		decSetNonNilRV2Zero(v)
	default:
		if !fastpathDecodeSetZeroTypeSwitch(iv) {
			decSetNonNilRV2Zero(rv)
		}
	}
}
// decSetNonNilRV2Zero will set the non-nil value to its zero value.
func decSetNonNilRV2Zero(v reflect.Value) {
	// If not decodeable (settable), we do not touch it.
	// We considered empty'ing it if not decodeable e.g.
	// - if chan, drain it
	// - if map, clear it
	// - if slice or array, zero all elements up to len
	//
	// However, we decided instead that we either will set the
	// whole value to the zero value, or leave AS IS.

	k := v.Kind()
	if k == reflect.Interface {
		decSetNonNilRV2Zero4Intf(v)
	} else if k == reflect.Ptr {
		decSetNonNilRV2Zero4Ptr(v)
	} else if v.CanSet() {
		rvSetDirectZero(v)
	}
}

func decSetNonNilRV2Zero4Ptr(v reflect.Value) {
	ve := v.Elem()
	if ve.CanSet() {
		rvSetZero(ve) // we can have a pointer to an interface
	} else if v.CanSet() {
		rvSetZero(v)
	}
}

func decSetNonNilRV2Zero4Intf(v reflect.Value) {
	ve := v.Elem()
	if ve.CanSet() {
		rvSetDirectZero(ve) // interfaces always have element as a non-interface
	} else if v.CanSet() {
		rvSetZero(v)
	}
}

func (d *decoderBase) arrayCannotExpand(sliceLen, streamLen int) {
	if d.h.ErrorIfNoArrayExpand {
		halt.errorf("cannot expand array len during decode from %v to %v", any(sliceLen), any(streamLen))
	}
}

//go:noinline
func (d *decoderBase) haltAsNotDecodeable(rv reflect.Value) {
	if !rv.IsValid() {
		halt.onerror(errCannotDecodeIntoNil)
	}
	// check if an interface can be retrieved, before grabbing an interface
	if !rv.CanInterface() {
		halt.errorf("cannot decode into a value without an interface: %v", rv)
	}
	halt.errorf("cannot decode into value of kind: %v, %#v", rv.Kind(), rv2i(rv))
}

func (d *decoderBase) depthIncr() {
	d.depth++
	if d.depth >= d.maxdepth {
		halt.onerror(errMaxDepthExceeded)
	}
}

func (d *decoderBase) depthDecr() {
	d.depth--
}

func (d *decoderBase) arrayStart(v int) int {
	if v != containerLenNil {
		d.depthIncr()
		d.c = containerArrayStart
	}
	return v
}

func (d *decoderBase) oneShotAddrRV(rvt reflect.Type, rvk reflect.Kind) reflect.Value {
	// MARKER 2025: is this slow for calling oneShot?
	if decUseTransient && d.h.getTypeInfo4RT(baseRT(rvt)).flagCanTransient {
		return d.perType.TransientAddrK(rvt, rvk)
	}
	return rvZeroAddrK(rvt, rvk)
}

// decNegintPosintFloatNumberHelper is used for formats that are binary
// and have distinct ways of storing positive integers vs negative integers
// vs floats, which are uniquely identified by the byte descriptor.
//
// Currently, these formats are binc, cbor and simple.
type decNegintPosintFloatNumberHelper struct {
	d decDriverI
}

func (x decNegintPosintFloatNumberHelper) uint64(ui uint64, neg, ok bool) uint64 {
	if ok && !neg {
		return ui
	}
	return x.uint64TryFloat(ok)
}

func (x decNegintPosintFloatNumberHelper) uint64TryFloat(neg bool) (ui uint64) {
	if neg { // neg = true
		halt.errorStr("assigning negative signed value to unsigned type")
	}
	f, ok := x.d.decFloat()
	if !(ok && f >= 0 && noFrac64(math.Float64bits(f))) {
		halt.errorStr2("invalid number loading uint64, with descriptor: ", x.d.descBd())
	}
	return uint64(f)
}

func (x decNegintPosintFloatNumberHelper) int64(ui uint64, neg, ok, cbor bool) (i int64) {
	if ok {
		return decNegintPosintFloatNumberHelperInt64v(ui, neg, cbor)
	}
	// return x.int64TryFloat()
	// }
	// func (x decNegintPosintFloatNumberHelper) int64TryFloat() (i int64) {
	f, ok := x.d.decFloat()
	if !(ok && noFrac64(math.Float64bits(f))) {
		halt.errorf("invalid number loading uint64 (%v), with descriptor: %s", f, x.d.descBd())
	}
	return int64(f)
}

func (x decNegintPosintFloatNumberHelper) float64(f float64, ok, cbor bool) float64 {
	if ok {
		return f
	}
	return x.float64TryInteger(cbor)
}

func (x decNegintPosintFloatNumberHelper) float64TryInteger(cbor bool) float64 {
	ui, neg, ok := x.d.decInteger()
	if !ok {
		halt.errorStr2("invalid descriptor for float: ", x.d.descBd())
	}
	return float64(decNegintPosintFloatNumberHelperInt64v(ui, neg, cbor))
}

func decNegintPosintFloatNumberHelperInt64v(ui uint64, neg, incrIfNeg bool) (i int64) {
	if neg && incrIfNeg {
		ui++
	}
	i = chkOvf.SignedIntV(ui)
	if neg {
		i = -i
	}
	return
}
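A short worked illustration of the mapping above; the values follow directly from decNegintPosintFloatNumberHelperInt64v as written, with incrIfNeg=true reflecting the CBOR convention that a stored magnitude m represents -(m+1):

// decNegintPosintFloatNumberHelperInt64v(4, false, true) == 4  // positive integer, unchanged
// decNegintPosintFloatNumberHelperInt64v(4, true, true)  == -5 // cbor: stored magnitude 4 means -(4+1)
// decNegintPosintFloatNumberHelperInt64v(4, true, false) == -4 // binc/simple style: magnitude is the absolute value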
// isDecodeable checks if value can be decoded into
//
// decode can take any reflect.Value that is a inherently addressable i.e.
// - non-nil chan (we will SEND to it)
// - non-nil slice (we will set its elements)
// - non-nil map (we will put into it)
// - non-nil pointer (we can "update" it)
// - func: no
// - interface: no
// - array: if canAddr=true
// - any other value pointer: if canAddr=true
func isDecodeable(rv reflect.Value) (canDecode bool, reason decNotDecodeableReason) {
	switch rv.Kind() {
	case reflect.Ptr, reflect.Slice, reflect.Chan, reflect.Map:
		canDecode = !rvIsNil(rv)
		reason = decNotDecodeableReasonNilReference
	case reflect.Func, reflect.Interface, reflect.Invalid, reflect.UnsafePointer:
		reason = decNotDecodeableReasonBadKind
	default:
		canDecode = rv.CanAddr()
		reason = decNotDecodeableReasonNonAddrValue
	}
	return
}

// decInferLen will infer a sensible length, given the following:
// - clen: length wanted.
// - maxlen: max length to be returned.
//   if <= 0, it is unset, and we infer it based on the unit size
// - unit: number of bytes for each element of the collection
func decInferLen(clen int, maxlen, unit uint) (n uint) {
	// anecdotal testing showed increase in allocation with map length of 16.
	// We saw same typical alloc from 0-8, then a 20% increase at 16.
	// Thus, we set it to 8.

	const (
		minLenIfUnset = 8
		maxMem        = 1024 * 1024 // 1 MB Memory
	)

	// handle when maxlen is not set i.e. <= 0

	// clen==0: use 0
	// maxlen<=0, clen<0: use default
	// maxlen> 0, clen<0: use default
	// maxlen<=0, clen>0: infer maxlen, and cap on it
	// maxlen> 0, clen>0: cap at maxlen

	if clen == 0 || clen == containerLenNil {
		return 0
	}
	if clen < 0 {
		// if unspecified, return 64 for bytes, ... 8 for uint64, ... and everything else
		return max(64/unit, minLenIfUnset)
	}
	if unit == 0 {
		return uint(clen)
	}
	if maxlen == 0 {
		maxlen = maxMem / unit
	}
	return min(uint(clen), maxlen)
}
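Worked values for decInferLen as written above, assuming the package's usual sentinels (containerLenUnknown is negative and distinct from containerLenNil); shown for illustration only:

// decInferLen(0, 0, 8)                   == 0      // explicit empty container
// decInferLen(containerLenNil, 0, 8)     == 0      // nil container in the stream
// decInferLen(containerLenUnknown, 0, 8) == 8      // length not prefixed: max(64/unit, minLenIfUnset)
// decInferLen(100, 0, 0)                 == 100    // unit 0: no memory-based cap applied
// decInferLen(1<<30, 0, 8)               == 131072 // capped at maxMem/unit = (1024*1024)/8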
type Decoder struct {
	decoderI
}

// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
//
// For efficiency, Users are encouraged to configure ReaderBufferSize on the handle
// OR pass in a memory buffered reader (eg bufio.Reader, bytes.Buffer).
func NewDecoder(r io.Reader, h Handle) *Decoder {
	return &Decoder{h.newDecoder(r)}
}

// NewDecoderBytes returns a Decoder which efficiently decodes directly
// from a byte slice with zero copying.
func NewDecoderBytes(in []byte, h Handle) *Decoder {
	return &Decoder{h.newDecoderBytes(in)}
}

// NewDecoderString returns a Decoder which efficiently decodes directly
// from a string with zero copying.
//
// It is a convenience function that calls NewDecoderBytes with a
// []byte view into the string.
//
// This can be an efficient zero-copy if using default mode i.e. without codec.safe tag.
func NewDecoderString(s string, h Handle) *Decoder {
	return NewDecoderBytes(bytesView(s), h)
}
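A hedged usage sketch of the constructors above, written as if from inside the package for brevity; the handle choice (SimpleHandle, defined elsewhere in the package, not in this hunk) and buffer size are illustrative. Per the NewDecoder comment, either set ReaderBufferSize on the handle or pass an already-buffered reader:

func exampleNewDecoders(r io.Reader, data []byte) {
	var h SimpleHandle        // any Handle implementation works here
	h.ReaderBufferSize = 4096 // buffered reads for the io.Reader path
	var v1, v2 interface{}
	_ = NewDecoder(r, &h).Decode(&v1)         // streaming decode from an io.Reader
	_ = NewDecoderBytes(data, &h).Decode(&v2) // zero-copy decode from a []byte
}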
// ----

func sideDecode(h Handle, p *sync.Pool, fn func(decoderI)) {
	var s decoderI
	if usePoolForSideDecode {
		s = p.Get().(decoderI)
		defer p.Put(s)
	} else {
		// initialization cycle error
		// s = NewDecoderBytes(nil, h).decoderI
		s = p.New().(decoderI)
	}
	fn(s)
}

func oneOffDecode(sd decoderI, v interface{}, in []byte, basetype reflect.Type, ext bool) {
	sd.ResetBytes(in)
	sd.decodeAs(v, basetype, ext)
	// d.sideDecoder(xbs)
	// d.sideDecode(rv, basetype)
}

func bytesOKdbi(v []byte, _ dBytesIntoState) []byte {
	return v
}

func bytesOKs(bs []byte, _ dBytesAttachState) []byte {
	return bs
}
2166
vendor/github.com/ugorji/go/codec/decode.go
generated
vendored
File diff suppressed because it is too large
116
vendor/github.com/ugorji/go/codec/doc.go
generated
vendored
@ -12,7 +12,7 @@ Supported Serialization formats are:
  - binc: http://github.com/ugorji/binc
  - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
  - json: http://json.org http://tools.ietf.org/html/rfc7159
  - simple:
  - simple: (unpublished)

This package will carefully use 'package unsafe' for performance reasons in specific places.
You can build without unsafe use by passing the safe or appengine tag
@ -78,6 +78,32 @@ Rich Feature Set includes:
	msgpack-rpc protocol defined at:
	https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md

# Supported build tags

We gain performance by code-generating fast-paths for slices and maps of built-in types,
and monomorphizing generic code explicitly so we gain inlining and de-virtualization benefits.

The results are 20-40% performance improvements.

Building and running is configured using build tags as below.

At runtime:

  - codec.safe: run in safe mode (not using unsafe optimizations)
  - codec.notmono: use generics code (bypassing performance-boosting monomorphized code)
  - codec.notfastpath: skip fast path code for slices and maps of built-in types (number, bool, string, bytes)

Each of these "runtime" tags have a convenience synonym i.e. safe, notmono, notfastpath.
Pls use these mostly during development - use codec.XXX in your go files.

Build only:

  - codec.build: used to generate fastpath and monomorphization code

Test only:

  - codec.notmammoth: skip the mammoth generated tests

# Extension Support

Users can register a function to handle the encoding or decoding of
@ -203,6 +229,10 @@ You can run the tag 'codec.safe' to run tests or build in safe mode. e.g.
	go test -tags codec.safe -run Json
	go test -tags "alltests codec.safe" -run Suite

You can run the tag 'codec.notmono' to build bypassing the monomorphized code e.g.

	go test -tags codec.notmono -run Json

Running Benchmarks

	cd bench
@ -225,3 +255,87 @@ Embedded fields are encoded as if they exist in the top-level struct,
with some caveats. See Encode documentation.
*/
package codec

/*
Generics

Generics are used across to board to reduce boilerplate, and hopefully
improve performance by
  - reducing need for interface calls (de-virtualization)
  - resultant inlining of those calls

encoder/decoder --> Driver (json/cbor/...) --> input/output (bytes or io abstraction)

There are 2 * 5 * 2 (20) combinations of monomorphized values.

Key rules
  - do not use top-level generic functions.
    Due to type inference, monomorphizing them proves challenging
  - only use generic methods.
    Monomorphizing is done at the type once, and method names need not change
  - do not have method calls have a parameter of an encWriter or decReader.
    All those calls are handled directly by the driver.
  - Include a helper type for each parameterized thing, and add all generic functions to them e.g.
    helperEncWriter[T encWriter]
    helperEncReader[T decReader]
    helperEncDriver[T encDriver]
    helperDecDriver[T decDriver]
  - Always use T as the generic type name (when needed)
  - No inline types
  - No closures taking parameters of generic types
*/
/*
Naming convention:

Currently, as generic and non-generic types/functions/vars are put in the same files,
we suffer because:
  - build takes longer as non-generic code is built when a build tag wants only monomorphised code
  - files have many lines which are not used at runtime (due to type parameters)
  - code coverage is inaccurate on a single run

To resolve this, we are streamlining our file naming strategy.

Basically, we will have the following nomenclature for filenames:
  - fastpath (tag:notfastpath): *.notfastpath.*.go vs *.fastpath.*.go
  - typed parameters (tag:notmono): *.notmono.*.go vs *.mono.*.go
  - safe (tag:safe): *.safe.*.go vs *.unsafe.go
  - generated files: *.generated.go
  - all others (tags:N/A): *.go without safe/mono/fastpath/generated in the name

The following files will be affected and split/renamed accordingly

Base files:
  - binc.go
  - cbor.go
  - json.go
  - msgpack.go
  - simple.go
  - decode.go
  - encode.go

For each base file, split into __file__.go (containing type parameters) and __file__.base.go.
__file__.go will only build with notmono.

Other files:
  - fastpath.generated.go -> base.fastpath.generated.go and base.fastpath.notmono.generated.go
  - fastpath.not.go -> base.notfastpath.go
  - init.go -> init.notmono.go

Appropriate build tags will be included in the files, and the right ones only used for
monomorphization.
*/
/*
Caching Handle options for fast runtime use

If using cached values from Handle options, then
  - re-cache them at each reset() call
  - reset is always called at the start of each (Must)(En|De)code
  - which calls (en|de)coder.reset([]byte|io.Reader|String)
  - which calls (en|de)cDriver.reset()
  - at reset, (en|de)c(oder|Driver) can re-cache Handle options before each run

Some examples:
  - json: e.rawext,di,d,ks,is / d.rawext
  - decode: (decoderBase) d.jsms,mtr,str,
*/