diff --git a/go.mod b/go.mod
index dae5876ba..ba8e69eba 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.24
 toolchain go1.24.3
 
 // Replace go-swagger with our version that fixes (ours particularly) use of Go1.23
-replace github.com/go-swagger/go-swagger => codeberg.org/superseriousbusiness/go-swagger v0.31.0-gts-go1.23-fix
+replace github.com/go-swagger/go-swagger => codeberg.org/superseriousbusiness/go-swagger v0.32.3-gts-go1.23-fix
 
 // Replace modernc/sqlite with our version that fixes the concurrency INTERRUPT issue
 replace modernc.org/sqlite => gitlab.com/NyaaaWhatsUpDoc/sqlite v1.38.0-concurrency-workaround
@@ -34,16 +34,16 @@ require (
 	codeberg.org/gruf/go-storage v0.3.1
 	codeberg.org/gruf/go-structr v0.9.7
 	github.com/DmitriyVTitov/size v1.5.0
-	github.com/KimMachineGun/automemlimit v0.7.2
+	github.com/KimMachineGun/automemlimit v0.7.3
 	github.com/SherClockHolmes/webpush-go v1.4.0
 	github.com/buckket/go-blurhash v1.1.0
 	github.com/coreos/go-oidc/v3 v3.14.1
-	github.com/gin-contrib/cors v1.7.5
+	github.com/gin-contrib/cors v1.7.6
 	github.com/gin-contrib/gzip v1.2.3
 	github.com/gin-contrib/sessions v1.0.4
 	github.com/gin-gonic/gin v1.10.1
 	github.com/go-playground/form/v4 v4.2.1
-	github.com/go-swagger/go-swagger v0.31.0
+	github.com/go-swagger/go-swagger v0.32.3
 	github.com/google/go-cmp v0.7.0
 	github.com/google/uuid v1.6.0
 	github.com/gorilla/feeds v1.2.0
@@ -52,13 +52,13 @@ require (
 	github.com/k3a/html2text v1.2.1
 	github.com/microcosm-cc/bluemonday v1.0.27
 	github.com/miekg/dns v1.1.66
-	github.com/minio/minio-go/v7 v7.0.92
+	github.com/minio/minio-go/v7 v7.0.94
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/ncruces/go-sqlite3 v0.26.3
 	github.com/oklog/ulid v1.3.1
 	github.com/pquerna/otp v1.5.0
 	github.com/rivo/uniseg v0.4.7
-	github.com/spf13/cast v1.8.0
+	github.com/spf13/cast v1.9.2
 	github.com/spf13/cobra v1.9.1
 	github.com/spf13/pflag v1.0.6
 	github.com/spf13/viper v1.20.1
@@ -69,29 +69,29 @@ require (
 	github.com/tetratelabs/wazero v1.9.0
 	github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80
 	github.com/ulule/limiter/v3 v3.11.2
-	github.com/uptrace/bun v1.2.11
-	github.com/uptrace/bun/dialect/pgdialect v1.2.11
-	github.com/uptrace/bun/dialect/sqlitedialect v1.2.11
-	github.com/uptrace/bun/extra/bunotel v1.2.11
+	github.com/uptrace/bun v1.2.14
+	github.com/uptrace/bun/dialect/pgdialect v1.2.14
+	github.com/uptrace/bun/dialect/sqlitedialect v1.2.14
+	github.com/uptrace/bun/extra/bunotel v1.2.14
 	github.com/wagslane/go-password-validator v0.3.0
 	github.com/yuin/goldmark v1.7.12
-	go.opentelemetry.io/contrib/exporters/autoexport v0.61.0
-	go.opentelemetry.io/contrib/instrumentation/runtime v0.61.0
-	go.opentelemetry.io/otel v1.36.0
-	go.opentelemetry.io/otel/metric v1.36.0
-	go.opentelemetry.io/otel/sdk v1.36.0
-	go.opentelemetry.io/otel/sdk/metric v1.36.0
-	go.opentelemetry.io/otel/trace v1.36.0
+	go.opentelemetry.io/contrib/exporters/autoexport v0.62.0
+	go.opentelemetry.io/contrib/instrumentation/runtime v0.62.0
+	go.opentelemetry.io/otel v1.37.0
+	go.opentelemetry.io/otel/metric v1.37.0
+	go.opentelemetry.io/otel/sdk v1.37.0
+	go.opentelemetry.io/otel/sdk/metric v1.37.0
+	go.opentelemetry.io/otel/trace v1.37.0
 	go.uber.org/automaxprocs v1.6.0
 	golang.org/x/crypto v0.39.0
-	golang.org/x/image v0.27.0
-	golang.org/x/net v0.40.0
+	golang.org/x/image v0.28.0
+	golang.org/x/net v0.41.0
 	golang.org/x/oauth2 v0.30.0
 	golang.org/x/sys v0.33.0
 	golang.org/x/text v0.26.0
 	gopkg.in/mcuadros/go-syslog.v2 v2.3.0
 	gopkg.in/yaml.v3 v3.0.1
-	modernc.org/sqlite v1.37.1
+	modernc.org/sqlite v1.38.0
 	mvdan.cc/xurls/v2 v2.6.0
 )
 
@@ -108,7 +108,7 @@ require (
 	github.com/aymerick/douceur v0.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
-	github.com/bytedance/sonic v1.13.2 // indirect
+	github.com/bytedance/sonic v1.13.3 // indirect
 	github.com/bytedance/sonic/loader v0.2.4 // indirect
 	github.com/cenkalti/backoff/v5 v5.0.2 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -122,12 +122,12 @@ require (
 	github.com/dustin/go-humanize v1.0.1 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.8.0 // indirect
-	github.com/gabriel-vasile/mimetype v1.4.8 // indirect
-	github.com/gin-contrib/sse v1.0.0 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.9 // indirect
+	github.com/gin-contrib/sse v1.1.0 // indirect
 	github.com/go-errors/errors v1.1.1 // indirect
 	github.com/go-ini/ini v1.67.0 // indirect
 	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
-	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.23.0 // indirect
 	github.com/go-openapi/errors v0.22.0 // indirect
@@ -153,7 +153,7 @@ require (
 	github.com/gorilla/handlers v1.5.2 // indirect
 	github.com/gorilla/securecookie v1.1.2 // indirect
 	github.com/gorilla/sessions v1.4.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
 	github.com/huandu/xstrings v1.4.0 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -181,13 +181,13 @@ require (
 	github.com/ncruces/go-strftime v0.1.9 // indirect
 	github.com/ncruces/julianday v1.0.0 // indirect
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
-	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
 	github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_golang v1.22.0 // indirect
 	github.com/prometheus/client_model v0.6.2 // indirect
-	github.com/prometheus/common v0.64.0 // indirect
+	github.com/prometheus/common v0.65.0 // indirect
 	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
 	github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
@@ -204,36 +204,36 @@ require (
 	github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
 	github.com/toqueteos/webbrowser v1.2.0 // indirect
 	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
-	github.com/ugorji/go/codec v1.2.12 // indirect
+	github.com/ugorji/go/codec v1.3.0 // indirect
 	github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 // indirect
 	github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
 	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
 	go.mongodb.org/mongo-driver v1.17.3 // indirect
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
-	go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 //
indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.58.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 // indirect - go.opentelemetry.io/otel/log v0.12.2 // indirect - go.opentelemetry.io/otel/sdk/log v0.12.2 // indirect - go.opentelemetry.io/proto/otlp v1.6.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.62.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.13.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.13.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.59.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 // indirect + go.opentelemetry.io/otel/log v0.13.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.13.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/arch v0.16.0 // indirect + golang.org/x/arch v0.18.0 // indirect golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect golang.org/x/mod v0.25.0 // indirect golang.org/x/sync v0.15.0 // indirect golang.org/x/tools v0.33.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect - google.golang.org/grpc v1.72.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/grpc v1.73.0 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect modernc.org/libc v1.65.10 // indirect diff --git a/go.sum b/go.sum index 13d5cbc82..ea1dc23c9 100644 --- a/go.sum +++ b/go.sum @@ -54,12 +54,12 @@ codeberg.org/gruf/go-storage v0.3.1 h1:g66UIM/xXnEk9ejT+W0T9s/PODBZhXa/8ajzeY/ME codeberg.org/gruf/go-storage v0.3.1/go.mod h1:r43n/zi7YGOCl2iSl7AMI27D1zcWS65Bi2+5xDzypeo= codeberg.org/gruf/go-structr v0.9.7 h1:yQeIxTjYb6reNdgESk915twyjolydYBqat/mlZrP7bg= codeberg.org/gruf/go-structr v0.9.7/go.mod h1:9k5hYztZ4PsBS+m1v5hUTeFiVUBTLF5VA7d9cd1OEMs= -codeberg.org/superseriousbusiness/go-swagger v0.31.0-gts-go1.23-fix h1:+JvBZqsQfdT+ROnk2DkvXsKQ9QBorKKKBk5fBqw62I8= -codeberg.org/superseriousbusiness/go-swagger v0.31.0-gts-go1.23-fix/go.mod h1:WSigRRWEig8zV6t6Sm8Y+EmUjlzA/HoaZJ5edupq7po= +codeberg.org/superseriousbusiness/go-swagger v0.32.3-gts-go1.23-fix h1:k76/Th+bruqU/d+dB0Ru466ctTF2aVjKpisy/471ILE= +codeberg.org/superseriousbusiness/go-swagger 
v0.32.3-gts-go1.23-fix/go.mod h1:lAwO1nKff3qNRJYVQeTCl1am5pcNiiA2VyDf8TqzS24= github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW515g= github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0= -github.com/KimMachineGun/automemlimit v0.7.2 h1:DyfHI7zLWmZPn2Wqdy2AgTiUvrGPmnYWgwhHXtAegX4= -github.com/KimMachineGun/automemlimit v0.7.2/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM= +github.com/KimMachineGun/automemlimit v0.7.3 h1:oPgMp0bsWez+4fvgSa11Rd9nUDrd8RLtDjBoT3ro+/A= +github.com/KimMachineGun/automemlimit v0.7.3/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= @@ -85,8 +85,8 @@ github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8 github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/buckket/go-blurhash v1.1.0 h1:X5M6r0LIvwdvKiUtiNcRL2YlmOfMzYobI3VCKCZc9Do= github.com/buckket/go-blurhash v1.1.0/go.mod h1:aT2iqo5W9vu9GpyoLErKfTHwgODsZp3bQfXjXJUxNb8= -github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= -github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= +github.com/bytedance/sonic v1.13.3 h1:MS8gmaH16Gtirygw7jV91pDCN33NyMrPbN7qiYhEsF0= +github.com/bytedance/sonic v1.13.3/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= @@ -137,18 +137,18 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/ github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor v1.5.1 h1:XjQWBgdmQyqimslUh5r4tUGmoqzHmBFQOImkWGi2awg= github.com/fxamacker/cbor v1.5.1/go.mod h1:3aPGItF174ni7dDzd6JZ206H8cmr4GDNBGpPa971zsU= -github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= -github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= +github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY= +github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= github.com/gavv/httpexpect/v2 v2.17.0 h1:nIJqt5v5e4P7/0jODpX2gtSw+pHXUqdP28YcjqwDZmE= github.com/gavv/httpexpect/v2 v2.17.0/go.mod h1:E8ENFlT9MZ3Si2sfM6c6ONdwXV2noBCGkhA+lkJgkP0= -github.com/gin-contrib/cors v1.7.5 h1:cXC9SmofOrRg0w9PigwGlHG3ztswH6bqq4vJVXnvYMk= -github.com/gin-contrib/cors v1.7.5/go.mod h1:4q3yi7xBEDDWKapjT2o1V7mScKDDr8k+jZ0fSquGoy0= +github.com/gin-contrib/cors v1.7.6 h1:3gQ8GMzs1Ylpf70y8bMw4fVpycXIeX1ZemuSQIsnQQY= +github.com/gin-contrib/cors v1.7.6/go.mod h1:Ulcl+xN4jel9t1Ry8vqph23a60FwH9xVLd+3ykmTjOk= github.com/gin-contrib/gzip v1.2.3 h1:dAhT722RuEG330ce2agAs75z7yB+NKvX/ZM1r8w0u2U= github.com/gin-contrib/gzip v1.2.3/go.mod h1:ad72i4Bzmaypk8M762gNXa2wkxxjbz0icRNnuLJ9a/c= github.com/gin-contrib/sessions v1.0.4 h1:ha6CNdpYiTOK/hTp05miJLbpTSNfOnFg5Jm2kbcqy8U= github.com/gin-contrib/sessions v1.0.4/go.mod 
h1:ccmkrb2z6iU2osiAHZG3x3J4suJK+OU27oqzlWOqQgs= -github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E= -github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0= +github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w= +github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM= github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ= github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= @@ -160,8 +160,8 @@ github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= @@ -249,8 +249,8 @@ github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzq github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -311,8 +311,8 @@ github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.92 h1:jpBFWyRS3p8P/9tsRc+NuvqoFi7qAmTCFPoRFmobbVw= -github.com/minio/minio-go/v7 v7.0.92/go.mod h1:vTIc8DNcnAZIhyFsk8EB90AbPjj3j68aWIEQCiPj7d0= +github.com/minio/minio-go/v7 v7.0.94 h1:1ZoksIKPyaSt64AVOyaQvhDOgVC3MfZsWM6mZXRUGtM= +github.com/minio/minio-go/v7 v7.0.94/go.mod h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= 
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -342,8 +342,8 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= -github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -360,8 +360,8 @@ github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/ github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= -github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= @@ -398,8 +398,8 @@ github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIK github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk= -github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= +github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= @@ -409,7 +409,6 @@ github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqj github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= @@ -417,7 +416,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= @@ -460,18 +458,18 @@ github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9r github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= -github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA= +github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA= github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI= -github.com/uptrace/bun v1.2.11 h1:l9dTymsdZZAoSZ1+Qo3utms0RffgkDbIv+1UGk8N1wQ= -github.com/uptrace/bun v1.2.11/go.mod h1:ww5G8h59UrOnCHmZ8O1I/4Djc7M/Z3E+EWFS2KLB6dQ= -github.com/uptrace/bun/dialect/pgdialect v1.2.11 h1:n0VKWm1fL1dwJK5TRxYYLaRKRe14BOg2+AQgpvqzG/M= -github.com/uptrace/bun/dialect/pgdialect v1.2.11/go.mod h1:NvV1S/zwtwBnW8yhJ3XEKAQEw76SkeH7yUhfrx3W1Eo= -github.com/uptrace/bun/dialect/sqlitedialect v1.2.11 h1:t4OIcbkWnRPshRj7ZnbHVwUENa3OHhCUruyFcl3P+TY= -github.com/uptrace/bun/dialect/sqlitedialect v1.2.11/go.mod h1:XHFFTvdlNtNFWPhpRAConN6DnVgt9EHr5G5IIarHYyg= -github.com/uptrace/bun/extra/bunotel v1.2.11 h1:ddt96XrbvlVZu5vBddP6WmbD6bdeJTaWY9jXlfuJKZE= -github.com/uptrace/bun/extra/bunotel v1.2.11/go.mod h1:w6Mhie5tLFeP+5ryjq4PvgZEESRJ1iL2cbvxhm+f8q4= +github.com/uptrace/bun v1.2.14 h1:5yFSfi/yVWEzQ2lAaHz+JfWN9AHmqYtNmlbaUbAp3rU= +github.com/uptrace/bun v1.2.14/go.mod h1:ZS4nPaEv2Du3OFqAD/irk3WVP6xTB3/9TWqjJbgKYBU= +github.com/uptrace/bun/dialect/pgdialect v1.2.14 h1:1jmCn7zcYIJDSk1pJO//b11k9NQP1rpWZoyxfoNdpzI= +github.com/uptrace/bun/dialect/pgdialect v1.2.14/go.mod h1:MrRlsIpWIyOCNosWuG8bVtLb80JyIER5ci0VlTa38dU= +github.com/uptrace/bun/dialect/sqlitedialect v1.2.14 h1:eLXmNpy2TSsWJNpyIIIeLBa5M+Xxc4n8jX5ASeuvWrg= +github.com/uptrace/bun/dialect/sqlitedialect v1.2.14/go.mod h1:oORBd9Y7RiAOHAshjuebSFNPZNPLXYcvEWmibuJ8RRk= +github.com/uptrace/bun/extra/bunotel v1.2.14 h1:LPg/1kEOcwex5w7+Boh6Rdc3xi1PuMVZV06isOPEPaU= +github.com/uptrace/bun/extra/bunotel v1.2.14/go.mod h1:V509v+akUAx31NbN96WEhkY+rBPJxI0Ul+beKNN1Ato= github.com/uptrace/opentelemetry-go-extra/otelsql 
v0.3.2 h1:ZjUj9BLYf9PEqBn8W/OapxhPjVRdC6CsXTdULHsyk5c= github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2/go.mod h1:O8bHQfyinKwTXKkiKNGmLQS7vRsqRxIQTFZpYpHK3IQ= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= @@ -507,60 +505,60 @@ go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeH go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 h1:RyrtJzu5MAmIcbRrwg75b+w3RlZCP0vJByDVzcpAe3M= -go.opentelemetry.io/contrib/bridges/prometheus v0.61.0/go.mod h1:tirr4p9NXbzjlbruiRGp53IzlYrDk5CO2fdHj0sSSaY= -go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 h1:XfzKtKSrbtYk9TNCF8dkO0Y9M7IOfb4idCwBOTwGBiI= -go.opentelemetry.io/contrib/exporters/autoexport v0.61.0/go.mod h1:N6otC+qXTD5bAnbK2O1f/1SXq3cX+3KYSWrkBUqG0cw= -go.opentelemetry.io/contrib/instrumentation/runtime v0.61.0 h1:oIZsTHd0YcrvvUCN2AaQqyOcd685NQ+rFmrajveCIhA= -go.opentelemetry.io/contrib/instrumentation/runtime v0.61.0/go.mod h1:X4KSPIvxnY/G5c9UOGXtFoL91t1gmlHpDQzeK5Zc/Bw= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2/go.mod h1:DvPtKE63knkDVP88qpatBj81JxN+w1bqfVbsbCbj1WY= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 h1:tPLwQlXbJ8NSOfZc4OkgU5h2A38M4c9kfHSVc4PFQGs= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2/go.mod h1:QTnxBwT/1rBIgAG1goq6xMydfYOBKU6KTiYF4fp5zL8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 h1:zwdo1gS2eH26Rg+CoqVQpEK1h8gvt5qyU5Kk5Bixvow= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0/go.mod h1:rUKCPscaRWWcqGT6HnEmYrK+YNe5+Sw64xgQTOJ5b30= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 h1:gAU726w9J8fwr4qRDqu1GYMNNs4gXrU+Pv20/N1UpB4= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0/go.mod h1:RboSDkp7N292rgu+T0MgVt2qgFGu6qa1RpZDOtpL76w= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= -go.opentelemetry.io/otel/exporters/prometheus v0.58.0 h1:CJAxWKFIqdBennqxJyOgnt5LqkeFRT+Mz3Yjz3hL+h8= -go.opentelemetry.io/otel/exporters/prometheus v0.58.0/go.mod h1:7qo/4CLI+zYSNbv0GMNquzuss2FVZo3OYrGh96n4HNc= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 h1:12vMqzLLNZtXuXbJhSENRg+Vvx+ynNilV8twBLBsXMY= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2/go.mod 
h1:ZccPZoPOoq8x3Trik/fCsba7DEYDUnN6yX79pgp2BUQ= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 h1:G8Xec/SgZQricwWBJF/mHZc7A02YHedfFDENwJEdRA0= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY= -go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc= -go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E= -go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= -go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= -go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= -go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1xIZ/k6W0= -go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY= -go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc h1:uqxdywfHqqCl6LmZzI3pUnXT1RGFYyUgxj0AkWPFxi0= -go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc/go.mod h1:TY/N/FT7dmFrP/r5ym3g0yysP1DefqGpAZr4f82P0dE= -go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= -go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= -go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= -go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI= -go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc= +go.opentelemetry.io/contrib/bridges/prometheus v0.62.0 h1:0mfk3D3068LMGpIhxwc0BqRlBOBHVgTP9CygmnJM/TI= +go.opentelemetry.io/contrib/bridges/prometheus v0.62.0/go.mod h1:hStk98NJy1wvlrXIqWsli+uELxRRseBMld+gfm2xPR4= +go.opentelemetry.io/contrib/exporters/autoexport v0.62.0 h1:aCpZ6vvmOj5GHg1eUygjS/05mlQaEBsQDdTw5yT8EsE= +go.opentelemetry.io/contrib/exporters/autoexport v0.62.0/go.mod h1:1xHkmmL3bQm8m86HVoZTdgK/LIY5JpxdAWjog6cdtUs= +go.opentelemetry.io/contrib/instrumentation/runtime v0.62.0 h1:ZIt0ya9/y4WyRIzfLC8hQRRsWg0J9M9GyaGtIMiElZI= +go.opentelemetry.io/contrib/instrumentation/runtime v0.62.0/go.mod h1:F1aJ9VuiKWOlWwKdTYDUp1aoS0HzQxg38/VLxKmhm5U= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.13.0 h1:z6lNIajgEBVtQZHjfw2hAccPEBDs+nx58VemmXWa2ec= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.13.0/go.mod h1:+kyc3bRx/Qkq05P6OCu3mTEIOxYRYzoIg+JsUp5X+PM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.13.0 h1:zUfYw8cscHHLwaY8Xz3fiJu+R59xBnkgq2Zr1lwmK/0= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.13.0/go.mod h1:514JLMCcFLQFS8cnTepOk6I09cKWJ5nGHBxHrMJ8Yfg= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0 h1:zG8GlgXCJQd5BU98C0hZnBbElszTmUgCNCfYneaDL0A= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc 
v1.37.0/go.mod h1:hOfBCz8kv/wuq73Mx2H2QnWokh/kHZxkh6SNF2bdKtw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0 h1:9PgnL3QNlj10uGxExowIDIZu66aVBwWhXmbOp1pa6RA= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0/go.mod h1:0ineDcLELf6JmKfuo0wvvhAVMuxWFYvkTin2iV4ydPQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA= +go.opentelemetry.io/otel/exporters/prometheus v0.59.0 h1:HHf+wKS6o5++XZhS98wvILrLVgHxjA/AMjqHKes+uzo= +go.opentelemetry.io/otel/exporters/prometheus v0.59.0/go.mod h1:R8GpRXTZrqvXHDEGVH5bF6+JqAZcK8PjJcZ5nGhEWiE= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0 h1:yEX3aC9KDgvYPhuKECHbOlr5GLwH6KTjLJ1sBSkkxkc= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0/go.mod h1:/GXR0tBmmkxDaCUGahvksvp66mx4yh5+cFXgSlhg0vQ= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0 h1:6VjV6Et+1Hd2iLZEPtdV7vie80Yyqf7oikJLjQ/myi0= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0/go.mod h1:u8hcp8ji5gaM/RfcOo8z9NMnf1pVLfVY7lBY2VOGuUU= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 h1:SNhVp/9q4Go/XHBkQ1/d5u9P/U+L1yaGPoi0x+mStaI= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0/go.mod h1:tx8OOlGH6R4kLV67YaYO44GFXloEjGPZuMjEkaaqIp4= +go.opentelemetry.io/otel/log v0.13.0 h1:yoxRoIZcohB6Xf0lNv9QIyCzQvrtGZklVbdCoyb7dls= +go.opentelemetry.io/otel/log v0.13.0/go.mod h1:INKfG4k1O9CL25BaM1qLe0zIedOpvlS5Z7XgSbmN83E= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/log v0.13.0 h1:I3CGUszjM926OphK8ZdzF+kLqFvfRY/IIoFq/TjwfaQ= +go.opentelemetry.io/otel/sdk/log v0.13.0/go.mod h1:lOrQyCCXmpZdN7NchXb6DOZZa1N5G1R2tm5GMMTpDBw= +go.opentelemetry.io/otel/sdk/log/logtest v0.13.0 h1:9yio6AFZ3QD9j9oqshV1Ibm9gPLlHNxurno5BreMtIA= +go.opentelemetry.io/otel/sdk/log/logtest v0.13.0/go.mod h1:QOGiAJHl+fob8Nu85ifXfuQYmJTFAvcrxL6w5/tu168= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= +go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod 
h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -golang.org/x/arch v0.16.0 h1:foMtLTdyOmIniqWCHjY6+JxuC54XP1fDwx4N0ASyW+U= -golang.org/x/arch v0.16.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= +golang.org/x/arch v0.18.0 h1:WN9poc33zL4AzGxqf8VtpKUnGvMi8O9lhNyBMF/85qc= +golang.org/x/arch v0.18.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= @@ -572,8 +570,8 @@ golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= -golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w= -golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g= +golang.org/x/image v0.28.0 h1:gdem5JW1OLS4FbkWgLO+7ZeFzYtL3xClb97GaUzYMFE= +golang.org/x/image v0.28.0/go.mod h1:GUJYXtnGKEUgggyzh+Vxt+AviiCcyiwpsl8iQ8MvwGY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -596,8 +594,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -660,12 +658,12 @@ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxb golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0= -google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= -google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/KimMachineGun/automemlimit/memlimit/memlimit.go b/vendor/github.com/KimMachineGun/automemlimit/memlimit/memlimit.go index cbd53ce3a..b23980a51 100644 --- a/vendor/github.com/KimMachineGun/automemlimit/memlimit/memlimit.go +++ b/vendor/github.com/KimMachineGun/automemlimit/memlimit/memlimit.go @@ -169,7 +169,7 @@ func SetGoMemLimitWithOpts(opts ...Option) (_ int64, _err error) { // set the memory limit and start refresh limit, err := updateGoMemLimit(uint64(snapshot), provider, cfg.logger) - go refresh(provider, cfg.logger, cfg.refresh) + refresh(provider, cfg.logger, cfg.refresh) if err != nil { if errors.Is(err, ErrNoLimit) { cfg.logger.Info("memory is not limited, skipping") @@ -200,7 +200,7 @@ func updateGoMemLimit(currLimit uint64, provider Provider, logger *slog.Logger) return newLimit, nil } -// refresh periodically fetches the memory limit from the provider and reapplies it if it has changed. +// refresh spawns a goroutine that runs every refresh duration and updates the GOMEMLIMIT if it has changed. // See more details in the documentation of WithRefreshInterval. func refresh(provider Provider, logger *slog.Logger, refresh time.Duration) { if refresh == 0 { @@ -210,22 +210,24 @@ func refresh(provider Provider, logger *slog.Logger, refresh time.Duration) { provider = noErrNoLimitProvider(provider) t := time.NewTicker(refresh) - for range t.C { - err := func() (_err error) { - snapshot := debug.SetMemoryLimit(-1) - defer rollbackOnPanic(logger, snapshot, &_err) + go func() { + for range t.C { + err := func() (_err error) { + snapshot := debug.SetMemoryLimit(-1) + defer rollbackOnPanic(logger, snapshot, &_err) - _, err := updateGoMemLimit(uint64(snapshot), provider, logger) + _, err := updateGoMemLimit(uint64(snapshot), provider, logger) + if err != nil { + return err + } + + return nil + }() if err != nil { - return err + logger.Error("failed to refresh GOMEMLIMIT", slog.Any("error", err)) } - - return nil - }() - if err != nil { - logger.Error("failed to refresh GOMEMLIMIT", slog.Any("error", err)) } - } + }() } // rollbackOnPanic rollbacks to the snapshot on panic. 
diff --git a/vendor/github.com/bytedance/sonic/.gitmodules b/vendor/github.com/bytedance/sonic/.gitmodules index ea84b991a..5a2d998ab 100644 --- a/vendor/github.com/bytedance/sonic/.gitmodules +++ b/vendor/github.com/bytedance/sonic/.gitmodules @@ -4,3 +4,6 @@ [submodule "tools/simde"] path = tools/simde url = https://github.com/simd-everywhere/simde.git +[submodule "fuzz/go-fuzz-corpus"] + path = fuzz/go-fuzz-corpus + url = https://github.com/dvyukov/go-fuzz-corpus.git diff --git a/vendor/github.com/bytedance/sonic/README.md b/vendor/github.com/bytedance/sonic/README.md index 317878d09..41fe77658 100644 --- a/vendor/github.com/bytedance/sonic/README.md +++ b/vendor/github.com/bytedance/sonic/README.md @@ -385,12 +385,12 @@ See [ast/visitor.go](https://github.com/bytedance/sonic/blob/main/ast/visitor.go ## Compatibility -For developers who want to use sonic to meet diffirent scenarios, we provide some integrated configs as `sonic.API` +For developers who want to use sonic to meet different scenarios, we provide some integrated configs as `sonic.API` - `ConfigDefault`: the sonic's default config (`EscapeHTML=false`,`SortKeys=false`...) to run sonic fast meanwhile ensure security. - `ConfigStd`: the std-compatible config (`EscapeHTML=true`,`SortKeys=true`...) - `ConfigFastest`: the fastest config (`NoQuoteTextMarshaler=true`) to run on sonic as fast as possible. -Sonic **DOES NOT** ensure to support all environments, due to the difficulty of developing high-performance codes. On non-sonic-supporting environment, the implementation will fall back to `encoding/json`. Thus beflow configs will all equal to `ConfigStd`. +Sonic **DOES NOT** ensure to support all environments, due to the difficulty of developing high-performance codes. On non-sonic-supporting environment, the implementation will fall back to `encoding/json`. Thus below configs will all equal to `ConfigStd`. ## Tips diff --git a/vendor/github.com/bytedance/sonic/api.go b/vendor/github.com/bytedance/sonic/api.go index 406715eca..3858d9a80 100644 --- a/vendor/github.com/bytedance/sonic/api.go +++ b/vendor/github.com/bytedance/sonic/api.go @@ -94,6 +94,9 @@ type Config struct { // Encode Infinity or Nan float into `null`, instead of returning an error. EncodeNullForInfOrNan bool + + // CaseSensitive indicates that the decoder should not ignore the case of object keys. + CaseSensitive bool } var ( @@ -111,7 +114,6 @@ var ( // ConfigFastest is the fastest config of APIs, aiming at speed. ConfigFastest = Config{ - NoQuoteTextMarshaler: true, NoValidateJSONMarshaler: true, NoValidateJSONSkip: true, }.Froze() diff --git a/vendor/github.com/bytedance/sonic/ast/iterator.go b/vendor/github.com/bytedance/sonic/ast/iterator.go index 1052dd0a0..978028a65 100644 --- a/vendor/github.com/bytedance/sonic/ast/iterator.go +++ b/vendor/github.com/bytedance/sonic/ast/iterator.go @@ -176,7 +176,7 @@ type Scanner func(path Sequence, node *Node) bool // Especially, if the node is not V_ARRAY or V_OBJECT, // the node itself will be returned and Sequence.Index == -1. 
// -// NOTICE: A unsetted node WON'T trigger sc, but its index still counts into Path.Index +// NOTICE: An unset node WON'T trigger sc, but its index still counts into Path.Index func (self *Node) ForEach(sc Scanner) error { if err := self.checkRaw(); err != nil { return err diff --git a/vendor/github.com/bytedance/sonic/ast/node.go b/vendor/github.com/bytedance/sonic/ast/node.go index 17964c32f..1c5ff6439 100644 --- a/vendor/github.com/bytedance/sonic/ast/node.go +++ b/vendor/github.com/bytedance/sonic/ast/node.go @@ -509,6 +509,23 @@ func (self *Node) Float64() (float64, error) { } } +func (self *Node) StrictBool() (bool, error) { + if err := self.checkRaw(); err!= nil { + return false, err + } + switch self.t { + case types.V_TRUE : return true, nil + case types.V_FALSE : return false, nil + case _V_ANY : + any := self.packAny() + switch v := any.(type) { + case bool : return v, nil + default : return false, ErrUnsupportType + } + default : return false, ErrUnsupportType + } +} + // Float64 exports underlying float64 value, including V_NUMBER, V_ANY func (self *Node) StrictFloat64() (float64, error) { if err := self.checkRaw(); err != nil { @@ -776,7 +793,7 @@ func (self *Node) Pop() error { } // Move moves the child at src index to dst index, -// meanwhile slides sliblings from src+1 to dst. +// meanwhile slides siblings from src+1 to dst. // // WARN: this will change address of elements, which is a dangerous action. func (self *Node) Move(dst, src int) error { @@ -816,7 +833,7 @@ func (self *Node) Move(dst, src int) error { return nil } -// SetAny wraps val with V_ANY node, and Add() the node. +// AddAny wraps val with V_ANY node, and Add() the node. func (self *Node) AddAny(val interface{}) error { return self.Add(NewAny(val)) } @@ -938,7 +955,7 @@ func (self *Node) Map() (map[string]interface{}, error) { return self.toGenericObject() } -// MapUseNumber loads all keys of an object node, with numeric nodes casted to json.Number +// MapUseNumber loads all keys of an object node, with numeric nodes cast to json.Number func (self *Node) MapUseNumber() (map[string]interface{}, error) { if self.isAny() { any := self.packAny() @@ -1083,7 +1100,7 @@ func (self *Node) Array() ([]interface{}, error) { return self.toGenericArray() } -// ArrayUseNumber loads all indexes of an array node, with numeric nodes casted to json.Number +// ArrayUseNumber loads all indexes of an array node, with numeric nodes cast to json.Number func (self *Node) ArrayUseNumber() ([]interface{}, error) { if self.isAny() { any := self.packAny() @@ -1149,7 +1166,7 @@ func (self *Node) unsafeArray() (*linkedNodes, error) { // Interface loads all children under all paths from this node, // and converts itself as generic type. 
-// WARN: all numeric nodes are casted to float64 +// WARN: all numeric nodes are cast to float64 func (self *Node) Interface() (interface{}, error) { if err := self.checkRaw(); err != nil { return nil, err @@ -1193,7 +1210,7 @@ func (self *Node) packAny() interface{} { } // InterfaceUseNumber works same with Interface() -// except numeric nodes are casted to json.Number +// except numeric nodes are cast to json.Number func (self *Node) InterfaceUseNumber() (interface{}, error) { if err := self.checkRaw(); err != nil { return nil, err diff --git a/vendor/github.com/bytedance/sonic/ast/parser.go b/vendor/github.com/bytedance/sonic/ast/parser.go index 30bd1f451..aee96f86a 100644 --- a/vendor/github.com/bytedance/sonic/ast/parser.go +++ b/vendor/github.com/bytedance/sonic/ast/parser.go @@ -63,7 +63,7 @@ func (self *Parser) delim() types.ParsingError { return types.ERR_EOF } - /* check for the delimtier */ + /* check for the delimiter */ if self.s[p] != ':' { return types.ERR_INVALID_CHAR } @@ -82,7 +82,7 @@ func (self *Parser) object() types.ParsingError { return types.ERR_EOF } - /* check for the delimtier */ + /* check for the delimiter */ if self.s[p] != '{' { return types.ERR_INVALID_CHAR } @@ -101,7 +101,7 @@ func (self *Parser) array() types.ParsingError { return types.ERR_EOF } - /* check for the delimtier */ + /* check for the delimiter */ if self.s[p] != '[' { return types.ERR_INVALID_CHAR } @@ -638,7 +638,7 @@ func Loads(src string) (int, interface{}, error) { } } -// LoadsUseNumber parse all json into interface{}, with numeric nodes casted to json.Number +// LoadsUseNumber parse all json into interface{}, with numeric nodes cast to json.Number func LoadsUseNumber(src string) (int, interface{}, error) { ps := &Parser{s: src} np, err := ps.Parse() diff --git a/vendor/github.com/bytedance/sonic/ast/visitor.go b/vendor/github.com/bytedance/sonic/ast/visitor.go index dc0478513..fc71d40cb 100644 --- a/vendor/github.com/bytedance/sonic/ast/visitor.go +++ b/vendor/github.com/bytedance/sonic/ast/visitor.go @@ -178,7 +178,7 @@ func (self *traverser) decodeArray() error { /* allocate array space and parse every element */ if err := self.visitor.OnArrayBegin(_DEFAULT_NODE_CAP); err != nil { if err == VisitOPSkip { - // NOTICE: for user needs to skip entiry object + // NOTICE: for user needs to skip entry object self.parser.p -= 1 if _, e := self.parser.skipFast(); e != 0 { return e @@ -233,7 +233,7 @@ func (self *traverser) decodeObject() error { /* allocate object space and decode each pair */ if err := self.visitor.OnObjectBegin(_DEFAULT_NODE_CAP); err != nil { if err == VisitOPSkip { - // NOTICE: for user needs to skip entiry object + // NOTICE: for user needs to skip entry object self.parser.p -= 1 if _, e := self.parser.skipFast(); e != 0 { return e @@ -328,5 +328,5 @@ func (self *traverser) decodeString(iv int64, ep int) error { } // If visitor return this error on `OnObjectBegin()` or `OnArrayBegin()`, -// the transverer will skip entiry object or array +// the traverser will skip entry object or array var VisitOPSkip = errors.New("") diff --git a/vendor/github.com/bytedance/sonic/compat.go b/vendor/github.com/bytedance/sonic/compat.go index b694d7ce9..ec996493a 100644 --- a/vendor/github.com/bytedance/sonic/compat.go +++ b/vendor/github.com/bytedance/sonic/compat.go @@ -87,7 +87,17 @@ func (cfg frozenConfig) UnmarshalFromString(buf string, val interface{}) error { if cfg.DisallowUnknownFields { dec.DisallowUnknownFields() } - return dec.Decode(val) + err := dec.Decode(val) + if err != 
nil { + return err + } + + // check the trailing chars + offset := dec.InputOffset() + if t, err := dec.Token(); !(t == nil && err == io.EOF) { + return &json.SyntaxError{ Offset: offset} + } + return nil } // Unmarshal is implemented by sonic diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/api/stream.go b/vendor/github.com/bytedance/sonic/internal/decoder/api/stream.go index 8a8102dd5..ecf120462 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/api/stream.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/api/stream.go @@ -76,11 +76,12 @@ func (self *StreamDecoder) Decode(val interface{}) (err error) { if y := native.SkipOneFast(&src, &x); y < 0 { if self.readMore() { goto try_skip - } else { - err = SyntaxError{e, self.s, types.ParsingError(-s), ""} - self.setErr(err) - return + } + if self.err == nil { + self.err = SyntaxError{e, self.s, types.ParsingError(-s), ""} + self.setErr(self.err) } + return self.err } else { s = y + s e = x + s diff --git a/vendor/github.com/bytedance/sonic/internal/decoder/optdec/node.go b/vendor/github.com/bytedance/sonic/internal/decoder/optdec/node.go index 774b6eef7..085e81102 100644 --- a/vendor/github.com/bytedance/sonic/internal/decoder/optdec/node.go +++ b/vendor/github.com/bytedance/sonic/internal/decoder/optdec/node.go @@ -12,7 +12,7 @@ import ( type Context struct { Parser *Parser efacePool *efacePool - Stack bounedStack + Stack boundedStack Utf8Inv bool } @@ -26,20 +26,20 @@ type parentStat struct { con unsafe.Pointer remain uint64 } -type bounedStack struct { +type boundedStack struct { stack []parentStat index int } -func newStack(size int) bounedStack { - return bounedStack{ +func newStack(size int) boundedStack { + return boundedStack{ stack: make([]parentStat, size + 2), index: 0, } } //go:nosplit -func (s *bounedStack) Pop() (unsafe.Pointer, int, bool){ +func (s *boundedStack) Pop() (unsafe.Pointer, int, bool){ s.index-- con := s.stack[s.index].con remain := s.stack[s.index].remain &^ (uint64(1) << 63) @@ -50,7 +50,7 @@ func (s *bounedStack) Pop() (unsafe.Pointer, int, bool){ } //go:nosplit -func (s *bounedStack) Push(p unsafe.Pointer, remain int, isObj bool) { +func (s *boundedStack) Push(p unsafe.Pointer, remain int, isObj bool) { s.stack[s.index].con = p s.stack[s.index].remain = uint64(remain) if isObj { @@ -1253,7 +1253,7 @@ func (node *Node) AsEfaceFallback(ctx *Context) (interface{}, error) { if ctx.Parser.options & (1 << _F_use_number) != 0 { num, ok := node.AsNumber(ctx) if !ok { - // skip the unmacthed type + // skip the unmatched type *node = NewNode(node.Next()) return nil, newUnmatched(node.Position(), rt.JsonNumberType) } else { @@ -1275,13 +1275,13 @@ func (node *Node) AsEfaceFallback(ctx *Context) (interface{}, error) { return f, nil } - // skip the unmacthed type + // skip the unmatched type *node = NewNode(node.Next()) return nil, newUnmatched(node.Position(), rt.Int64Type) } else { num, ok := node.AsF64(ctx) if !ok { - // skip the unmacthed type + // skip the unmatched type *node = NewNode(node.Next()) return nil, newUnmatched(node.Position(), rt.Float64Type) } else { diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/alg/mapiter.go b/vendor/github.com/bytedance/sonic/internal/encoder/alg/mapiter.go index 032ae3b8a..090afac13 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/alg/mapiter.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/alg/mapiter.go @@ -97,17 +97,18 @@ func (self *MapIterator) append(t *rt.GoType, k unsafe.Pointer, v unsafe.Pointer 
func (self *MapIterator) appendGeneric(p *_MapPair, t *rt.GoType, v reflect.Kind, k unsafe.Pointer) error { switch v { - case reflect.Int : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int)(k)), 10)) ; return nil - case reflect.Int8 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int8)(k)), 10)) ; return nil - case reflect.Int16 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int16)(k)), 10)) ; return nil - case reflect.Int32 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int32)(k)), 10)) ; return nil - case reflect.Int64 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int64)(k)), 10)) ; return nil + case reflect.Int : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int)(k)), 10)) ; return nil + case reflect.Int8 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int8)(k)), 10)) ; return nil + case reflect.Int16 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int16)(k)), 10)) ; return nil + case reflect.Int32 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int32)(k)), 10)) ; return nil + case reflect.Int64 : p.k = rt.Mem2Str(strconv.AppendInt(p.m[:0], int64(*(*int64)(k)), 10)) ; return nil case reflect.Uint : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uint)(k)), 10)) ; return nil case reflect.Uint8 : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uint8)(k)), 10)) ; return nil case reflect.Uint16 : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uint16)(k)), 10)) ; return nil case reflect.Uint32 : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uint32)(k)), 10)) ; return nil - case reflect.Uint64 : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uint64)(k)), 10)) ; return nil + case reflect.Uint64 : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uint64)(k)), 10)) ; return nil case reflect.Uintptr : p.k = rt.Mem2Str(strconv.AppendUint(p.m[:0], uint64(*(*uintptr)(k)), 10)) ; return nil + case reflect.Bool : if *(*bool)(k) { p.k = "true" } else { p.k = "false" }; return nil case reflect.Interface : return self.appendInterface(p, t, k) case reflect.Struct, reflect.Ptr : return self.appendConcrete(p, t, k) default : panic("unexpected map key type") diff --git a/vendor/github.com/bytedance/sonic/internal/encoder/alg/spec.go b/vendor/github.com/bytedance/sonic/internal/encoder/alg/spec.go index 6f76ac739..ecdbfb7bd 100644 --- a/vendor/github.com/bytedance/sonic/internal/encoder/alg/spec.go +++ b/vendor/github.com/bytedance/sonic/internal/encoder/alg/spec.go @@ -21,6 +21,7 @@ package alg import ( "runtime" + "strconv" "unsafe" "github.com/bytedance/sonic/internal/native" @@ -177,22 +178,9 @@ func F32toa(buf []byte, v float32) ([]byte) { } func I64toa(buf []byte, v int64) ([]byte) { - buf = rt.GuardSlice2(buf, 32) - ret := native.I64toa((*byte)(rt.IndexByte(buf, len(buf))), v) - if ret > 0 { - return buf[:len(buf)+ret] - } else { - return buf - } + return strconv.AppendInt(buf, v, 10) } func U64toa(buf []byte, v uint64) ([]byte) { - buf = rt.GuardSlice2(buf, 32) - ret := native.U64toa((*byte)(rt.IndexByte(buf, len(buf))), v) - if ret > 0 { - return buf[:len(buf)+ret] - } else { - return buf - } + return strconv.AppendUint(buf, v, 10) } - diff --git a/vendor/github.com/bytedance/sonic/internal/optcaching/fcache.go b/vendor/github.com/bytedance/sonic/internal/optcaching/fcache.go index 010028203..f207b4b16 100644 --- a/vendor/github.com/bytedance/sonic/internal/optcaching/fcache.go +++ b/vendor/github.com/bytedance/sonic/internal/optcaching/fcache.go @@ -324,7 +324,7 @@ func (self 
*NormalFieldMap) Set(fields []resolver.FieldMeta) { } -// use hashnap +// use hashmap type FallbackFieldMap struct { oders []string inner map[string]int diff --git a/vendor/github.com/bytedance/sonic/sonic.go b/vendor/github.com/bytedance/sonic/sonic.go index 395730362..9645d5e08 100644 --- a/vendor/github.com/bytedance/sonic/sonic.go +++ b/vendor/github.com/bytedance/sonic/sonic.go @@ -90,6 +90,9 @@ func (cfg Config) Froze() API { if cfg.ValidateString { api.decoderOpts |= decoder.OptionValidateString } + if cfg.CaseSensitive { + api.decoderOpts |= decoder.OptionCaseSensitive + } return api } diff --git a/vendor/github.com/bytedance/sonic/utf8/utf8.go b/vendor/github.com/bytedance/sonic/utf8/utf8.go index 9d8bcc958..c1403fdb2 100644 --- a/vendor/github.com/bytedance/sonic/utf8/utf8.go +++ b/vendor/github.com/bytedance/sonic/utf8/utf8.go @@ -62,7 +62,7 @@ func CorrectWith(dst []byte, src []byte, repl string) []byte { return dst } -// Validate is a simd-accelereated drop-in replacement for the standard library's utf8.Valid. +// Validate is a simd-accelerated drop-in replacement for the standard library's utf8.Valid. func Validate(src []byte) bool { if src == nil { return true diff --git a/vendor/github.com/gabriel-vasile/mimetype/.golangci.yml b/vendor/github.com/gabriel-vasile/mimetype/.golangci.yml new file mode 100644 index 000000000..f2058ccc5 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/.golangci.yml @@ -0,0 +1,5 @@ +version: "2" +linters: + exclusions: + presets: + - std-error-handling diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go b/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go deleted file mode 100644 index 5b2ecee44..000000000 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go +++ /dev/null @@ -1,567 +0,0 @@ -// Copyright (c) 2009 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package json provides a JSON value parser state machine. -// This package is almost entirely copied from the Go stdlib. 
-// Changes made to it permit users of the package to tell -// if some slice of bytes is a valid beginning of a json string. -package json - -import ( - "fmt" - "sync" -) - -type ( - scanStatus int -) - -const ( - parseObjectKey = iota // parsing object key (before colon) - parseObjectValue // parsing object value (after colon) - parseArrayValue // parsing array value - - scanContinue scanStatus = iota // uninteresting byte - scanBeginLiteral // end implied by next result != scanContinue - scanBeginObject // begin object - scanObjectKey // just finished object key (string) - scanObjectValue // just finished non-last object value - scanEndObject // end object (implies scanObjectValue if possible) - scanBeginArray // begin array - scanArrayValue // just finished array value - scanEndArray // end array (implies scanArrayValue if possible) - scanSkipSpace // space byte; can skip; known to be last "continue" result - scanEnd // top-level value ended *before* this byte; known to be first "stop" result - scanError // hit an error, scanner.err. - - // This limits the max nesting depth to prevent stack overflow. - // This is permitted by https://tools.ietf.org/html/rfc7159#section-9 - maxNestingDepth = 10000 -) - -type ( - scanner struct { - step func(*scanner, byte) scanStatus - parseState []int - endTop bool - err error - index int - } -) - -var scannerPool = sync.Pool{ - New: func() any { - return &scanner{} - }, -} - -func newScanner() *scanner { - s := scannerPool.Get().(*scanner) - s.reset() - return s -} - -func freeScanner(s *scanner) { - // Avoid hanging on to too much memory in extreme cases. - if len(s.parseState) > 1024 { - s.parseState = nil - } - scannerPool.Put(s) -} - -// Scan returns the number of bytes scanned and if there was any error -// in trying to reach the end of data. -func Scan(data []byte) (int, error) { - s := newScanner() - defer freeScanner(s) - _ = checkValid(data, s) - return s.index, s.err -} - -// checkValid verifies that data is valid JSON-encoded data. -// scan is passed in for use by checkValid to avoid an allocation. -func checkValid(data []byte, scan *scanner) error { - for _, c := range data { - scan.index++ - if scan.step(scan, c) == scanError { - return scan.err - } - } - if scan.eof() == scanError { - return scan.err - } - return nil -} - -func isSpace(c byte) bool { - return c == ' ' || c == '\t' || c == '\r' || c == '\n' -} - -func (s *scanner) reset() { - s.step = stateBeginValue - s.parseState = s.parseState[0:0] - s.err = nil - s.endTop = false - s.index = 0 -} - -// eof tells the scanner that the end of input has been reached. -// It returns a scan status just as s.step does. -func (s *scanner) eof() scanStatus { - if s.err != nil { - return scanError - } - if s.endTop { - return scanEnd - } - s.step(s, ' ') - if s.endTop { - return scanEnd - } - if s.err == nil { - s.err = fmt.Errorf("unexpected end of JSON input") - } - return scanError -} - -// pushParseState pushes a new parse state p onto the parse stack. -// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned. -func (s *scanner) pushParseState(c byte, newParseState int, successState scanStatus) scanStatus { - s.parseState = append(s.parseState, newParseState) - if len(s.parseState) <= maxNestingDepth { - return successState - } - return s.error(c, "exceeded max depth") -} - -// popParseState pops a parse state (already obtained) off the stack -// and updates s.step accordingly. 
-func (s *scanner) popParseState() { - n := len(s.parseState) - 1 - s.parseState = s.parseState[0:n] - if n == 0 { - s.step = stateEndTop - s.endTop = true - } else { - s.step = stateEndValue - } -} - -// stateBeginValueOrEmpty is the state after reading `[`. -func stateBeginValueOrEmpty(s *scanner, c byte) scanStatus { - if c <= ' ' && isSpace(c) { - return scanSkipSpace - } - if c == ']' { - return stateEndValue(s, c) - } - return stateBeginValue(s, c) -} - -// stateBeginValue is the state at the beginning of the input. -func stateBeginValue(s *scanner, c byte) scanStatus { - if c <= ' ' && isSpace(c) { - return scanSkipSpace - } - switch c { - case '{': - s.step = stateBeginStringOrEmpty - return s.pushParseState(c, parseObjectKey, scanBeginObject) - case '[': - s.step = stateBeginValueOrEmpty - return s.pushParseState(c, parseArrayValue, scanBeginArray) - case '"': - s.step = stateInString - return scanBeginLiteral - case '-': - s.step = stateNeg - return scanBeginLiteral - case '0': // beginning of 0.123 - s.step = state0 - return scanBeginLiteral - case 't': // beginning of true - s.step = stateT - return scanBeginLiteral - case 'f': // beginning of false - s.step = stateF - return scanBeginLiteral - case 'n': // beginning of null - s.step = stateN - return scanBeginLiteral - } - if '1' <= c && c <= '9' { // beginning of 1234.5 - s.step = state1 - return scanBeginLiteral - } - return s.error(c, "looking for beginning of value") -} - -// stateBeginStringOrEmpty is the state after reading `{`. -func stateBeginStringOrEmpty(s *scanner, c byte) scanStatus { - if c <= ' ' && isSpace(c) { - return scanSkipSpace - } - if c == '}' { - n := len(s.parseState) - s.parseState[n-1] = parseObjectValue - return stateEndValue(s, c) - } - return stateBeginString(s, c) -} - -// stateBeginString is the state after reading `{"key": value,`. -func stateBeginString(s *scanner, c byte) scanStatus { - if c <= ' ' && isSpace(c) { - return scanSkipSpace - } - if c == '"' { - s.step = stateInString - return scanBeginLiteral - } - return s.error(c, "looking for beginning of object key string") -} - -// stateEndValue is the state after completing a value, -// such as after reading `{}` or `true` or `["x"`. -func stateEndValue(s *scanner, c byte) scanStatus { - n := len(s.parseState) - if n == 0 { - // Completed top-level before the current byte. - s.step = stateEndTop - s.endTop = true - return stateEndTop(s, c) - } - if c <= ' ' && isSpace(c) { - s.step = stateEndValue - return scanSkipSpace - } - ps := s.parseState[n-1] - switch ps { - case parseObjectKey: - if c == ':' { - s.parseState[n-1] = parseObjectValue - s.step = stateBeginValue - return scanObjectKey - } - return s.error(c, "after object key") - case parseObjectValue: - if c == ',' { - s.parseState[n-1] = parseObjectKey - s.step = stateBeginString - return scanObjectValue - } - if c == '}' { - s.popParseState() - return scanEndObject - } - return s.error(c, "after object key:value pair") - case parseArrayValue: - if c == ',' { - s.step = stateBeginValue - return scanArrayValue - } - if c == ']' { - s.popParseState() - return scanEndArray - } - return s.error(c, "after array element") - } - return s.error(c, "") -} - -// stateEndTop is the state after finishing the top-level value, -// such as after reading `{}` or `[1,2,3]`. -// Only space characters should be seen now. -func stateEndTop(s *scanner, c byte) scanStatus { - if c != ' ' && c != '\t' && c != '\r' && c != '\n' { - // Complain about non-space byte on next call. 
- s.error(c, "after top-level value") - } - return scanEnd -} - -// stateInString is the state after reading `"`. -func stateInString(s *scanner, c byte) scanStatus { - if c == '"' { - s.step = stateEndValue - return scanContinue - } - if c == '\\' { - s.step = stateInStringEsc - return scanContinue - } - if c < 0x20 { - return s.error(c, "in string literal") - } - return scanContinue -} - -// stateInStringEsc is the state after reading `"\` during a quoted string. -func stateInStringEsc(s *scanner, c byte) scanStatus { - switch c { - case 'b', 'f', 'n', 'r', 't', '\\', '/', '"': - s.step = stateInString - return scanContinue - case 'u': - s.step = stateInStringEscU - return scanContinue - } - return s.error(c, "in string escape code") -} - -// stateInStringEscU is the state after reading `"\u` during a quoted string. -func stateInStringEscU(s *scanner, c byte) scanStatus { - if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' { - s.step = stateInStringEscU1 - return scanContinue - } - // numbers - return s.error(c, "in \\u hexadecimal character escape") -} - -// stateInStringEscU1 is the state after reading `"\u1` during a quoted string. -func stateInStringEscU1(s *scanner, c byte) scanStatus { - if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' { - s.step = stateInStringEscU12 - return scanContinue - } - // numbers - return s.error(c, "in \\u hexadecimal character escape") -} - -// stateInStringEscU12 is the state after reading `"\u12` during a quoted string. -func stateInStringEscU12(s *scanner, c byte) scanStatus { - if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' { - s.step = stateInStringEscU123 - return scanContinue - } - // numbers - return s.error(c, "in \\u hexadecimal character escape") -} - -// stateInStringEscU123 is the state after reading `"\u123` during a quoted string. -func stateInStringEscU123(s *scanner, c byte) scanStatus { - if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' { - s.step = stateInString - return scanContinue - } - // numbers - return s.error(c, "in \\u hexadecimal character escape") -} - -// stateNeg is the state after reading `-` during a number. -func stateNeg(s *scanner, c byte) scanStatus { - if c == '0' { - s.step = state0 - return scanContinue - } - if '1' <= c && c <= '9' { - s.step = state1 - return scanContinue - } - return s.error(c, "in numeric literal") -} - -// state1 is the state after reading a non-zero integer during a number, -// such as after reading `1` or `100` but not `0`. -func state1(s *scanner, c byte) scanStatus { - if '0' <= c && c <= '9' { - s.step = state1 - return scanContinue - } - return state0(s, c) -} - -// state0 is the state after reading `0` during a number. -func state0(s *scanner, c byte) scanStatus { - if c == '.' { - s.step = stateDot - return scanContinue - } - if c == 'e' || c == 'E' { - s.step = stateE - return scanContinue - } - return stateEndValue(s, c) -} - -// stateDot is the state after reading the integer and decimal point in a number, -// such as after reading `1.`. -func stateDot(s *scanner, c byte) scanStatus { - if '0' <= c && c <= '9' { - s.step = stateDot0 - return scanContinue - } - return s.error(c, "after decimal point in numeric literal") -} - -// stateDot0 is the state after reading the integer, decimal point, and subsequent -// digits of a number, such as after reading `3.14`. 
-func stateDot0(s *scanner, c byte) scanStatus { - if '0' <= c && c <= '9' { - return scanContinue - } - if c == 'e' || c == 'E' { - s.step = stateE - return scanContinue - } - return stateEndValue(s, c) -} - -// stateE is the state after reading the mantissa and e in a number, -// such as after reading `314e` or `0.314e`. -func stateE(s *scanner, c byte) scanStatus { - if c == '+' || c == '-' { - s.step = stateESign - return scanContinue - } - return stateESign(s, c) -} - -// stateESign is the state after reading the mantissa, e, and sign in a number, -// such as after reading `314e-` or `0.314e+`. -func stateESign(s *scanner, c byte) scanStatus { - if '0' <= c && c <= '9' { - s.step = stateE0 - return scanContinue - } - return s.error(c, "in exponent of numeric literal") -} - -// stateE0 is the state after reading the mantissa, e, optional sign, -// and at least one digit of the exponent in a number, -// such as after reading `314e-2` or `0.314e+1` or `3.14e0`. -func stateE0(s *scanner, c byte) scanStatus { - if '0' <= c && c <= '9' { - return scanContinue - } - return stateEndValue(s, c) -} - -// stateT is the state after reading `t`. -func stateT(s *scanner, c byte) scanStatus { - if c == 'r' { - s.step = stateTr - return scanContinue - } - return s.error(c, "in literal true (expecting 'r')") -} - -// stateTr is the state after reading `tr`. -func stateTr(s *scanner, c byte) scanStatus { - if c == 'u' { - s.step = stateTru - return scanContinue - } - return s.error(c, "in literal true (expecting 'u')") -} - -// stateTru is the state after reading `tru`. -func stateTru(s *scanner, c byte) scanStatus { - if c == 'e' { - s.step = stateEndValue - return scanContinue - } - return s.error(c, "in literal true (expecting 'e')") -} - -// stateF is the state after reading `f`. -func stateF(s *scanner, c byte) scanStatus { - if c == 'a' { - s.step = stateFa - return scanContinue - } - return s.error(c, "in literal false (expecting 'a')") -} - -// stateFa is the state after reading `fa`. -func stateFa(s *scanner, c byte) scanStatus { - if c == 'l' { - s.step = stateFal - return scanContinue - } - return s.error(c, "in literal false (expecting 'l')") -} - -// stateFal is the state after reading `fal`. -func stateFal(s *scanner, c byte) scanStatus { - if c == 's' { - s.step = stateFals - return scanContinue - } - return s.error(c, "in literal false (expecting 's')") -} - -// stateFals is the state after reading `fals`. -func stateFals(s *scanner, c byte) scanStatus { - if c == 'e' { - s.step = stateEndValue - return scanContinue - } - return s.error(c, "in literal false (expecting 'e')") -} - -// stateN is the state after reading `n`. -func stateN(s *scanner, c byte) scanStatus { - if c == 'u' { - s.step = stateNu - return scanContinue - } - return s.error(c, "in literal null (expecting 'u')") -} - -// stateNu is the state after reading `nu`. -func stateNu(s *scanner, c byte) scanStatus { - if c == 'l' { - s.step = stateNul - return scanContinue - } - return s.error(c, "in literal null (expecting 'l')") -} - -// stateNul is the state after reading `nul`. -func stateNul(s *scanner, c byte) scanStatus { - if c == 'l' { - s.step = stateEndValue - return scanContinue - } - return s.error(c, "in literal null (expecting 'l')") -} - -// stateError is the state after reaching a syntax error, -// such as after reading `[1}` or `5.1.2`. -func stateError(s *scanner, c byte) scanStatus { - return scanError -} - -// error records an error and switches to the error state. 
-func (s *scanner) error(c byte, context string) scanStatus { - s.step = stateError - s.err = fmt.Errorf("invalid character <<%c>> %s", c, context) - return scanError -} diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/json/parser.go b/vendor/github.com/gabriel-vasile/mimetype/internal/json/parser.go new file mode 100644 index 000000000..fd8dd5202 --- /dev/null +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/json/parser.go @@ -0,0 +1,464 @@ +package json + +import ( + "bytes" + "sync" +) + +const ( + QueryNone = "json" + QueryGeo = "geo" + QueryHAR = "har" + QueryGLTF = "gltf" + maxRecursion = 4096 +) + +var queries = map[string][]query{ + QueryNone: nil, + QueryGeo: {{ + SearchPath: [][]byte{[]byte("type")}, + SearchVals: [][]byte{ + []byte(`"Feature"`), + []byte(`"FeatureCollection"`), + []byte(`"Point"`), + []byte(`"LineString"`), + []byte(`"Polygon"`), + []byte(`"MultiPoint"`), + []byte(`"MultiLineString"`), + []byte(`"MultiPolygon"`), + []byte(`"GeometryCollection"`), + }, + }}, + QueryHAR: {{ + SearchPath: [][]byte{[]byte("log"), []byte("version")}, + }, { + SearchPath: [][]byte{[]byte("log"), []byte("creator")}, + }, { + SearchPath: [][]byte{[]byte("log"), []byte("entries")}, + }}, + QueryGLTF: {{ + SearchPath: [][]byte{[]byte("asset"), []byte("version")}, + SearchVals: [][]byte{[]byte(`"1.0"`), []byte(`"2.0"`)}, + }}, +} + +var parserPool = sync.Pool{ + New: func() any { + return &parserState{maxRecursion: maxRecursion} + }, +} + +// parserState holds the state of JSON parsing. The number of inspected bytes, +// the current path inside the JSON object, etc. +type parserState struct { + // ib represents the number of inspected bytes. + // Because mimetype limits itself to only reading the header of the file, + // it means sometimes the input JSON can be truncated. In that case, we want + // to still detect it as JSON, even if it's invalid/truncated. + // When ib == len(input) it means the JSON was valid (at least the header). + ib int + maxRecursion int + // currPath keeps a track of the JSON keys parsed up. + // It works only for JSON objects. JSON arrays are ignored + // mainly because the functionality is not needed. + currPath [][]byte + // firstToken stores the first JSON token encountered in input. + // TODO: performance would be better if we would stop parsing as soon + // as we see that first token is not what we are interested in. + firstToken int + // querySatisfied is true if both path and value of any queries passed to + // consumeAny are satisfied. + querySatisfied bool +} + +// query holds information about a combination of {"key": "val"} that we're trying +// to search for inside the JSON. +type query struct { + // SearchPath represents the whole path to look for inside the JSON. + // ex: [][]byte{[]byte("foo"), []byte("bar")} matches {"foo": {"bar": "baz"}} + SearchPath [][]byte + // SearchVals represents values to look for when the SearchPath is found. + // Each SearchVal element is tried until one of them matches (logical OR.) + SearchVals [][]byte +} + +func eq(path1, path2 [][]byte) bool { + if len(path1) != len(path2) { + return false + } + for i := range path1 { + if !bytes.Equal(path1[i], path2[i]) { + return false + } + } + return true +} + +// LooksLikeObjectOrArray reports if first non white space character from raw +// is either { or [. Parsing raw as JSON is a heavy operation. When receiving some +// text input we can skip parsing if the input does not even look like JSON. 
+func LooksLikeObjectOrArray(raw []byte) bool { + for i := range raw { + if isSpace(raw[i]) { + continue + } + return raw[i] == '{' || raw[i] == '[' + } + + return false +} + +// Parse will take out a parser from the pool depending on queryType and tries +// to parse raw bytes as JSON. +func Parse(queryType string, raw []byte) (parsed, inspected, firstToken int, querySatisfied bool) { + p := parserPool.Get().(*parserState) + defer func() { + // Avoid hanging on to too much memory in extreme input cases. + if len(p.currPath) > 128 { + p.currPath = nil + } + parserPool.Put(p) + }() + p.reset() + + qs := queries[queryType] + got := p.consumeAny(raw, qs, 0) + return got, p.ib, p.firstToken, p.querySatisfied +} + +func (p *parserState) reset() { + p.ib = 0 + p.currPath = p.currPath[0:0] + p.firstToken = TokInvalid + p.querySatisfied = false +} + +func (p *parserState) consumeSpace(b []byte) (n int) { + for len(b) > 0 && isSpace(b[0]) { + b = b[1:] + n++ + p.ib++ + } + return n +} + +func (p *parserState) consumeConst(b, cnst []byte) int { + lb := len(b) + for i, c := range cnst { + if lb > i && b[i] == c { + p.ib++ + } else { + return 0 + } + } + return len(cnst) +} + +func (p *parserState) consumeString(b []byte) (n int) { + var c byte + for len(b[n:]) > 0 { + c, n = b[n], n+1 + p.ib++ + switch c { + case '\\': + if len(b[n:]) == 0 { + return 0 + } + switch b[n] { + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + n++ + p.ib++ + continue + case 'u': + n++ + p.ib++ + for j := 0; j < 4 && len(b[n:]) > 0; j++ { + if !isXDigit(b[n]) { + return 0 + } + n++ + p.ib++ + } + continue + default: + return 0 + } + case '"': + return n + default: + continue + } + } + return 0 +} + +func (p *parserState) consumeNumber(b []byte) (n int) { + got := false + var i int + + if len(b) == 0 { + goto out + } + if b[0] == '-' { + b, i = b[1:], i+1 + p.ib++ + } + + for len(b) > 0 { + if !isDigit(b[0]) { + break + } + got = true + b, i = b[1:], i+1 + p.ib++ + } + if len(b) == 0 { + goto out + } + if b[0] == '.' 
{ + b, i = b[1:], i+1 + p.ib++ + } + for len(b) > 0 { + if !isDigit(b[0]) { + break + } + got = true + b, i = b[1:], i+1 + p.ib++ + } + if len(b) == 0 { + goto out + } + if got && (b[0] == 'e' || b[0] == 'E') { + b, i = b[1:], i+1 + p.ib++ + got = false + if len(b) == 0 { + goto out + } + if b[0] == '+' || b[0] == '-' { + b, i = b[1:], i+1 + p.ib++ + } + for len(b) > 0 { + if !isDigit(b[0]) { + break + } + got = true + b, i = b[1:], i+1 + p.ib++ + } + } +out: + if got { + return i + } + return 0 +} + +func (p *parserState) consumeArray(b []byte, qs []query, lvl int) (n int) { + p.currPath = append(p.currPath, []byte{'['}) + if len(b) == 0 { + return 0 + } + + for n < len(b) { + n += p.consumeSpace(b[n:]) + if len(b[n:]) == 0 { + return 0 + } + if b[n] == ']' { + p.ib++ + p.currPath = p.currPath[:len(p.currPath)-1] + return n + 1 + } + innerParsed := p.consumeAny(b[n:], qs, lvl) + if innerParsed == 0 { + return 0 + } + n += innerParsed + if len(b[n:]) == 0 { + return 0 + } + switch b[n] { + case ',': + n += 1 + p.ib++ + continue + case ']': + p.ib++ + return n + 1 + default: + return 0 + } + } + return 0 +} + +func queryPathMatch(qs []query, path [][]byte) int { + for i := range qs { + if eq(qs[i].SearchPath, path) { + return i + } + } + return -1 +} + +func (p *parserState) consumeObject(b []byte, qs []query, lvl int) (n int) { + for n < len(b) { + n += p.consumeSpace(b[n:]) + if len(b[n:]) == 0 { + return 0 + } + if b[n] == '}' { + p.ib++ + return n + 1 + } + if b[n] != '"' { + return 0 + } else { + n += 1 + p.ib++ + } + // queryMatched stores the index of the query satisfying the current path. + queryMatched := -1 + if keyLen := p.consumeString(b[n:]); keyLen == 0 { + return 0 + } else { + p.currPath = append(p.currPath, b[n:n+keyLen-1]) + if !p.querySatisfied { + queryMatched = queryPathMatch(qs, p.currPath) + } + n += keyLen + } + n += p.consumeSpace(b[n:]) + if len(b[n:]) == 0 { + return 0 + } + if b[n] != ':' { + return 0 + } else { + n += 1 + p.ib++ + } + n += p.consumeSpace(b[n:]) + if len(b[n:]) == 0 { + return 0 + } + + if valLen := p.consumeAny(b[n:], qs, lvl); valLen == 0 { + return 0 + } else { + if queryMatched != -1 { + q := qs[queryMatched] + if len(q.SearchVals) == 0 { + p.querySatisfied = true + } + for _, val := range q.SearchVals { + if bytes.Equal(val, bytes.TrimSpace(b[n:n+valLen])) { + p.querySatisfied = true + } + } + } + n += valLen + } + if len(b[n:]) == 0 { + return 0 + } + switch b[n] { + case ',': + p.currPath = p.currPath[:len(p.currPath)-1] + n++ + p.ib++ + continue + case '}': + p.currPath = p.currPath[:len(p.currPath)-1] + p.ib++ + return n + 1 + default: + return 0 + } + } + return 0 +} + +func (p *parserState) consumeAny(b []byte, qs []query, lvl int) (n int) { + // Avoid too much recursion. 
+ if p.maxRecursion != 0 && lvl > p.maxRecursion { + return 0 + } + n += p.consumeSpace(b) + if len(b[n:]) == 0 { + return 0 + } + + var t, rv int + switch b[n] { + case '"': + n++ + p.ib++ + rv = p.consumeString(b[n:]) + t = TokString + case '[': + n++ + p.ib++ + rv = p.consumeArray(b[n:], qs, lvl+1) + t = TokArray + case '{': + n++ + p.ib++ + rv = p.consumeObject(b[n:], qs, lvl+1) + t = TokObject + case 't': + rv = p.consumeConst(b[n:], []byte("true")) + t = TokTrue + case 'f': + rv = p.consumeConst(b[n:], []byte("false")) + t = TokFalse + case 'n': + rv = p.consumeConst(b[n:], []byte("null")) + t = TokNull + default: + rv = p.consumeNumber(b[n:]) + t = TokNumber + } + if lvl == 0 { + p.firstToken = t + } + if len(qs) == 0 { + p.querySatisfied = true + } + if rv <= 0 { + return n + } + n += rv + n += p.consumeSpace(b[n:]) + return n +} + +func isSpace(c byte) bool { + return c == ' ' || c == '\t' || c == '\r' || c == '\n' +} +func isDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +func isXDigit(c byte) bool { + if isDigit(c) { + return true + } + return ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F') +} + +const ( + TokInvalid = 0 + TokNull = 1 << iota + TokTrue + TokFalse + TokNumber + TokString + TokArray + TokObject + TokComma +) diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go index 068d00f79..dd7f2417c 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go @@ -137,7 +137,7 @@ func tarParseOctal(b []byte) int64 { if b == 0 { break } - if !(b >= '0' && b <= '7') { + if b < '0' || b > '7' { return -1 } ret = (ret << 3) | int64(b-'0') diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go index 769732018..70599b342 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go @@ -71,7 +71,7 @@ func Dbf(raw []byte, limit uint32) bool { } // 3rd and 4th bytes contain the last update month and day of month. - if !(0 < raw[2] && raw[2] < 13 && 0 < raw[3] && raw[3] < 32) { + if raw[2] == 0 || raw[2] > 12 || raw[3] == 0 || raw[3] > 31 { return false } @@ -153,7 +153,7 @@ func Marc(raw []byte, limit uint32) bool { return bytes.Contains(raw[:min(2048, len(raw))], []byte{0x1E}) } -// Glb matches a glTF model format file. +// GLB matches a glTF model format file. // GLB is the binary file format representation of 3D models saved in // the GL transmission Format (glTF). // GLB uses little endian and its header structure is as follows: @@ -168,7 +168,7 @@ func Marc(raw []byte, limit uint32) bool { // // [glTF specification]: https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html // [IANA glTF entry]: https://www.iana.org/assignments/media-types/model/gltf-binary -var Glb = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"), +var GLB = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"), []byte("\x67\x6C\x54\x46\x01\x00\x00\x00")) // TzIf matches a Time Zone Information Format (TZif) file. 
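The parser.go file added above replaces mimetype's old stdlib-derived JSON scanner with a single-pass parser that can check key/value queries while it scans. As a rough, hedged sketch of how its exported entry points fit together (the input bytes below are made up, and because the package lives under `internal/` it cannot actually be imported from outside the mimetype module):

```go
package main

import (
	"fmt"

	// Shown for illustration only: this is an internal package of mimetype
	// and is not importable from other modules.
	"github.com/gabriel-vasile/mimetype/internal/json"
)

func main() {
	raw := []byte(`{"type": "FeatureCollection", "features": []}`) // made-up input

	// Cheap pre-check: skip the full parse when the input cannot be JSON.
	if !json.LooksLikeObjectOrArray(raw) {
		return
	}

	// QueryGeo makes the parser look for a {"type": "<GeoJSON type>"} pair
	// while scanning; querySatisfied reports whether one was found.
	parsed, inspected, firstToken, querySatisfied := json.Parse(json.QueryGeo, raw)
	fmt.Println(parsed == len(raw), inspected == len(raw),
		firstToken == json.TokObject, querySatisfied)
}
```

Returning both `parsed` and `inspected` is what makes the parser tolerant of truncated input: mimetype only reads a bounded header of a file, so a parse that stops exactly at the last inspected byte can still count as JSON, which is how `jsonHelper` in magic/text.go (further down in this diff) uses the result.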
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go index f077e1672..cade91f18 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go @@ -12,13 +12,13 @@ func Shp(raw []byte, limit uint32) bool { return false } - if !(binary.BigEndian.Uint32(raw[0:4]) == 9994 && - binary.BigEndian.Uint32(raw[4:8]) == 0 && - binary.BigEndian.Uint32(raw[8:12]) == 0 && - binary.BigEndian.Uint32(raw[12:16]) == 0 && - binary.BigEndian.Uint32(raw[16:20]) == 0 && - binary.BigEndian.Uint32(raw[20:24]) == 0 && - binary.LittleEndian.Uint32(raw[28:32]) == 1000) { + if binary.BigEndian.Uint32(raw[0:4]) != 9994 || + binary.BigEndian.Uint32(raw[4:8]) != 0 || + binary.BigEndian.Uint32(raw[8:12]) != 0 || + binary.BigEndian.Uint32(raw[12:16]) != 0 || + binary.BigEndian.Uint32(raw[16:20]) != 0 || + binary.BigEndian.Uint32(raw[20:24]) != 0 || + binary.LittleEndian.Uint32(raw[28:32]) != 1000 { return false } diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go index cf6446397..8178e4707 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go @@ -2,7 +2,6 @@ package magic import ( "bytes" - "strings" "time" "github.com/gabriel-vasile/mimetype/internal/charset" @@ -154,145 +153,75 @@ func Php(raw []byte, limit uint32) bool { // JSON matches a JavaScript Object Notation file. func JSON(raw []byte, limit uint32) bool { - raw = trimLWS(raw) // #175 A single JSON string, number or bool is not considered JSON. // JSON objects and arrays are reported as JSON. - if len(raw) < 2 || (raw[0] != '[' && raw[0] != '{') { - return false - } - parsed, err := json.Scan(raw) - // If the full file content was provided, check there is no error. - if limit == 0 || len(raw) < int(limit) { - return err == nil - } - - // If a section of the file was provided, check if all of it was parsed. - return parsed == len(raw) && len(raw) > 0 + return jsonHelper(raw, limit, json.QueryNone, json.TokObject|json.TokArray) } // GeoJSON matches a RFC 7946 GeoJSON file. // // GeoJSON detection implies searching for key:value pairs like: `"type": "Feature"` // in the input. -// BUG(gabriel-vasile): The "type" key should be searched for in the root object. func GeoJSON(raw []byte, limit uint32) bool { - raw = trimLWS(raw) - if len(raw) == 0 { + return jsonHelper(raw, limit, json.QueryGeo, json.TokObject) +} + +// HAR matches a HAR Spec file. +// Spec: http://www.softwareishard.com/blog/har-12-spec/ +func HAR(raw []byte, limit uint32) bool { + return jsonHelper(raw, limit, json.QueryHAR, json.TokObject) +} + +// GLTF matches a GL Transmission Format (JSON) file. +// Visit [glTF specification] and [IANA glTF entry] for more details. +// +// [glTF specification]: https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html +// [IANA glTF entry]: https://www.iana.org/assignments/media-types/model/gltf+json +func GLTF(raw []byte, limit uint32) bool { + return jsonHelper(raw, limit, json.QueryGLTF, json.TokObject) +} + +func jsonHelper(raw []byte, limit uint32, q string, wantTok int) bool { + if !json.LooksLikeObjectOrArray(raw) { return false } - // GeoJSON is always a JSON object, not a JSON array or any other JSON value. 
- if raw[0] != '{' { + lraw := len(raw) + parsed, inspected, firstToken, querySatisfied := json.Parse(q, raw) + if !querySatisfied || firstToken&wantTok == 0 { return false } - s := []byte(`"type"`) - si, sl := bytes.Index(raw, s), len(s) - - if si == -1 { - return false + // If the full file content was provided, check that the whole input was parsed. + if limit == 0 || lraw < int(limit) { + return parsed == lraw } - // If the "type" string is the suffix of the input, - // there is no need to search for the value of the key. - if si+sl == len(raw) { - return false - } - // Skip the "type" part. - raw = raw[si+sl:] - // Skip any whitespace before the colon. - raw = trimLWS(raw) - // Check for colon. - if len(raw) == 0 || raw[0] != ':' { - return false - } - // Skip any whitespace after the colon. - raw = trimLWS(raw[1:]) - - geoJSONTypes := [][]byte{ - []byte(`"Feature"`), - []byte(`"FeatureCollection"`), - []byte(`"Point"`), - []byte(`"LineString"`), - []byte(`"Polygon"`), - []byte(`"MultiPoint"`), - []byte(`"MultiLineString"`), - []byte(`"MultiPolygon"`), - []byte(`"GeometryCollection"`), - } - for _, t := range geoJSONTypes { - if bytes.HasPrefix(raw, t) { - return true - } - } - - return false + // If a section of the file was provided, check if all of it was inspected. + // In other words, check that if there was a problem parsing, that problem + // occured at the last byte in the input. + return inspected == lraw && lraw > 0 } // NdJSON matches a Newline delimited JSON file. All complete lines from raw // must be valid JSON documents meaning they contain one of the valid JSON data // types. func NdJSON(raw []byte, limit uint32) bool { - lCount, hasObjOrArr := 0, false + lCount, objOrArr := 0, 0 raw = dropLastLine(raw, limit) var l []byte for len(raw) != 0 { l, raw = scanLine(raw) - // Empty lines are allowed in NDJSON. - if l = trimRWS(trimLWS(l)); len(l) == 0 { - continue - } - _, err := json.Scan(l) - if err != nil { + _, inspected, firstToken, _ := json.Parse(json.QueryNone, l) + if len(l) != inspected { return false } - if l[0] == '[' || l[0] == '{' { - hasObjOrArr = true + if firstToken == json.TokArray || firstToken == json.TokObject { + objOrArr++ } lCount++ } - return lCount > 1 && hasObjOrArr -} - -// HAR matches a HAR Spec file. -// Spec: http://www.softwareishard.com/blog/har-12-spec/ -func HAR(raw []byte, limit uint32) bool { - s := []byte(`"log"`) - si, sl := bytes.Index(raw, s), len(s) - - if si == -1 { - return false - } - - // If the "log" string is the suffix of the input, - // there is no need to search for the value of the key. - if si+sl == len(raw) { - return false - } - // Skip the "log" part. - raw = raw[si+sl:] - // Skip any whitespace before the colon. - raw = trimLWS(raw) - // Check for colon. - if len(raw) == 0 || raw[0] != ':' { - return false - } - // Skip any whitespace after the colon. - raw = trimLWS(raw[1:]) - - harJSONTypes := [][]byte{ - []byte(`"version"`), - []byte(`"creator"`), - []byte(`"entries"`), - } - for _, t := range harJSONTypes { - si := bytes.Index(raw, t) - if si > -1 { - return true - } - } - - return false + return lCount > 1 && objOrArr > 0 } // Svg matches a SVG file. @@ -305,32 +234,31 @@ func Srt(raw []byte, _ uint32) bool { line, raw := scanLine(raw) // First line must be 1. 
- if string(line) != "1" { + if len(line) != 1 || line[0] != '1' { return false } line, raw = scanLine(raw) - secondLine := string(line) - // Timestamp format (e.g: 00:02:16,612 --> 00:02:19,376) limits secondLine + // Timestamp format (e.g: 00:02:16,612 --> 00:02:19,376) limits second line // length to exactly 29 characters. - if len(secondLine) != 29 { + if len(line) != 29 { return false } // Decimal separator of fractional seconds in the timestamps must be a // comma, not a period. - if strings.Contains(secondLine, ".") { + if bytes.IndexByte(line, '.') != -1 { return false } - // Second line must be a time range. - ts := strings.Split(secondLine, " --> ") - if len(ts) != 2 { + sep := []byte(" --> ") + i := bytes.Index(line, sep) + if i == -1 { return false } const layout = "15:04:05,000" - t0, err := time.Parse(layout, ts[0]) + t0, err := time.Parse(layout, string(line[:i])) if err != nil { return false } - t1, err := time.Parse(layout, ts[1]) + t1, err := time.Parse(layout, string(line[i+len(sep):])) if err != nil { return false } diff --git a/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md b/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md index f9bf03cba..6f45bfbb6 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md +++ b/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md @@ -1,4 +1,4 @@ -## 178 Supported MIME types +## 179 Supported MIME types This file is automatically generated when running tests. Do not edit manually. Extension | MIME type | Aliases @@ -171,6 +171,7 @@ Extension | MIME type | Aliases **.json** | application/json | - **.geojson** | application/geo+json | - **.har** | application/json | - +**.gltf** | model/gltf+json | - **.ndjson** | application/x-ndjson | - **.rtf** | text/rtf | application/rtf **.srt** | application/x-subrip | application/x-srt, text/x-srt diff --git a/vendor/github.com/gabriel-vasile/mimetype/tree.go b/vendor/github.com/gabriel-vasile/mimetype/tree.go index b5f566227..63a2093a4 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/tree.go +++ b/vendor/github.com/gabriel-vasile/mimetype/tree.go @@ -83,7 +83,7 @@ var ( text = newMIME("text/plain", ".txt", magic.Text, html, svg, xml, php, js, lua, perl, python, json, ndJSON, rtf, srt, tcl, csv, tsv, vCard, iCalendar, warc, vtt) xml = newMIME("text/xml", ".xml", magic.XML, rss, atom, x3d, kml, xliff, collada, gml, gpx, tcx, amf, threemf, xfdf, owl2). alias("application/xml") - json = newMIME("application/json", ".json", magic.JSON, geoJSON, har) + json = newMIME("application/json", ".json", magic.JSON, geoJSON, har, gltf) har = newMIME("application/json", ".har", magic.HAR) csv = newMIME("text/csv", ".csv", magic.Csv) tsv = newMIME("text/tab-separated-values", ".tsv", magic.Tsv) @@ -262,7 +262,8 @@ var ( pat = newMIME("image/x-gimp-pat", ".pat", magic.Pat) gbr = newMIME("image/x-gimp-gbr", ".gbr", magic.Gbr) xfdf = newMIME("application/vnd.adobe.xfdf", ".xfdf", magic.Xfdf) - glb = newMIME("model/gltf-binary", ".glb", magic.Glb) + glb = newMIME("model/gltf-binary", ".glb", magic.GLB) + gltf = newMIME("model/gltf+json", ".gltf", magic.GLTF) jxr = newMIME("image/jxr", ".jxr", magic.Jxr).alias("image/vnd.ms-photo") parquet = newMIME("application/vnd.apache.parquet", ".parquet", magic.Par1). 
alias("application/x-parquet") diff --git a/vendor/github.com/gin-contrib/cors/README.md b/vendor/github.com/gin-contrib/cors/README.md index d43523295..a8747dd6a 100644 --- a/vendor/github.com/gin-contrib/cors/README.md +++ b/vendor/github.com/gin-contrib/cors/README.md @@ -1,47 +1,89 @@ -# CORS gin's middleware +# gin-contrib/cors [![Run Tests](https://github.com/gin-contrib/cors/actions/workflows/go.yml/badge.svg)](https://github.com/gin-contrib/cors/actions/workflows/go.yml) [![codecov](https://codecov.io/gh/gin-contrib/cors/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-contrib/cors) [![Go Report Card](https://goreportcard.com/badge/github.com/gin-contrib/cors)](https://goreportcard.com/report/github.com/gin-contrib/cors) [![GoDoc](https://godoc.org/github.com/gin-contrib/cors?status.svg)](https://godoc.org/github.com/gin-contrib/cors) -Gin middleware/handler to enable CORS support. +- [gin-contrib/cors](#gin-contribcors) + - [Overview](#overview) + - [Installation](#installation) + - [Quick Start](#quick-start) + - [Advanced Usage](#advanced-usage) + - [Custom Configuration](#custom-configuration) + - [DefaultConfig Reference](#defaultconfig-reference) + - [Default() Convenience](#default-convenience) + - [Configuration Reference](#configuration-reference) + - [Notes on Configuration](#notes-on-configuration) + - [Examples](#examples) + - [Advanced Options](#advanced-options) + - [Custom Origin Validation](#custom-origin-validation) + - [With Gin Context](#with-gin-context) + - [Helper Methods](#helper-methods) + - [Validation \& Error Handling](#validation--error-handling) + - [Important Notes](#important-notes) -## Usage +--- -### Start using it +## Overview -Download and install it: +**CORS (Cross-Origin Resource Sharing)** middleware for [Gin](https://github.com/gin-gonic/gin). + +- Enables flexible CORS handling for your Gin-based APIs. +- Highly configurable: origins, methods, headers, credentials, and more. + +--- + +## Installation ```sh go get github.com/gin-contrib/cors ``` -Import it in your code: +Import in your Go code: ```go import "github.com/gin-contrib/cors" ``` -### Canonical example +--- + +## Quick Start + +Allow all origins (default): ```go -package main - import ( - "time" - "github.com/gin-contrib/cors" "github.com/gin-gonic/gin" ) func main() { router := gin.Default() - // CORS for https://foo.com and https://github.com origins, allowing: - // - PUT and PATCH methods - // - Origin header - // - Credentials share - // - Preflight requests cached for 12 hours + router.Use(cors.Default()) // All origins allowed by default + router.Run() +} +``` + +> ⚠️ **Warning:** Allowing all origins disables cookies for clients. For credentialed requests, **do not** allow all origins. 
+ +--- + +## Advanced Usage + +### Custom Configuration + +Configure allowed origins, methods, headers, and more: + +```go +import ( + "time" + "github.com/gin-contrib/cors" + "github.com/gin-gonic/gin" +) + +func main() { + router := gin.Default() router.Use(cors.New(cors.Config{ AllowOrigins: []string{"https://foo.com"}, AllowMethods: []string{"PUT", "PATCH"}, @@ -57,15 +99,20 @@ func main() { } ``` -### Using DefaultConfig as start point +--- + +### DefaultConfig Reference + +Start with library defaults and customize as needed: ```go +import ( + "github.com/gin-contrib/cors" + "github.com/gin-gonic/gin" +) + func main() { router := gin.Default() - // - No origin allowed by default - // - GET,POST, PUT, HEAD methods - // - Credentials share disabled - // - Preflight requests cached for 12 hours config := cors.DefaultConfig() config.AllowOrigins = []string{"http://google.com"} // config.AllowOrigins = []string{"http://google.com", "http://facebook.com"} @@ -76,20 +123,124 @@ func main() { } ``` -Note: while Default() allows all origins, DefaultConfig() does not and you will still have to use AllowAllOrigins. +> **Note:** `Default()` allows all origins, but `DefaultConfig()` does **not**. To allow all origins, set `AllowAllOrigins = true`. -### Default() allows all origins +--- + +### Default() Convenience + +Enable all origins with a single call: ```go -func main() { - router := gin.Default() - // same as - // config := cors.DefaultConfig() - // config.AllowAllOrigins = true - // router.Use(cors.New(config)) - router.Use(cors.Default()) - router.Run() +router.Use(cors.Default()) // Equivalent to AllowAllOrigins = true +``` + +--- + +## Configuration Reference + +The middleware is controlled via the `cors.Config` struct. All fields are optional unless otherwise stated. + +| Field | Type | Default | Description | +|-------------------------------|-----------------------------|-----------------------------------------------------------|-----------------------------------------------------------------------------------------------| +| `AllowAllOrigins` | `bool` | `false` | If true, allows all origins. Credentials **cannot** be used. | +| `AllowOrigins` | `[]string` | `[]` | List of allowed origins. Supports exact match, `*`, and wildcards. | +| `AllowOriginFunc` | `func(string) bool` | `nil` | Custom function to validate origin. If set, `AllowOrigins` is ignored. | +| `AllowOriginWithContextFunc` | `func(*gin.Context,string)bool` | `nil` | Like `AllowOriginFunc`, but with request context. | +| `AllowMethods` | `[]string` | `[]string{"GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS"}` | Allowed HTTP methods. | +| `AllowPrivateNetwork` | `bool` | `false` | Adds [Private Network Access](https://wicg.github.io/private-network-access/) CORS header. | +| `AllowHeaders` | `[]string` | `[]` | List of non-simple headers permitted in requests. | +| `AllowCredentials` | `bool` | `false` | Allow cookies, HTTP auth, or client certs. Only if precise origins are used. | +| `ExposeHeaders` | `[]string` | `[]` | Headers exposed to the browser. | +| `MaxAge` | `time.Duration` | `12 * time.Hour` | Cache time for preflight requests. | +| `AllowWildcard` | `bool` | `false` | Enables wildcards in origins (e.g. `https://*.example.com`). | +| `AllowBrowserExtensions` | `bool` | `false` | Allow browser extension schemes as origins (e.g. `chrome-extension://`). | +| `CustomSchemas` | `[]string` | `nil` | Additional allowed URI schemes (e.g. `tauri://`). 
| +| `AllowWebSockets` | `bool` | `false` | Allow `ws://` and `wss://` schemas. | +| `AllowFiles` | `bool` | `false` | Allow `file://` origins (dangerous; use only if necessary). | +| `OptionsResponseStatusCode` | `int` | `204` | Custom status code for `OPTIONS` responses. | + +--- + +### Notes on Configuration + +- Only one of `AllowAllOrigins`, `AllowOrigins`, `AllowOriginFunc`, or `AllowOriginWithContextFunc` should be set. +- If `AllowAllOrigins` is true, other origin settings are ignored and credentialed requests are not allowed. +- If `AllowWildcard` is enabled, only one `*` is allowed per origin string. +- Use `AllowBrowserExtensions`, `AllowWebSockets`, or `AllowFiles` to permit non-HTTP(s) protocols as origins. +- Custom schemas allow, for example, usage in desktop apps via custom URI schemes (`tauri://`, etc.). +- If both `AllowOriginFunc` and `AllowOriginWithContextFunc` are set, the context-specific function is preferred. + +--- + +### Examples + +#### Advanced Options + +```go +config := cors.Config{ + AllowOrigins: []string{"https://*.foo.com", "https://bar.com"}, + AllowWildcard: true, + AllowMethods: []string{"GET", "POST"}, + AllowHeaders: []string{"Authorization", "Content-Type"}, + AllowCredentials: true, + AllowBrowserExtensions: true, + AllowWebSockets: true, + AllowFiles: false, + CustomSchemas: []string{"tauri://"}, + MaxAge: 24 * time.Hour, + ExposeHeaders: []string{"X-Custom-Header"}, + AllowPrivateNetwork: true, } ``` -Using all origins disables the ability for Gin to set cookies for clients. When dealing with credentials, don't allow all origins. +#### Custom Origin Validation + +```go +config := cors.Config{ + AllowOriginFunc: func(origin string) bool { + // Allow any github.com subdomain or a custom rule + return strings.HasSuffix(origin, "github.com") + }, +} +``` + +#### With Gin Context + +```go +config := cors.Config{ + AllowOriginWithContextFunc: func(c *gin.Context, origin string) bool { + // Allow only if a certain header is present + return c.Request.Header.Get("X-Allow-CORS") == "yes" + }, +} +``` + +--- + +## Helper Methods + +Dynamically add methods or headers to the config: + +```go +config.AddAllowMethods("DELETE", "OPTIONS") +config.AddAllowHeaders("X-My-Header") +config.AddExposeHeaders("X-Other-Header") +``` + +--- + +## Validation & Error Handling + +- Calling `Validate()` on a `Config` checks for misconfiguration (called internally). +- If `AllowAllOrigins` is set, you cannot also set `AllowOrigins` or any `AllowOriginFunc`. +- If neither `AllowAllOrigins`, `AllowOriginFunc`, nor `AllowOrigins` is set, an error is raised. +- If an `AllowOrigin` contains a wildcard but `AllowWildcard` is not enabled, or more than one `*` is present, a panic is triggered. +- Invalid origin schemas or unsupported wildcards are rejected. + +--- + +## Important Notes + +- **Enabling all origins disables cookies:** When `AllowAllOrigins` is enabled, Gin cannot set cookies for clients. If you need credential sharing (cookies, authentication headers), **do not** allow all origins. +- For detailed documentation and configuration options, see the [GoDoc](https://godoc.org/github.com/gin-contrib/cors). 
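To complement the rewritten README above, here is a minimal sketch of the pattern it documents: start from `DefaultConfig()`, adjust fields, extend the lists with the helper methods, then hand the config to `cors.New`. The origin and header names are placeholders, not values taken from the README:

```go
package main

import (
	"time"

	"github.com/gin-contrib/cors"
	"github.com/gin-gonic/gin"
)

func main() {
	router := gin.Default()

	// DefaultConfig: the common methods are allowed, preflight responses are
	// cached for 12 hours, and no origins are allowed until set explicitly.
	config := cors.DefaultConfig()
	config.AllowOrigins = []string{"https://example.com"} // placeholder origin
	config.AllowCredentials = true                        // needs explicit origins, not AllowAllOrigins
	config.MaxAge = 6 * time.Hour

	// Helper methods from the README extend the lists in place.
	config.AddAllowHeaders("Authorization")
	config.AddExposeHeaders("X-Request-ID")

	// cors.New validates the config; per the README notes, misconfiguration
	// (e.g. a wildcard origin without AllowWildcard) triggers a panic.
	router.Use(cors.New(config))
	router.Run()
}
```

This mirrors the README's point that `Default()` is only a shortcut for `AllowAllOrigins = true`; once credentials are involved, explicit origins via `DefaultConfig()` are the safer route.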
diff --git a/vendor/github.com/gin-contrib/cors/config.go b/vendor/github.com/gin-contrib/cors/config.go index a955c3171..76e15a880 100644 --- a/vendor/github.com/gin-contrib/cors/config.go +++ b/vendor/github.com/gin-contrib/cors/config.go @@ -87,7 +87,7 @@ func (cors *cors) applyCors(c *gin.Context) { return } - if c.Request.Method == "OPTIONS" { + if c.Request.Method == http.MethodOptions { cors.handlePreflight(c) defer c.AbortWithStatus(cors.optionsResponseStatusCode) } else { diff --git a/vendor/github.com/gin-contrib/sse/.golangci.yml b/vendor/github.com/gin-contrib/sse/.golangci.yml index 4c44c5fae..47094ac61 100644 --- a/vendor/github.com/gin-contrib/sse/.golangci.yml +++ b/vendor/github.com/gin-contrib/sse/.golangci.yml @@ -1,3 +1,50 @@ +version: "2" linters: - disable: + default: none + enable: + - bodyclose + - dogsled + - dupl - errcheck + - exhaustive + - gochecknoinits + - goconst + - gocritic + - gocyclo + - goprintffuncname + - gosec + - govet + - ineffassign + - lll + - misspell + - nakedret + - noctx + - nolintlint + - rowserrcheck + - staticcheck + - unconvert + - unparam + - unused + - whitespace + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - gofumpt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/gin-contrib/sse/sse-decoder.go b/vendor/github.com/gin-contrib/sse/sse-decoder.go index fd49b9c37..da2c2d4b6 100644 --- a/vendor/github.com/gin-contrib/sse/sse-decoder.go +++ b/vendor/github.com/gin-contrib/sse/sse-decoder.go @@ -7,7 +7,6 @@ package sse import ( "bytes" "io" - "io/ioutil" ) type decoder struct { @@ -22,7 +21,8 @@ func Decode(r io.Reader) ([]Event, error) { func (d *decoder) dispatchEvent(event Event, data string) { dataLength := len(data) if dataLength > 0 { - //If the data buffer's last character is a U+000A LINE FEED (LF) character, then remove the last character from the data buffer. + // If the data buffer's last character is a U+000A LINE FEED (LF) character, + // then remove the last character from the data buffer. data = data[:dataLength-1] dataLength-- } @@ -37,13 +37,13 @@ func (d *decoder) dispatchEvent(event Event, data string) { } func (d *decoder) decode(r io.Reader) ([]Event, error) { - buf, err := ioutil.ReadAll(r) + buf, err := io.ReadAll(r) if err != nil { return nil, err } var currentEvent Event - var dataBuffer *bytes.Buffer = new(bytes.Buffer) + dataBuffer := new(bytes.Buffer) // TODO (and unit tests) // Lines must be separated by either a U+000D CARRIAGE RETURN U+000A LINE FEED (CRLF) character pair, // a single U+000A LINE FEED (LF) character, @@ -96,7 +96,8 @@ func (d *decoder) decode(r io.Reader) ([]Event, error) { currentEvent.Id = string(value) case "retry": // If the field value consists of only characters in the range U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9), - // then interpret the field value as an integer in base ten, and set the event stream's reconnection time to that integer. + // then interpret the field value as an integer in base ten, and set the event stream's + // reconnection time to that integer. // Otherwise, ignore the field. currentEvent.Id = string(value) case "data": @@ -105,7 +106,7 @@ func (d *decoder) decode(r io.Reader) ([]Event, error) { // then append a single U+000A LINE FEED (LF) character to the data buffer. dataBuffer.WriteString("\n") default: - //Otherwise. 
The field is ignored. + // Otherwise. The field is ignored. continue } } diff --git a/vendor/github.com/gin-contrib/sse/sse-encoder.go b/vendor/github.com/gin-contrib/sse/sse-encoder.go index 0d26c82f0..9ebb49f41 100644 --- a/vendor/github.com/gin-contrib/sse/sse-encoder.go +++ b/vendor/github.com/gin-contrib/sse/sse-encoder.go @@ -20,8 +20,10 @@ import ( const ContentType = "text/event-stream;charset=utf-8" -var contentType = []string{ContentType} -var noCache = []string{"no-cache"} +var ( + contentType = []string{ContentType} + noCache = []string{"no-cache"} +) var fieldReplacer = strings.NewReplacer( "\n", "\\n", @@ -48,48 +50,48 @@ func Encode(writer io.Writer, event Event) error { func writeId(w stringWriter, id string) { if len(id) > 0 { - w.WriteString("id:") - fieldReplacer.WriteString(w, id) - w.WriteString("\n") + _, _ = w.WriteString("id:") + _, _ = fieldReplacer.WriteString(w, id) + _, _ = w.WriteString("\n") } } func writeEvent(w stringWriter, event string) { if len(event) > 0 { - w.WriteString("event:") - fieldReplacer.WriteString(w, event) - w.WriteString("\n") + _, _ = w.WriteString("event:") + _, _ = fieldReplacer.WriteString(w, event) + _, _ = w.WriteString("\n") } } func writeRetry(w stringWriter, retry uint) { if retry > 0 { - w.WriteString("retry:") - w.WriteString(strconv.FormatUint(uint64(retry), 10)) - w.WriteString("\n") + _, _ = w.WriteString("retry:") + _, _ = w.WriteString(strconv.FormatUint(uint64(retry), 10)) + _, _ = w.WriteString("\n") } } func writeData(w stringWriter, data interface{}) error { - w.WriteString("data:") + _, _ = w.WriteString("data:") bData, ok := data.([]byte) if ok { - dataReplacer.WriteString(w, string(bData)) - w.WriteString("\n\n") + _, _ = dataReplacer.WriteString(w, string(bData)) + _, _ = w.WriteString("\n\n") return nil } - switch kindOfData(data) { + switch kindOfData(data) { //nolint:exhaustive case reflect.Struct, reflect.Slice, reflect.Map: err := json.NewEncoder(w).Encode(data) if err != nil { return err } - w.WriteString("\n") + _, _ = w.WriteString("\n") default: - dataReplacer.WriteString(w, fmt.Sprint(data)) - w.WriteString("\n\n") + _, _ = dataReplacer.WriteString(w, fmt.Sprint(data)) + _, _ = w.WriteString("\n\n") } return nil } diff --git a/vendor/github.com/gin-contrib/sse/writer.go b/vendor/github.com/gin-contrib/sse/writer.go index 6f9806c55..724d9d07d 100644 --- a/vendor/github.com/gin-contrib/sse/writer.go +++ b/vendor/github.com/gin-contrib/sse/writer.go @@ -12,7 +12,7 @@ type stringWrapper struct { } func (w stringWrapper) WriteString(str string) (int, error) { - return w.Writer.Write([]byte(str)) + return w.Write([]byte(str)) } func checkWriter(writer io.Writer) stringWriter { diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml index 0cffafa7b..0ed62c1a1 100644 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -1,26 +1,28 @@ +version: "2" + run: timeout: 1m tests: true linters: - disable-all: true - enable: + default: none + enable: # please keep this alphabetized + - asasalint - asciicheck + - copyloopvar + - dupl - errcheck - forcetypeassert + - goconst - gocritic - - gofmt - - goimports - - gosimple - govet - ineffassign - misspell + - musttag - revive - staticcheck - - typecheck - unused issues: - exclude-use-default: false max-issues-per-linter: 0 max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 30568e768..b22c57d71 
100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -77,7 +77,7 @@ func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { write: fn, } // For skipping fnlogger.Info and fnlogger.Error. - l.Formatter.AddCallDepth(1) + l.AddCallDepth(1) // via Formatter return l } @@ -164,17 +164,17 @@ type fnlogger struct { } func (l fnlogger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) + l.AddName(name) // via Formatter return &l } func (l fnlogger) WithValues(kvList ...any) logr.LogSink { - l.Formatter.AddValues(kvList) + l.AddValues(kvList) // via Formatter return &l } func (l fnlogger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) + l.AddCallDepth(depth) // via Formatter return &l } diff --git a/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/spec.go b/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/spec.go index ed47da338..58f6a945b 100644 --- a/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/spec.go +++ b/vendor/github.com/go-swagger/go-swagger/cmd/swagger/commands/generate/spec.go @@ -30,17 +30,18 @@ import ( // SpecFile command to generate a swagger spec from a go application type SpecFile struct { - WorkDir string `long:"work-dir" short:"w" description:"the base path to use" default:"."` - BuildTags string `long:"tags" short:"t" description:"build tags" default:""` - ScanModels bool `long:"scan-models" short:"m" description:"includes models that were annotated with 'swagger:model'"` - Compact bool `long:"compact" description:"when present, doesn't prettify the json"` - Output flags.Filename `long:"output" short:"o" description:"the file to write to"` - Input flags.Filename `long:"input" short:"i" description:"an input swagger file with which to merge"` - Include []string `long:"include" short:"c" description:"include packages matching pattern"` - Exclude []string `long:"exclude" short:"x" description:"exclude packages matching pattern"` - IncludeTags []string `long:"include-tag" short:"" description:"include routes having specified tags (can be specified many times)"` - ExcludeTags []string `long:"exclude-tag" short:"" description:"exclude routes having specified tags (can be specified many times)"` - ExcludeDeps bool `long:"exclude-deps" short:"" description:"exclude all dependencies of project"` + WorkDir string `long:"work-dir" short:"w" description:"the base path to use" default:"."` + BuildTags string `long:"tags" short:"t" description:"build tags" default:""` + ScanModels bool `long:"scan-models" short:"m" description:"includes models that were annotated with 'swagger:model'"` + Compact bool `long:"compact" description:"when present, doesn't prettify the json"` + Output flags.Filename `long:"output" short:"o" description:"the file to write to"` + Input flags.Filename `long:"input" short:"i" description:"an input swagger file with which to merge"` + Include []string `long:"include" short:"c" description:"include packages matching pattern"` + Exclude []string `long:"exclude" short:"x" description:"exclude packages matching pattern"` + IncludeTags []string `long:"include-tag" short:"" description:"include routes having specified tags (can be specified many times)"` + ExcludeTags []string `long:"exclude-tag" short:"" description:"exclude routes having specified tags (can be specified many times)"` + ExcludeDeps bool `long:"exclude-deps" short:"" description:"exclude all dependencies of project"` + 
SetXNullableForPointers bool `long:"nullable-pointers" short:"n" description:"set x-nullable extension to true automatically for fields of pointer types without 'omitempty'"` } // Execute runs this command @@ -65,6 +66,7 @@ func (s *SpecFile) Execute(args []string) error { opts.IncludeTags = s.IncludeTags opts.ExcludeTags = s.ExcludeTags opts.ExcludeDeps = s.ExcludeDeps + opts.SetXNullableForPointers = s.SetXNullableForPointers swspec, err := codescan.Run(&opts) if err != nil { return err diff --git a/vendor/github.com/go-swagger/go-swagger/codescan/application.go b/vendor/github.com/go-swagger/go-swagger/codescan/application.go index b7051ab85..ebaa6261a 100644 --- a/vendor/github.com/go-swagger/go-swagger/codescan/application.go +++ b/vendor/github.com/go-swagger/go-swagger/codescan/application.go @@ -42,16 +42,17 @@ const ( // Options for the scanner type Options struct { - Packages []string - InputSpec *spec.Swagger - ScanModels bool - WorkDir string - BuildTags string - ExcludeDeps bool - Include []string - Exclude []string - IncludeTags []string - ExcludeTags []string + Packages []string + InputSpec *spec.Swagger + ScanModels bool + WorkDir string + BuildTags string + ExcludeDeps bool + Include []string + Exclude []string + IncludeTags []string + ExcludeTags []string + SetXNullableForPointers bool } type scanCtx struct { @@ -94,7 +95,7 @@ func newScanCtx(opts *Options) (*scanCtx, error) { app, err := newTypeIndex(pkgs, opts.ExcludeDeps, sliceToSet(opts.IncludeTags), sliceToSet(opts.ExcludeTags), - opts.Include, opts.Exclude) + opts.Include, opts.Exclude, opts.SetXNullableForPointers) if err != nil { return nil, err } @@ -418,16 +419,17 @@ func (s *scanCtx) FindEnumValues(pkg *packages.Package, enumName string) (list [ return list, descList, true } -func newTypeIndex(pkgs []*packages.Package, excludeDeps bool, includeTags, excludeTags map[string]bool, includePkgs, excludePkgs []string) (*typeIndex, error) { +func newTypeIndex(pkgs []*packages.Package, excludeDeps bool, includeTags, excludeTags map[string]bool, includePkgs, excludePkgs []string, setXNullableForPointers bool) (*typeIndex, error) { ac := &typeIndex{ - AllPackages: make(map[string]*packages.Package), - Models: make(map[*ast.Ident]*entityDecl), - ExtraModels: make(map[*ast.Ident]*entityDecl), - excludeDeps: excludeDeps, - includeTags: includeTags, - excludeTags: excludeTags, - includePkgs: includePkgs, - excludePkgs: excludePkgs, + AllPackages: make(map[string]*packages.Package), + Models: make(map[*ast.Ident]*entityDecl), + ExtraModels: make(map[*ast.Ident]*entityDecl), + excludeDeps: excludeDeps, + includeTags: includeTags, + excludeTags: excludeTags, + includePkgs: includePkgs, + excludePkgs: excludePkgs, + setXNullableForPointers: setXNullableForPointers, } if err := ac.build(pkgs); err != nil { return nil, err @@ -436,19 +438,20 @@ func newTypeIndex(pkgs []*packages.Package, excludeDeps bool, includeTags, exclu } type typeIndex struct { - AllPackages map[string]*packages.Package - Models map[*ast.Ident]*entityDecl - ExtraModels map[*ast.Ident]*entityDecl - Meta []metaSection - Routes []parsedPathContent - Operations []parsedPathContent - Parameters []*entityDecl - Responses []*entityDecl - excludeDeps bool - includeTags map[string]bool - excludeTags map[string]bool - includePkgs []string - excludePkgs []string + AllPackages map[string]*packages.Package + Models map[*ast.Ident]*entityDecl + ExtraModels map[*ast.Ident]*entityDecl + Meta []metaSection + Routes []parsedPathContent + Operations []parsedPathContent + 
Parameters []*entityDecl + Responses []*entityDecl + excludeDeps bool + includeTags map[string]bool + excludeTags map[string]bool + includePkgs []string + excludePkgs []string + setXNullableForPointers bool } func (a *typeIndex) build(pkgs []*packages.Package) error { diff --git a/vendor/github.com/go-swagger/go-swagger/codescan/parameters.go b/vendor/github.com/go-swagger/go-swagger/codescan/parameters.go index 9a0b77ca0..1ee769ae8 100644 --- a/vendor/github.com/go-swagger/go-swagger/codescan/parameters.go +++ b/vendor/github.com/go-swagger/go-swagger/codescan/parameters.go @@ -339,7 +339,7 @@ func (p *parameterBuilder) buildFromStruct(decl *entityDecl, tpe *types.Struct, continue } - name, ignore, _, err := parseJSONTag(afld) + name, ignore, _, _, err := parseJSONTag(afld) if err != nil { return err } diff --git a/vendor/github.com/go-swagger/go-swagger/codescan/responses.go b/vendor/github.com/go-swagger/go-swagger/codescan/responses.go index 95dff0f85..39274baf0 100644 --- a/vendor/github.com/go-swagger/go-swagger/codescan/responses.go +++ b/vendor/github.com/go-swagger/go-swagger/codescan/responses.go @@ -333,7 +333,7 @@ func (r *responseBuilder) buildFromStruct(decl *entityDecl, tpe *types.Struct, r continue } - name, ignore, _, err := parseJSONTag(afld) + name, ignore, _, _, err := parseJSONTag(afld) if err != nil { return err } diff --git a/vendor/github.com/go-swagger/go-swagger/codescan/schema.go b/vendor/github.com/go-swagger/go-swagger/codescan/schema.go index 98bdecba6..640ac0830 100644 --- a/vendor/github.com/go-swagger/go-swagger/codescan/schema.go +++ b/vendor/github.com/go-swagger/go-swagger/codescan/schema.go @@ -365,6 +365,10 @@ func (s *schemaBuilder) buildFromType(tpe types.Type, tgt swaggerTypable) error return s.buildFromType(titpe.Underlying(), tgt) } + if titpe.TypeArgs() != nil && titpe.TypeArgs().Len() > 0 { + return s.buildFromType(titpe.Underlying(), tgt) + } + switch utitpe := tpe.Underlying().(type) { case *types.Struct: if decl, ok := s.ctx.FindModel(tio.Pkg().Path(), tio.Name()); ok { @@ -407,7 +411,7 @@ func (s *schemaBuilder) buildFromType(tpe types.Type, tgt swaggerTypable) error } if defaultName, ok := defaultName(cmt); ok { - debugLog(defaultName) + debugLog(defaultName) //nolint:govet return nil } @@ -651,6 +655,12 @@ func (s *schemaBuilder) buildFromInterface(decl *entityDecl, it *types.Interface ps.AddExtension("x-go-name", fld.Name()) } + if s.ctx.app.setXNullableForPointers { + if _, isPointer := fld.Type().(*types.Signature).Results().At(0).Type().(*types.Pointer); isPointer && (ps.Extensions == nil || (ps.Extensions["x-nullable"] == nil && ps.Extensions["x-isnullable"] == nil)) { + ps.AddExtension("x-nullable", true) + } + } + seen[name] = fld.Name() tgt.Properties[name] = ps } @@ -716,7 +726,7 @@ func (s *schemaBuilder) buildFromStruct(decl *entityDecl, st *types.Struct, sche continue } - _, ignore, _, err := parseJSONTag(afld) + _, ignore, _, _, err := parseJSONTag(afld) if err != nil { return err } @@ -816,7 +826,7 @@ func (s *schemaBuilder) buildFromStruct(decl *entityDecl, st *types.Struct, sche continue } - name, ignore, isString, err := parseJSONTag(afld) + name, ignore, isString, omitEmpty, err := parseJSONTag(afld) if err != nil { return err } @@ -853,6 +863,13 @@ func (s *schemaBuilder) buildFromStruct(decl *entityDecl, st *types.Struct, sche addExtension(&ps.VendorExtensible, "x-go-name", fld.Name()) } + if s.ctx.app.setXNullableForPointers { + if _, isPointer := fld.Type().(*types.Pointer); isPointer && !omitEmpty && + (ps.Extensions 
== nil || (ps.Extensions["x-nullable"] == nil && ps.Extensions["x-isnullable"] == nil)) { + ps.AddExtension("x-nullable", true) + } + } + // we have 2 cases: // 1. field with different name override tag // 2. field with different name removes tag @@ -1106,17 +1123,17 @@ func (t tagOptions) Name() string { return t[0] } -func parseJSONTag(field *ast.Field) (name string, ignore bool, isString bool, err error) { +func parseJSONTag(field *ast.Field) (name string, ignore, isString, omitEmpty bool, err error) { if len(field.Names) > 0 { name = field.Names[0].Name } if field.Tag == nil || len(strings.TrimSpace(field.Tag.Value)) == 0 { - return name, false, false, nil + return name, false, false, false, nil } tv, err := strconv.Unquote(field.Tag.Value) if err != nil { - return name, false, false, err + return name, false, false, false, err } if strings.TrimSpace(tv) != "" { @@ -1129,16 +1146,18 @@ func parseJSONTag(field *ast.Field) (name string, ignore bool, isString bool, er isString = isFieldStringable(field.Type) } + omitEmpty = jsonParts.Contain("omitempty") + switch jsonParts.Name() { case "-": - return name, true, isString, nil + return name, true, isString, omitEmpty, nil case "": - return name, false, isString, nil + return name, false, isString, omitEmpty, nil default: - return jsonParts.Name(), false, isString, nil + return jsonParts.Name(), false, isString, omitEmpty, nil } } - return name, false, false, nil + return name, false, false, false, nil } // isFieldStringable check if the field type is a scalar. If the field type is diff --git a/vendor/github.com/go-swagger/go-swagger/generator/operation.go b/vendor/github.com/go-swagger/go-swagger/generator/operation.go index 07ff8a230..a2098c375 100644 --- a/vendor/github.com/go-swagger/go-swagger/generator/operation.go +++ b/vendor/github.com/go-swagger/go-swagger/generator/operation.go @@ -1258,7 +1258,7 @@ func (b *codeGenOpBuilder) analyzeTags() (string, []string, bool) { return tag, intersected, len(filter) == 0 || len(filter) > 0 && len(intersected) > 0 } -var versionedPkgRex = regexp.MustCompile(`(?i)(v)([0-9]+)`) +var versionedPkgRex = regexp.MustCompile(`(?i)^(v)([0-9]+)$`) func maxInt(a, b int) int { if a > b { diff --git a/vendor/github.com/go-swagger/go-swagger/generator/shared.go b/vendor/github.com/go-swagger/go-swagger/generator/shared.go index e466a9301..75ed251fe 100644 --- a/vendor/github.com/go-swagger/go-swagger/generator/shared.go +++ b/vendor/github.com/go-swagger/go-swagger/generator/shared.go @@ -280,7 +280,7 @@ type TemplateOpts struct { Target string `mapstructure:"target"` FileName string `mapstructure:"file_name"` SkipExists bool `mapstructure:"skip_exists"` - SkipFormat bool `mapstructure:"skip_format"` + SkipFormat bool `mapstructure:"skip_format"` // not a feature, but for debugging. generated code before formatting might not work because of unused imports. 
} // SectionOpts allows for specifying options to customize the templates used for generation diff --git a/vendor/github.com/go-swagger/go-swagger/generator/types.go b/vendor/github.com/go-swagger/go-swagger/generator/types.go index 5cc00f24b..59057ca8c 100644 --- a/vendor/github.com/go-swagger/go-swagger/generator/types.go +++ b/vendor/github.com/go-swagger/go-swagger/generator/types.go @@ -24,8 +24,8 @@ import ( "github.com/go-openapi/loads" "github.com/go-openapi/spec" "github.com/go-openapi/swag" + "github.com/go-viper/mapstructure/v2" "github.com/kr/pretty" - "github.com/mitchellh/mapstructure" ) const ( diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go index 8bf537f73..9d514947d 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go @@ -98,7 +98,7 @@ func (c *Client) GetBucketCors(ctx context.Context, bucketName string) (*cors.Co bucketCors, err := c.getBucketCors(ctx, bucketName) if err != nil { errResponse := ToErrorResponse(err) - if errResponse.Code == "NoSuchCORSConfiguration" { + if errResponse.Code == NoSuchCORSConfiguration { return nil, nil } return nil, err diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go index dbb5259a8..3a168c13e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go @@ -104,7 +104,7 @@ func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string bucketPolicy, err := c.getBucketPolicy(ctx, bucketName) if err != nil { errResponse := ToErrorResponse(err) - if errResponse.Code == "NoSuchBucketPolicy" { + if errResponse.Code == NoSuchBucketPolicy { return "", nil } return "", err diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go index 7df211fda..e85aa322c 100644 --- a/vendor/github.com/minio/minio-go/v7/api-error-response.go +++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go @@ -136,15 +136,15 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) if objectName == "" { errResp = ErrorResponse{ StatusCode: resp.StatusCode, - Code: "NoSuchBucket", - Message: "The specified bucket does not exist.", + Code: NoSuchBucket, + Message: s3ErrorResponseMap[NoSuchBucket], BucketName: bucketName, } } else { errResp = ErrorResponse{ StatusCode: resp.StatusCode, - Code: "NoSuchKey", - Message: "The specified key does not exist.", + Code: NoSuchKey, + Message: s3ErrorResponseMap[NoSuchKey], BucketName: bucketName, Key: objectName, } @@ -152,23 +152,23 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) case http.StatusForbidden: errResp = ErrorResponse{ StatusCode: resp.StatusCode, - Code: "AccessDenied", - Message: "Access Denied.", + Code: AccessDenied, + Message: s3ErrorResponseMap[AccessDenied], BucketName: bucketName, Key: objectName, } case http.StatusConflict: errResp = ErrorResponse{ StatusCode: resp.StatusCode, - Code: "Conflict", - Message: "Bucket not empty.", + Code: Conflict, + Message: s3ErrorResponseMap[Conflict], BucketName: bucketName, } case http.StatusPreconditionFailed: errResp = ErrorResponse{ StatusCode: resp.StatusCode, - Code: "PreconditionFailed", - Message: s3ErrorResponseMap["PreconditionFailed"], + Code: PreconditionFailed, + Message: s3ErrorResponseMap[PreconditionFailed], 
BucketName: bucketName, Key: objectName, } @@ -209,7 +209,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) if errResp.Region == "" { errResp.Region = resp.Header.Get("x-amz-bucket-region") } - if errResp.Code == "InvalidRegion" && errResp.Region != "" { + if errResp.Code == InvalidRegion && errResp.Region != "" { errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) } @@ -218,10 +218,11 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) // errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. func errTransferAccelerationBucket(bucketName string) error { + msg := "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’." return ErrorResponse{ StatusCode: http.StatusBadRequest, - Code: "InvalidArgument", - Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", + Code: InvalidArgument, + Message: msg, BucketName: bucketName, } } @@ -231,7 +232,7 @@ func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName st msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) return ErrorResponse{ StatusCode: http.StatusBadRequest, - Code: "EntityTooLarge", + Code: EntityTooLarge, Message: msg, BucketName: bucketName, Key: objectName, @@ -243,7 +244,7 @@ func errEntityTooSmall(totalSize int64, bucketName, objectName string) error { msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) return ErrorResponse{ StatusCode: http.StatusBadRequest, - Code: "EntityTooSmall", + Code: EntityTooSmall, Message: msg, BucketName: bucketName, Key: objectName, @@ -255,7 +256,7 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) return ErrorResponse{ StatusCode: http.StatusBadRequest, - Code: "UnexpectedEOF", + Code: UnexpectedEOF, Message: msg, BucketName: bucketName, Key: objectName, @@ -266,7 +267,7 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) func errInvalidArgument(message string) error { return ErrorResponse{ StatusCode: http.StatusBadRequest, - Code: "InvalidArgument", + Code: InvalidArgument, Message: message, RequestID: "minio", } @@ -277,7 +278,7 @@ func errInvalidArgument(message string) error { func errAPINotSupported(message string) error { return ErrorResponse{ StatusCode: http.StatusNotImplemented, - Code: "APINotSupported", + Code: APINotSupported, Message: message, RequestID: "minio", } diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go index 5cc85f61c..d3cb6c22a 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go @@ -34,14 +34,14 @@ func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, o if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, ErrorResponse{ StatusCode: http.StatusBadRequest, - Code: "InvalidBucketName", + Code: InvalidBucketName, Message: err.Error(), } } if err := s3utils.CheckValidObjectName(objectName); err != nil { return nil, ErrorResponse{ StatusCode: 
http.StatusBadRequest, - Code: "XMinioInvalidObjectName", + Code: XMinioInvalidObjectName, Message: err.Error(), } } @@ -659,14 +659,14 @@ func (c *Client) getObject(ctx context.Context, bucketName, objectName string, o if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, ObjectInfo{}, nil, ErrorResponse{ StatusCode: http.StatusBadRequest, - Code: "InvalidBucketName", + Code: InvalidBucketName, Message: err.Error(), } } if err := s3utils.CheckValidObjectName(objectName); err != nil { return nil, ObjectInfo{}, nil, ErrorResponse{ StatusCode: http.StatusBadRequest, - Code: "XMinioInvalidObjectName", + Code: XMinioInvalidObjectName, Message: err.Error(), } } diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go index 1af0fadbf..5bf67a666 100644 --- a/vendor/github.com/minio/minio-go/v7/api-list.go +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -285,7 +285,7 @@ func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefi // sure proper responses are received. if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { return listBucketResult, ErrorResponse{ - Code: "NotImplemented", + Code: NotImplemented, Message: "Truncated response should have continuation token set", } } @@ -419,19 +419,25 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts } for _, version := range vers { info := ObjectInfo{ - ETag: trimEtag(version.ETag), - Key: version.Key, - LastModified: version.LastModified.Truncate(time.Millisecond), - Size: version.Size, - Owner: version.Owner, - StorageClass: version.StorageClass, - IsLatest: version.IsLatest, - VersionID: version.VersionID, - IsDeleteMarker: version.isDeleteMarker, - UserTags: version.UserTags, - UserMetadata: version.UserMetadata, - Internal: version.Internal, - NumVersions: numVersions, + ETag: trimEtag(version.ETag), + Key: version.Key, + LastModified: version.LastModified.Truncate(time.Millisecond), + Size: version.Size, + Owner: version.Owner, + StorageClass: version.StorageClass, + IsLatest: version.IsLatest, + VersionID: version.VersionID, + IsDeleteMarker: version.isDeleteMarker, + UserTags: version.UserTags, + UserMetadata: version.UserMetadata, + Internal: version.Internal, + NumVersions: numVersions, + ChecksumMode: version.ChecksumType, + ChecksumCRC32: version.ChecksumCRC32, + ChecksumCRC32C: version.ChecksumCRC32C, + ChecksumSHA1: version.ChecksumSHA1, + ChecksumSHA256: version.ChecksumSHA256, + ChecksumCRC64NVME: version.ChecksumCRC64NVME, } if !yield(info) { return false @@ -753,13 +759,9 @@ func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListOb objectStatCh := make(chan ObjectInfo, 1) go func() { defer close(objectStatCh) - send := func(obj ObjectInfo) bool { - select { - case <-ctx.Done(): - return false - case objectStatCh <- obj: - return true - } + if contextCanceled(ctx) { + objectStatCh <- ObjectInfo{Err: ctx.Err()} + return } var objIter iter.Seq[ObjectInfo] @@ -777,8 +779,11 @@ func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListOb } } for obj := range objIter { - if !send(obj) { + select { + case <-ctx.Done(): + objectStatCh <- ObjectInfo{Err: ctx.Err()} return + case objectStatCh <- obj: } } }() diff --git a/vendor/github.com/minio/minio-go/v7/api-prompt-object.go b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go index bf6239d2d..26c41d34a 100644 --- a/vendor/github.com/minio/minio-go/v7/api-prompt-object.go +++ 
b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go @@ -35,14 +35,14 @@ func (c *Client) PromptObject(ctx context.Context, bucketName, objectName, promp if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, ErrorResponse{ StatusCode: http.StatusBadRequest, - Code: "InvalidBucketName", + Code: InvalidBucketName, Message: err.Error(), } } if err := s3utils.CheckValidObjectName(objectName); err != nil { return nil, ErrorResponse{ StatusCode: http.StatusBadRequest, - Code: "XMinioInvalidObjectName", + Code: XMinioInvalidObjectName, Message: err.Error(), } } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go index 447d0c796..47d8419e6 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go @@ -35,7 +35,7 @@ func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBuc err = c.doMakeBucket(ctx, bucketName, opts) if err != nil && (opts.Region == "" || opts.Region == "us-east-1") { - if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" { + if resp, ok := err.(ErrorResponse); ok && resp.Code == AuthorizationHeaderMalformed && resp.Region != "" { opts.Region = resp.Region err = c.doMakeBucket(ctx, bucketName, opts) } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go index 84bc19b28..844172324 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -44,7 +44,7 @@ func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName errResp := ToErrorResponse(err) // Verify if multipart functionality is not available, if not // fall back to single PutObject operation. - if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { + if errResp.Code == AccessDenied && strings.Contains(errResp.Message, "Access Denied") { // Verify if size of reader is greater than '5GiB'. if size > maxSinglePutObjectSize { return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) @@ -392,13 +392,14 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object // Instantiate all the complete multipart buffer. completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: completeMultipartUploadBuffer, - contentLength: int64(len(completeMultipartUploadBytes)), - contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), - customHeader: headers, + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: completeMultipartUploadBuffer, + contentLength: int64(len(completeMultipartUploadBytes)), + contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), + customHeader: headers, + expect200OKWithError: true, } // Execute POST to complete multipart upload for an objectName. 
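Note on the minio-go hunks above: ErrorResponse.Code comparisons move from raw string literals to the exported error-code constants (NoSuchBucket, NoSuchKey, AccessDenied, InvalidBucketName, XMinioInvalidObjectName, and so on), and completeMultipartUpload now sets expect200OKWithError on its request metadata. A minimal sketch of how calling code can follow the same constant-based pattern; the endpoint, credentials, and bucket/object names are placeholders and not part of this changeset:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials, for illustration only.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	_, err = client.StatObject(context.Background(), "some-bucket", "missing.txt", minio.StatObjectOptions{})
	if err != nil {
		// Compare against the exported constants instead of hard-coded strings.
		switch minio.ToErrorResponse(err).Code {
		case minio.NoSuchBucket, minio.NoSuchKey:
			log.Println("bucket or object does not exist")
		case minio.AccessDenied:
			log.Println("access denied")
		default:
			log.Println("unexpected error:", err)
		}
	}
}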
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index 987a3c692..4a7243edc 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -56,7 +56,7 @@ func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objec errResp := ToErrorResponse(err) // Verify if multipart functionality is not available, if not // fall back to single PutObject operation. - if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { + if errResp.Code == AccessDenied && strings.Contains(errResp.Message, "Access Denied") { // Verify if size of reader is greater than '5GiB'. if size > maxSinglePutObjectSize { return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go index 5b4443ec5..2a38e014a 100644 --- a/vendor/github.com/minio/minio-go/v7/api-remove.go +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -22,6 +22,7 @@ import ( "context" "encoding/xml" "io" + "iter" "net/http" "net/url" "time" @@ -271,7 +272,7 @@ func processRemoveMultiObjectsResponse(body io.Reader, resultCh chan<- RemoveObj for _, obj := range rmResult.UnDeletedObjects { // Version does not exist is not an error ignore and continue. switch obj.Code { - case "InvalidArgument", "NoSuchVersion": + case InvalidArgument, NoSuchVersion: continue } resultCh <- RemoveObjectResult{ @@ -333,6 +334,33 @@ func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh return errorCh } +// RemoveObjectsWithIter bulk deletes multiple objects from a bucket. +// Objects (with optional versions) to be removed must be provided with +// an iterator. Objects are removed asynchronously and results must be +// consumed. If the returned result iterator is stopped, the context is +// canceled, or a remote call failed, the provided iterator will no +// longer accept more objects. +func (c *Client) RemoveObjectsWithIter(ctx context.Context, bucketName string, objectsIter iter.Seq[ObjectInfo], opts RemoveObjectsOptions) (iter.Seq[RemoveObjectResult], error) { + // Validate if bucket name is valid. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + // Validate objects channel to be properly allocated. + if objectsIter == nil { + return nil, errInvalidArgument("Objects iter can never by nil") + } + + return func(yield func(RemoveObjectResult) bool) { + select { + case <-ctx.Done(): + return + default: + } + + c.removeObjectsIter(ctx, bucketName, objectsIter, yield, opts) + }, nil +} + // RemoveObjectsWithResult removes multiple objects from a bucket while // it is possible to specify objects versions which are received from // objectsCh. Remove results, successes and failures are sent back via @@ -381,6 +409,144 @@ func hasInvalidXMLChar(str string) bool { return false } +// Generate and call MultiDelete S3 requests based on entries received from the iterator. +func (c *Client) removeObjectsIter(ctx context.Context, bucketName string, objectsIter iter.Seq[ObjectInfo], yield func(RemoveObjectResult) bool, opts RemoveObjectsOptions) { + maxEntries := 1000 + urlValues := make(url.Values) + urlValues.Set("delete", "") + + // Build headers. 
+ headers := make(http.Header) + if opts.GovernanceBypass { + // Set the bypass goverenance retention header + headers.Set(amzBypassGovernance, "true") + } + + processRemoveMultiObjectsResponseIter := func(batch []ObjectInfo, yield func(RemoveObjectResult) bool) bool { + if len(batch) == 0 { + return false + } + + // Generate remove multi objects XML request + removeBytes := generateRemoveMultiObjectsRequest(batch) + // Execute POST on bucket to remove objects. + resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(removeBytes), + contentLength: int64(len(removeBytes)), + contentMD5Base64: sumMD5Base64(removeBytes), + contentSHA256Hex: sum256Hex(removeBytes), + customHeader: headers, + }) + if resp != nil { + defer closeResponse(resp) + if resp.StatusCode != http.StatusOK { + err = httpRespToErrorResponse(resp, bucketName, "") + } + } + if err != nil { + for _, b := range batch { + if !yield(RemoveObjectResult{ + ObjectName: b.Key, + ObjectVersionID: b.VersionID, + Err: err, + }) { + return false + } + } + return false + } + + // Parse multi delete XML response + rmResult := &deleteMultiObjectsResult{} + if err := xmlDecoder(resp.Body, rmResult); err != nil { + yield(RemoveObjectResult{ObjectName: "", Err: err}) + return false + } + + // Fill deletion that returned an error. + for _, obj := range rmResult.UnDeletedObjects { + // Version does not exist is not an error ignore and continue. + switch obj.Code { + case "InvalidArgument", "NoSuchVersion": + continue + } + if !yield(RemoveObjectResult{ + ObjectName: obj.Key, + ObjectVersionID: obj.VersionID, + Err: ErrorResponse{ + Code: obj.Code, + Message: obj.Message, + }, + }) { + return false + } + } + + // Fill deletion that returned success + for _, obj := range rmResult.DeletedObjects { + if !yield(RemoveObjectResult{ + ObjectName: obj.Key, + // Only filled with versioned buckets + ObjectVersionID: obj.VersionID, + DeleteMarker: obj.DeleteMarker, + DeleteMarkerVersionID: obj.DeleteMarkerVersionID, + }) { + return false + } + } + + return true + } + + var batch []ObjectInfo + + next, stop := iter.Pull(objectsIter) + defer stop() + + for { + // Loop over entries by 1000 and call MultiDelete requests + object, ok := next() + if !ok { + // delete the remaining batch. + processRemoveMultiObjectsResponseIter(batch, yield) + return + } + + if hasInvalidXMLChar(object.Key) { + // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document. + removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{ + VersionID: object.VersionID, + GovernanceBypass: opts.GovernanceBypass, + }) + if err := removeResult.Err; err != nil { + // Version does not exist is not an error ignore and continue. 
+ switch ToErrorResponse(err).Code { + case "InvalidArgument", "NoSuchVersion": + continue + } + } + if !yield(removeResult) { + return + } + + continue + } + + batch = append(batch, object) + if len(batch) < maxEntries { + continue + } + + if !processRemoveMultiObjectsResponseIter(batch, yield) { + return + } + + batch = batch[:0] + } +} + // Generate and call MultiDelete S3 requests based on entries received from objectsCh func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) { maxEntries := 1000 @@ -407,7 +573,7 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh if err := removeResult.Err; err != nil { // Version does not exist is not an error ignore and continue. switch ToErrorResponse(err).Code { - case "InvalidArgument", "NoSuchVersion": + case InvalidArgument, NoSuchVersion: continue } resultCh <- removeResult @@ -442,13 +608,14 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh removeBytes := generateRemoveMultiObjectsRequest(batch) // Execute POST on bucket to remove objects. resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(removeBytes), - contentLength: int64(len(removeBytes)), - contentMD5Base64: sumMD5Base64(removeBytes), - contentSHA256Hex: sum256Hex(removeBytes), - customHeader: headers, + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(removeBytes), + contentLength: int64(len(removeBytes)), + contentMD5Base64: sumMD5Base64(removeBytes), + contentSHA256Hex: sum256Hex(removeBytes), + customHeader: headers, + expect200OKWithError: true, }) if resp != nil { if resp.StatusCode != http.StatusOK { @@ -535,7 +702,7 @@ func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectNam // This is needed specifically for abort and it cannot // be converged into default case. errorResponse = ErrorResponse{ - Code: "NoSuchUpload", + Code: NoSuchUpload, Message: "The specified multipart upload does not exist.", BucketName: bucketName, Key: objectName, diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go index aaa3158b9..32d589716 100644 --- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go @@ -107,6 +107,14 @@ type Version struct { M int // Parity blocks } `xml:"Internal"` + // Checksum values. Only returned by AiStor servers. 
+ ChecksumCRC32 string `xml:",omitempty"` + ChecksumCRC32C string `xml:",omitempty"` + ChecksumSHA1 string `xml:",omitempty"` + ChecksumSHA256 string `xml:",omitempty"` + ChecksumCRC64NVME string `xml:",omitempty"` + ChecksumType string `xml:",omitempty"` + isDeleteMarker bool } diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go index 11455beb3..a4b2af7ae 100644 --- a/vendor/github.com/minio/minio-go/v7/api-stat.go +++ b/vendor/github.com/minio/minio-go/v7/api-stat.go @@ -39,14 +39,14 @@ func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, err }) defer closeResponse(resp) if err != nil { - if ToErrorResponse(err).Code == "NoSuchBucket" { + if ToErrorResponse(err).Code == NoSuchBucket { return false, nil } return false, err } if resp != nil { resperr := httpRespToErrorResponse(resp, bucketName, "") - if ToErrorResponse(resperr).Code == "NoSuchBucket" { + if ToErrorResponse(resperr).Code == NoSuchBucket { return false, nil } if resp.StatusCode != http.StatusOK { @@ -63,14 +63,14 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, ErrorResponse{ StatusCode: http.StatusBadRequest, - Code: "InvalidBucketName", + Code: InvalidBucketName, Message: err.Error(), } } if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectInfo{}, ErrorResponse{ StatusCode: http.StatusBadRequest, - Code: "XMinioInvalidObjectName", + Code: XMinioInvalidObjectName, Message: err.Error(), } } @@ -102,8 +102,8 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker { errResp := ErrorResponse{ StatusCode: resp.StatusCode, - Code: "MethodNotAllowed", - Message: "The specified method is not allowed against this resource.", + Code: MethodNotAllowed, + Message: s3ErrorResponseMap[MethodNotAllowed], BucketName: bucketName, Key: objectName, } diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index cc00f92a3..10a12ccfa 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -21,6 +21,7 @@ import ( "bytes" "context" "encoding/base64" + "encoding/xml" "errors" "fmt" "io" @@ -38,6 +39,7 @@ import ( "sync/atomic" "time" + "github.com/dustin/go-humanize" md5simd "github.com/minio/md5-simd" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/kvcache" @@ -45,6 +47,8 @@ import ( "github.com/minio/minio-go/v7/pkg/signer" "github.com/minio/minio-go/v7/pkg/singleflight" "golang.org/x/net/publicsuffix" + + internalutils "github.com/minio/minio-go/v7/pkg/utils" ) // Client implements Amazon S3 compatible methods. @@ -159,7 +163,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.92" + libraryVersion = "v7.0.94" ) // User Agent should always following the below style. 
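Note on the Version/ObjectInfo changes above: listObjectVersions now copies the per-version checksum fields (CRC32, CRC32C, SHA1, SHA256, CRC64NVME and the checksum type) into each ObjectInfo entry when the listing response includes them. A rough usage sketch, assuming a placeholder endpoint, credentials, and bucket name; on servers that do not report checksums the fields simply stay empty:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials, for illustration only.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// WithVersions lists object versions; the checksum fields filled in by
	// this release ride along on each ObjectInfo when the server returns them.
	for obj := range client.ListObjects(context.Background(), "some-bucket", minio.ListObjectsOptions{
		WithVersions: true,
		Recursive:    true,
	}) {
		if obj.Err != nil {
			log.Fatal(obj.Err)
		}
		fmt.Println(obj.Key, obj.VersionID, obj.ChecksumMode, obj.ChecksumCRC32C, obj.ChecksumSHA256)
	}
}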
@@ -455,7 +459,7 @@ func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, erro gcancel() if !IsNetworkOrHostDown(err, false) { switch ToErrorResponse(err).Code { - case "NoSuchBucket", "AccessDenied", "": + case NoSuchBucket, AccessDenied, "": atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) } } @@ -477,7 +481,7 @@ func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, erro gcancel() if !IsNetworkOrHostDown(err, false) { switch ToErrorResponse(err).Code { - case "NoSuchBucket", "AccessDenied", "": + case NoSuchBucket, AccessDenied, "": atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) } } @@ -512,6 +516,8 @@ type requestMetadata struct { streamSha256 bool addCrc *ChecksumType trailer http.Header // (http.Request).Trailer. Requires v4 signature. + + expect200OKWithError bool } // dumpHTTP - dump HTTP request and response. @@ -615,6 +621,28 @@ func (c *Client) do(req *http.Request) (resp *http.Response, err error) { return resp, nil } +// Peek resp.Body looking for S3 XMl error response: +// - Return the error XML bytes if an error is found +// - Make sure to always restablish the whole http response stream before returning +func tryParseErrRespFromBody(resp *http.Response) ([]byte, error) { + peeker := internalutils.NewPeekReadCloser(resp.Body, 5*humanize.MiByte) + defer func() { + peeker.ReplayFromStart() + resp.Body = peeker + }() + + errResp := ErrorResponse{} + errBytes, err := xmlDecodeAndBody(peeker, &errResp) + if err != nil { + var unmarshalErr xml.UnmarshalError + if errors.As(err, &unmarshalErr) { + return nil, nil + } + return nil, err + } + return errBytes, nil +} + // List of success status. var successStatus = []int{ http.StatusOK, @@ -702,16 +730,30 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ return nil, err } - // For any known successful http status, return quickly. + var success bool + var errBodyBytes []byte + for _, httpStatus := range successStatus { if httpStatus == res.StatusCode { - return res, nil + success = true + break } } - // Read the body to be saved later. - errBodyBytes, err := io.ReadAll(res.Body) - // res.Body should be closed + if success { + if !metadata.expect200OKWithError { + return res, nil + } + errBodyBytes, err = tryParseErrRespFromBody(res) + if err == nil && len(errBodyBytes) == 0 { + // No S3 XML error is found + return res, nil + } + } else { + errBodyBytes, err = io.ReadAll(res.Body) + } + + // By now, res.Body should be closed closeResponse(res) if err != nil { return nil, err @@ -723,6 +765,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ // For errors verify if its retryable otherwise fail quickly. errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) + err = errResponse // Save the body back again. errBodySeeker.Seek(0, 0) // Seek back to starting point. @@ -736,11 +779,11 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ // region is empty. if c.region == "" { switch errResponse.Code { - case "AuthorizationHeaderMalformed": + case AuthorizationHeaderMalformed: fallthrough - case "InvalidRegion": + case InvalidRegion: fallthrough - case "AccessDenied": + case AccessDenied: if errResponse.Region == "" { // Region is empty we simply return the error. 
return res, err diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go index e43b889b9..b41902f65 100644 --- a/vendor/github.com/minio/minio-go/v7/bucket-cache.go +++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go @@ -84,18 +84,18 @@ func processBucketLocationResponse(resp *http.Response, bucketName string) (buck // request. Move forward and let the top level callers // succeed if possible based on their policy. switch errResp.Code { - case "NotImplemented": + case NotImplemented: switch errResp.Server { case "AmazonSnowball": return "snowball", nil case "cloudflare": return "us-east-1", nil } - case "AuthorizationHeaderMalformed": + case AuthorizationHeaderMalformed: fallthrough - case "InvalidRegion": + case InvalidRegion: fallthrough - case "AccessDenied": + case AccessDenied: if errResp.Region == "" { return "us-east-1", nil } diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index 33e87e6e1..97c6930fb 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -31,6 +31,7 @@ import ( "hash" "hash/crc32" "io" + "iter" "log/slog" "math/rand" "mime/multipart" @@ -259,7 +260,7 @@ func cleanupVersionedBucket(bucketName string, c *minio.Client) error { } func isErrNotImplemented(err error) bool { - return minio.ToErrorResponse(err).Code == "NotImplemented" + return minio.ToErrorResponse(err).Code == minio.NotImplemented } func isRunOnFail() bool { @@ -465,8 +466,8 @@ func testMakeBucketError() { return } // Verify valid error response from server. - if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && - minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + if minio.ToErrorResponse(err).Code != minio.BucketAlreadyExists && + minio.ToErrorResponse(err).Code != minio.BucketAlreadyOwnedByYou { logError(testName, function, args, startTime, "", "Invalid error returned by server", err) return } @@ -1073,7 +1074,7 @@ func testPutObjectWithVersioning() { var results []minio.ObjectInfo for info := range objectsInfo { if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err) return } results = append(results, info) @@ -3204,7 +3205,7 @@ func testGetObjectAttributesErrorCases() { } errorResponse := err.(minio.ErrorResponse) - if errorResponse.Code != "NoSuchBucket" { + if errorResponse.Code != minio.NoSuchBucket { logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchBucket but got "+errorResponse.Code, nil) return } @@ -3247,8 +3248,8 @@ func testGetObjectAttributesErrorCases() { } errorResponse = err.(minio.ErrorResponse) - if errorResponse.Code != "NoSuchKey" { - logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchKey but got "+errorResponse.Code, nil) + if errorResponse.Code != minio.NoSuchKey { + logError(testName, function, args, startTime, "", "Invalid error code, expected "+minio.NoSuchKey+" but got "+errorResponse.Code, nil) return } @@ -3272,8 +3273,8 @@ func testGetObjectAttributesErrorCases() { return } errorResponse = err.(minio.ErrorResponse) - if errorResponse.Code != "NoSuchVersion" { - logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchVersion but got "+errorResponse.Code, nil) + if 
errorResponse.Code != minio.NoSuchVersion { + logError(testName, function, args, startTime, "", "Invalid error code, expected "+minio.NoSuchVersion+" but got "+errorResponse.Code, nil) return } @@ -3928,10 +3929,10 @@ func testRemoveMultipleObjects() { defer cleanupBucket(bucketName, c) - r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) + r := bytes.NewReader(bytes.Repeat([]byte("a"), 1)) // Multi remove of 1100 objects - nrObjects := 200 + nrObjects := 1100 objectsCh := make(chan minio.ObjectInfo) @@ -3940,7 +3941,7 @@ func testRemoveMultipleObjects() { // Upload objects and send them to objectsCh for i := 0; i < nrObjects; i++ { objectName := "sample" + strconv.Itoa(i) + ".txt" - info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, + info, err := c.PutObject(context.Background(), bucketName, objectName, r, 1, minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) @@ -3968,6 +3969,78 @@ func testRemoveMultipleObjects() { logSuccess(testName, function, args, startTime) } +// Test removing multiple objects with Remove API as iterator +func testRemoveMultipleObjectsIter() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh)" + args := map[string]interface{}{ + "bucketName": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + buf := []byte("a") + + // Multi remove of 1100 objects + nrObjects := 1100 + + objectsIter := func() iter.Seq[minio.ObjectInfo] { + return func(yield func(minio.ObjectInfo) bool) { + // Upload objects and send them to objectsCh + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + info, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), 1, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + continue + } + if !yield(minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + }) { + return + } + } + } + } + + // Call RemoveObjects API + results, err := c.RemoveObjectsWithIter(context.Background(), bucketName, objectsIter(), minio.RemoveObjectsOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", err) + return + } + + for result := range results { + if result.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", result.Err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + // Test removing multiple objects and check for results func testRemoveMultipleObjectsWithResult() { // initialize logging params @@ -3997,7 +4070,7 @@ func testRemoveMultipleObjectsWithResult() { defer cleanupVersionedBucket(bucketName, c) - r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) + buf := []byte("a") nrObjects := 10 nrLockedObjects := 5 @@ 
-4009,7 +4082,7 @@ func testRemoveMultipleObjectsWithResult() { // Upload objects and send them to objectsCh for i := 0; i < nrObjects; i++ { objectName := "sample" + strconv.Itoa(i) + ".txt" - info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, + info, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), 1, minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) @@ -7589,7 +7662,7 @@ func testGetObjectModified() { // Confirm that a Stat() call in between doesn't change the Object's cached etag. _, err = reader.Stat() - expectedError := "At least one of the pre-conditions you specified did not hold" + expectedError := "At least one of the pre-conditions you specified did not hold." if err.Error() != expectedError { logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err) return @@ -7751,8 +7824,8 @@ func testMakeBucketErrorV2() { return } // Verify valid error response from server. - if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && - minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + if minio.ToErrorResponse(err).Code != minio.BucketAlreadyExists && + minio.ToErrorResponse(err).Code != minio.BucketAlreadyOwnedByYou { logError(testName, function, args, startTime, "", "Invalid error returned by server", err) return } @@ -11415,6 +11488,87 @@ func testPutObject0ByteV2() { logSuccess(testName, function, args, startTime) } +// Test put object with 0 byte object with non-US-ASCII characters. +func testPutObjectMetadataNonUSASCIIV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": 0, + "opts": "", + } + metadata := map[string]string{ + "test-zh": "你好", + "test-ja": "こんにちは", + "test-ko": "안녕하세요", + "test-ru": "Здравствуй", + "test-de": "Hallo", + "test-it": "Ciao", + "test-pt": "Olá", + "test-ar": "مرحبا", + "test-hi": "नमस्ते", + "test-hu": "Helló", + "test-ro": "Bună", + "test-be": "Прывiтанне", + "test-sl": "Pozdravljen", + "test-sr": "Здраво", + "test-bg": "Здравейте", + "test-uk": "Привіт", + } + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := bucketName + "unique" + args["objectName"] = objectName + args["opts"] = minio.PutObjectOptions{} + + // Upload an object. 
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{ + UserMetadata: metadata, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err) + return + } + if st.Size != 0 { + logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(st.Size), err) + return + } + + for k, v := range metadata { + if st.Metadata.Get(http.CanonicalHeaderKey("X-Amz-Meta-"+k)) != v { + logError(testName, function, args, startTime, "", "Expected upload object metadata "+k+": "+v+" but got "+st.Metadata.Get("X-Amz-Meta-"+k), err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + // Test expected error cases func testComposeObjectErrorCases() { // initialize logging params @@ -13557,6 +13711,115 @@ func testRemoveObjects() { logSuccess(testName, function, args, startTime) } +// Test deleting multiple objects with object retention set in Governance mode, via iterators +func testRemoveObjectsIter() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectPrefix": "", + "recursive": "true", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Error uploading object", err) + return + } + + // Replace with smaller... 
+ bufSize = dataFileMap["datafile-10-kB"] + reader = getDataReader("datafile-10-kB") + defer reader.Close() + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Error uploading object", err) + } + + t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC) + m := minio.RetentionMode(minio.Governance) + opts := minio.PutObjectRetentionOptions{ + GovernanceBypass: false, + RetainUntilDate: &t, + Mode: &m, + } + err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "Error setting retention", err) + return + } + + objectsIter := c.ListObjectsIter(context.Background(), bucketName, minio.ListObjectsOptions{ + WithVersions: true, + Recursive: true, + }) + results, err := c.RemoveObjectsWithIter(context.Background(), bucketName, objectsIter, minio.RemoveObjectsOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Error sending delete request", err) + return + } + for result := range results { + if result.Err != nil { + // Error is expected here because Retention is set on the object + // and RemoveObjects is called without Bypass Governance + break + } + logError(testName, function, args, startTime, "", "Expected error during deletion", nil) + return + } + + objectsIter = c.ListObjectsIter(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) + results, err = c.RemoveObjectsWithIter(context.Background(), bucketName, objectsIter, minio.RemoveObjectsOptions{ + GovernanceBypass: true, + }) + if err != nil { + logError(testName, function, args, startTime, "", "Error sending delete request", err) + return + } + for result := range results { + if result.Err != nil { + // Error is not expected here because Retention is set on the object + // and RemoveObjects is called with Bypass Governance + logError(testName, function, args, startTime, "", "Error detected during deletion", result.Err) + return + } + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + // Test get bucket tags func testGetBucketTagging() { // initialize logging params @@ -13585,7 +13848,7 @@ func testGetBucketTagging() { } _, err = c.GetBucketTagging(context.Background(), bucketName) - if minio.ToErrorResponse(err).Code != "NoSuchTagSet" { + if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet { logError(testName, function, args, startTime, "", "Invalid error from server failed", err) return } @@ -13627,7 +13890,7 @@ func testSetBucketTagging() { } _, err = c.GetBucketTagging(context.Background(), bucketName) - if minio.ToErrorResponse(err).Code != "NoSuchTagSet" { + if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet { logError(testName, function, args, startTime, "", "Invalid error from server", err) return } @@ -13699,7 +13962,7 @@ func testRemoveBucketTagging() { } _, err = c.GetBucketTagging(context.Background(), bucketName) - if minio.ToErrorResponse(err).Code != "NoSuchTagSet" { + if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet { logError(testName, function, args, startTime, "", "Invalid error from server", err) return } @@ -13740,7 +14003,7 @@ func testRemoveBucketTagging() { } _, err = 
c.GetBucketTagging(context.Background(), bucketName) - if minio.ToErrorResponse(err).Code != "NoSuchTagSet" { + if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet { logError(testName, function, args, startTime, "", "Invalid error from server", err) return } @@ -13809,6 +14072,7 @@ func main() { testPutMultipartObjectWithChecksums(false) testPutMultipartObjectWithChecksums(true) testPutObject0ByteV2() + testPutObjectMetadataNonUSASCIIV2() testPutObjectNoLengthV2() testPutObjectsUnknownV2() testGetObjectContextV2() @@ -13826,6 +14090,7 @@ func main() { testGetObjectS3Zip() testRemoveMultipleObjects() testRemoveMultipleObjectsWithResult() + testRemoveMultipleObjectsIter() testFPutObjectMultipart() testFPutObject() testGetObjectReadSeekFunctional() @@ -13852,6 +14117,7 @@ func main() { testPutObjectWithContentLanguage() testListObjects() testRemoveObjects() + testRemoveObjectsIter() testListObjectVersions() testStatObjectWithVersioning() testGetObjectWithVersioning() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go index 55636ad48..2f7993f4b 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go @@ -730,6 +730,8 @@ type Metrics struct { Errors TimedErrStats `json:"failed,omitempty"` // Total number of entries that are queued for replication QStats InQueueMetric `json:"queued"` + // Total number of entries that have replication in progress + InProgress InProgressMetric `json:"inProgress"` // Deprecated fields // Total Pending size in bytes across targets PendingSize uint64 `json:"pendingReplicationSize,omitempty"` @@ -830,6 +832,9 @@ type InQueueMetric struct { Max QStat `json:"peak" msg:"pq"` } +// InProgressMetric holds stats for objects with replication in progress +type InProgressMetric InQueueMetric + // MetricName name of replication metric type MetricName string @@ -849,6 +854,14 @@ type WorkerStat struct { Max int32 `json:"max"` } +// TgtHealth holds health status of a target +type TgtHealth struct { + Online bool `json:"online"` + LastOnline time.Time `json:"lastOnline"` + TotalDowntime time.Duration `json:"totalDowntime"` + OfflineCount int64 `json:"offlineCount"` +} + // ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes // and number of entries that failed replication after 3 retries type ReplMRFStats struct { @@ -863,15 +876,18 @@ type ReplMRFStats struct { type ReplQNodeStats struct { NodeName string `json:"nodeName"` Uptime int64 `json:"uptime"` - Workers WorkerStat `json:"activeWorkers"` + Workers WorkerStat `json:"workers"` XferStats map[MetricName]XferStats `json:"transferSummary"` TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"` - QStats InQueueMetric `json:"queueStats"` - MRFStats ReplMRFStats `json:"mrfStats"` - Retries CounterSummary `json:"retries"` - Errors CounterSummary `json:"errors"` + QStats InQueueMetric `json:"queueStats"` + InProgressStats InProgressMetric `json:"progressStats"` + + MRFStats ReplMRFStats `json:"mrfStats"` + Retries CounterSummary `json:"retries"` + Errors CounterSummary `json:"errors"` + TgtHealth map[string]TgtHealth `json:"tgtHealth,omitempty"` } // CounterSummary denotes the stats counter summary @@ -918,6 +934,19 @@ func (q ReplQueueStats) qStatSummary() InQueueMetric { return m } +// inProgressSummary returns cluster level stats for objects with replication in progress +func (q 
ReplQueueStats) inProgressSummary() InProgressMetric { + m := InProgressMetric{} + for _, v := range q.Nodes { + m.Avg.Add(v.InProgressStats.Avg) + m.Curr.Add(v.InProgressStats.Curr) + if m.Max.Count < v.InProgressStats.Max.Count { + m.Max.Add(v.InProgressStats.Max) + } + } + return m +} + // ReplQStats holds stats for objects in replication queue type ReplQStats struct { Uptime int64 `json:"uptime"` @@ -926,7 +955,9 @@ type ReplQStats struct { XferStats map[MetricName]XferStats `json:"xferStats"` TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"` - QStats InQueueMetric `json:"qStats"` + QStats InQueueMetric `json:"qStats"` + InProgressStats InProgressMetric `json:"progressStats"` + MRFStats ReplMRFStats `json:"mrfStats"` Retries CounterSummary `json:"retries"` Errors CounterSummary `json:"errors"` @@ -935,10 +966,10 @@ type ReplQStats struct { // QStats returns cluster level stats for objects in replication queue func (q ReplQueueStats) QStats() (r ReplQStats) { r.QStats = q.qStatSummary() + r.InProgressStats = q.inProgressSummary() r.XferStats = make(map[MetricName]XferStats) r.TgtXferStats = make(map[string]map[MetricName]XferStats) r.Workers = q.Workers() - for _, node := range q.Nodes { for arn := range node.TgtXferStats { xmap, ok := node.TgtXferStats[arn] diff --git a/vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go b/vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go new file mode 100644 index 000000000..d6f674fac --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/utils/peek-reader-closer.go @@ -0,0 +1,73 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2025 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "bytes" + "errors" + "io" +) + +// PeekReadCloser offers a way to peek a ReadCloser stream and then +// return the exact stream of the underlying ReadCloser +type PeekReadCloser struct { + io.ReadCloser + + recordMode bool + recordMaxBuf int + recordBuf *bytes.Buffer +} + +// ReplayFromStart ensures next Read() will restart to stream the +// underlying ReadCloser stream from the beginning +func (prc *PeekReadCloser) ReplayFromStart() { + prc.recordMode = false +} + +func (prc *PeekReadCloser) Read(p []byte) (int, error) { + if prc.recordMode { + if prc.recordBuf.Len() > prc.recordMaxBuf { + return 0, errors.New("maximum peek buffer exceeded") + } + n, err := prc.ReadCloser.Read(p) + prc.recordBuf.Write(p[:n]) + return n, err + } + // Replay mode + if prc.recordBuf.Len() > 0 { + pn, _ := prc.recordBuf.Read(p) + return pn, nil + } + return prc.ReadCloser.Read(p) +} + +// Close releases the record buffer memory and close the underlying ReadCloser +func (prc *PeekReadCloser) Close() error { + prc.recordBuf.Reset() + return prc.ReadCloser.Close() +} + +// NewPeekReadCloser returns a new peek reader +func NewPeekReadCloser(rc io.ReadCloser, maxBufSize int) *PeekReadCloser { + return &PeekReadCloser{ + ReadCloser: rc, + recordMode: true, // recording mode by default + recordBuf: bytes.NewBuffer(make([]byte, 0, 1024)), + recordMaxBuf: maxBufSize, + } +} diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 26bf441b5..e2c24b60a 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -161,7 +161,7 @@ func (p *PostPolicy) SetTagging(tagging string) error { } _, err := tags.ParseObjectXML(strings.NewReader(tagging)) if err != nil { - return errors.New("The XML you provided was not well-formed or did not validate against our published schema.") //nolint + return errors.New(s3ErrorResponseMap[MalformedXML]) //nolint } policyCond := policyCondition{ matchType: "eq", diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go index b83d1b2e5..59c7a163d 100644 --- a/vendor/github.com/minio/minio-go/v7/retry.go +++ b/vendor/github.com/minio/minio-go/v7/retry.go @@ -104,6 +104,8 @@ var retryableS3Codes = map[string]struct{}{ "ExpiredToken": {}, "ExpiredTokenException": {}, "SlowDown": {}, + "SlowDownWrite": {}, + "SlowDownRead": {}, // Add more AWS S3 codes here. 
} diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go index f7fad19f6..4bcc47d80 100644 --- a/vendor/github.com/minio/minio-go/v7/s3-error.go +++ b/vendor/github.com/minio/minio-go/v7/s3-error.go @@ -17,46 +17,100 @@ package minio +// Constants for error keys +const ( + NoSuchBucket = "NoSuchBucket" + NoSuchKey = "NoSuchKey" + NoSuchUpload = "NoSuchUpload" + AccessDenied = "AccessDenied" + Conflict = "Conflict" + PreconditionFailed = "PreconditionFailed" + InvalidArgument = "InvalidArgument" + EntityTooLarge = "EntityTooLarge" + EntityTooSmall = "EntityTooSmall" + UnexpectedEOF = "UnexpectedEOF" + APINotSupported = "APINotSupported" + InvalidRegion = "InvalidRegion" + NoSuchBucketPolicy = "NoSuchBucketPolicy" + BadDigest = "BadDigest" + IncompleteBody = "IncompleteBody" + InternalError = "InternalError" + InvalidAccessKeyID = "InvalidAccessKeyId" + InvalidBucketName = "InvalidBucketName" + InvalidDigest = "InvalidDigest" + InvalidRange = "InvalidRange" + MalformedXML = "MalformedXML" + MissingContentLength = "MissingContentLength" + MissingContentMD5 = "MissingContentMD5" + MissingRequestBodyError = "MissingRequestBodyError" + NotImplemented = "NotImplemented" + RequestTimeTooSkewed = "RequestTimeTooSkewed" + SignatureDoesNotMatch = "SignatureDoesNotMatch" + MethodNotAllowed = "MethodNotAllowed" + InvalidPart = "InvalidPart" + InvalidPartOrder = "InvalidPartOrder" + InvalidObjectState = "InvalidObjectState" + AuthorizationHeaderMalformed = "AuthorizationHeaderMalformed" + MalformedPOSTRequest = "MalformedPOSTRequest" + BucketNotEmpty = "BucketNotEmpty" + AllAccessDisabled = "AllAccessDisabled" + MalformedPolicy = "MalformedPolicy" + MissingFields = "MissingFields" + AuthorizationQueryParametersError = "AuthorizationQueryParametersError" + MalformedDate = "MalformedDate" + BucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + InvalidDuration = "InvalidDuration" + XAmzContentSHA256Mismatch = "XAmzContentSHA256Mismatch" + XMinioInvalidObjectName = "XMinioInvalidObjectName" + NoSuchCORSConfiguration = "NoSuchCORSConfiguration" + BucketAlreadyExists = "BucketAlreadyExists" + NoSuchVersion = "NoSuchVersion" + NoSuchTagSet = "NoSuchTagSet" + Testing = "Testing" + Success = "Success" +) + // Non exhaustive list of AWS S3 standard error responses - // http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html var s3ErrorResponseMap = map[string]string{ - "AccessDenied": "Access Denied.", - "BadDigest": "The Content-Md5 you specified did not match what we received.", - "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", - "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", - "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", - "InternalError": "We encountered an internal error, please try again.", - "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", - "InvalidBucketName": "The specified bucket is not valid.", - "InvalidDigest": "The Content-Md5 you specified is not valid.", - "InvalidRange": "The requested range is not satisfiable", - "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", - "MissingContentLength": "You must provide the Content-Length HTTP header.", - "MissingContentMD5": "Missing required header for this request: Content-Md5.", - "MissingRequestBodyError": "Request body is empty.", - "NoSuchBucket": "The 
specified bucket does not exist.", - "NoSuchBucketPolicy": "The bucket policy does not exist", - "NoSuchKey": "The specified key does not exist.", - "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", - "NotImplemented": "A header you provided implies functionality that is not implemented", - "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", - "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", - "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", - "MethodNotAllowed": "The specified method is not allowed against this resource.", - "InvalidPart": "One or more of the specified parts could not be found.", - "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", - "InvalidObjectState": "The operation is not valid for the current state of the object.", - "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", - "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", - "BucketNotEmpty": "The bucket you tried to delete is not empty", - "AllAccessDisabled": "All access to this bucket has been disabled.", - "MalformedPolicy": "Policy has invalid resource.", - "MissingFields": "Missing fields in request.", - "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", - "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", - "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", - "InvalidDuration": "Duration provided in the request is invalid.", - "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", - "NoSuchCORSConfiguration": "The specified bucket does not have a CORS configuration.", + AccessDenied: "Access Denied.", + BadDigest: "The Content-Md5 you specified did not match what we received.", + EntityTooSmall: "Your proposed upload is smaller than the minimum allowed object size.", + EntityTooLarge: "Your proposed upload exceeds the maximum allowed object size.", + IncompleteBody: "You did not provide the number of bytes specified by the Content-Length HTTP header.", + InternalError: "We encountered an internal error, please try again.", + InvalidAccessKeyID: "The access key ID you provided does not exist in our records.", + InvalidBucketName: "The specified bucket is not valid.", + InvalidDigest: "The Content-Md5 you specified is not valid.", + InvalidRange: "The requested range is not satisfiable.", + MalformedXML: "The XML you provided was not well-formed or did not validate against our published schema.", + MissingContentLength: "You must provide the Content-Length HTTP header.", + MissingContentMD5: "Missing required header for this request: Content-Md5.", + MissingRequestBodyError: "Request body is empty.", + NoSuchBucket: "The specified bucket does not exist.", + NoSuchBucketPolicy: "The bucket policy does not exist.", + NoSuchKey: "The specified key does not exist.", + NoSuchUpload: "The specified multipart upload does not exist. 
The upload ID may be invalid, or the upload may have been aborted or completed.", + NotImplemented: "A header you provided implies functionality that is not implemented.", + PreconditionFailed: "At least one of the pre-conditions you specified did not hold.", + RequestTimeTooSkewed: "The difference between the request time and the server's time is too large.", + SignatureDoesNotMatch: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + MethodNotAllowed: "The specified method is not allowed against this resource.", + InvalidPart: "One or more of the specified parts could not be found.", + InvalidPartOrder: "The list of parts was not in ascending order. The parts list must be specified in order by part number.", + InvalidObjectState: "The operation is not valid for the current state of the object.", + AuthorizationHeaderMalformed: "The authorization header is malformed; the region is wrong.", + MalformedPOSTRequest: "The body of your POST request is not well-formed multipart/form-data.", + BucketNotEmpty: "The bucket you tried to delete is not empty.", + AllAccessDisabled: "All access to this bucket has been disabled.", + MalformedPolicy: "Policy has invalid resource.", + MissingFields: "Missing fields in request.", + AuthorizationQueryParametersError: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + MalformedDate: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + BucketAlreadyOwnedByYou: "Your previous request to create the named bucket succeeded and you already own it.", + InvalidDuration: "Duration provided in the request is invalid.", + XAmzContentSHA256Mismatch: "The provided 'x-amz-content-sha256' header does not match what was computed.", + NoSuchCORSConfiguration: "The specified bucket does not have a CORS configuration.", + Conflict: "Bucket not empty.", // Add new API errors here. } diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index 6024bfa5b..cc96005b9 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -30,6 +30,7 @@ import ( "hash" "io" "math/rand" + "mime" "net" "net/http" "net/url" @@ -210,6 +211,7 @@ func extractObjMetadata(header http.Header) http.Header { "X-Amz-Server-Side-Encryption", "X-Amz-Tagging-Count", "X-Amz-Meta-", + "X-Minio-Meta-", // Add new headers to be preserved. 
// if you add new headers here, please extend // PutObjectOptions{} to preserve them @@ -223,6 +225,16 @@ func extractObjMetadata(header http.Header) http.Header { continue } found = true + if prefix == "X-Amz-Meta-" || prefix == "X-Minio-Meta-" { + for index, val := range v { + if strings.HasPrefix(val, "=?") { + decoder := mime.WordDecoder{} + if decoded, err := decoder.DecodeHeader(val); err == nil { + v[index] = decoded + } + } + } + } break } if found { @@ -268,7 +280,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err if err != nil { // Content-Length is not valid return ObjectInfo{}, ErrorResponse{ - Code: "InternalError", + Code: InternalError, Message: fmt.Sprintf("Content-Length is not an integer, failed with %v", err), BucketName: bucketName, Key: objectName, @@ -283,7 +295,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err mtime, err := parseRFC7231Time(h.Get("Last-Modified")) if err != nil { return ObjectInfo{}, ErrorResponse{ - Code: "InternalError", + Code: InternalError, Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err), BucketName: bucketName, Key: objectName, @@ -305,7 +317,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err expiry, err = parseRFC7231Time(expiryStr) if err != nil { return ObjectInfo{}, ErrorResponse{ - Code: "InternalError", + Code: InternalError, Message: fmt.Sprintf("'Expiry' is not in supported format: %v", err), BucketName: bucketName, Key: objectName, @@ -327,7 +339,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err userTags, err := tags.ParseObjectTags(h.Get(amzTaggingHeader)) if err != nil { return ObjectInfo{}, ErrorResponse{ - Code: "InternalError", + Code: InternalError, } } @@ -336,7 +348,7 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err tagCount, err = strconv.Atoi(count) if err != nil { return ObjectInfo{}, ErrorResponse{ - Code: "InternalError", + Code: InternalError, Message: fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err), BucketName: bucketName, Key: objectName, diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml index ec52857a3..47f0f5914 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml +++ b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -113,7 +113,7 @@ dockers: checksum: name_template: 'sha256sums.txt' snapshot: - name_template: "{{ incpatch .Version }}-next" + version_template: "{{ incpatch .Version }}-next" release: github: owner: pelletier diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index c3df8bee1..189be525e 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -59,7 +59,7 @@ func (d *Decoder) DisallowUnknownFields() *Decoder { // // With this feature enabled, types implementing the unstable/Unmarshaler // interface can be decoded from any structure of the document. It allows types -// that don't have a straightfoward TOML representation to provide their own +// that don't have a straightforward TOML representation to provide their own // decoding logic. // // Currently, types can only decode from a single value. 
Tables and array tables diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 5727452c1..fed9e87b9 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -201,6 +201,7 @@ var unitMap = map[string]struct { // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. +// Negative durations are not supported. func ParseDuration(s string) (Duration, error) { switch s { case "0": @@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) { return 0, errors.New("duration out of range") } } + return Duration(dur), nil } +// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations. +func ParseDurationAllowNegative(s string) (Duration, error) { + if s == "" || s[0] != '-' { + return ParseDuration(s) + } + + d, err := ParseDuration(s[1:]) + + return -d, err +} + func (d Duration) String() string { var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" + ms = int64(time.Duration(d) / time.Millisecond) + r = "" + sign = "" ) + if ms == 0 { return "0s" } + if ms < 0 { + sign, ms = "-", -ms + } + f := func(unit string, mult int64, exact bool) { if exact && ms%mult != 0 { return @@ -286,7 +305,7 @@ func (d Duration) String() string { f("s", 1000, false) f("ms", 1, false) - return r + return sign + r } // MarshalJSON implements the json.Marshaler interface. diff --git a/vendor/github.com/spf13/cast/alias.go b/vendor/github.com/spf13/cast/alias.go new file mode 100644 index 000000000..855d60005 --- /dev/null +++ b/vendor/github.com/spf13/cast/alias.go @@ -0,0 +1,69 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. +package cast + +import ( + "reflect" + "slices" +) + +var kindNames = []string{ + reflect.String: "string", + reflect.Bool: "bool", + reflect.Int: "int", + reflect.Int8: "int8", + reflect.Int16: "int16", + reflect.Int32: "int32", + reflect.Int64: "int64", + reflect.Uint: "uint", + reflect.Uint8: "uint8", + reflect.Uint16: "uint16", + reflect.Uint32: "uint32", + reflect.Uint64: "uint64", + reflect.Float32: "float32", + reflect.Float64: "float64", +} + +var kinds = map[reflect.Kind]func(reflect.Value) any{ + reflect.String: func(v reflect.Value) any { return v.String() }, + reflect.Bool: func(v reflect.Value) any { return v.Bool() }, + reflect.Int: func(v reflect.Value) any { return int(v.Int()) }, + reflect.Int8: func(v reflect.Value) any { return int8(v.Int()) }, + reflect.Int16: func(v reflect.Value) any { return int16(v.Int()) }, + reflect.Int32: func(v reflect.Value) any { return int32(v.Int()) }, + reflect.Int64: func(v reflect.Value) any { return v.Int() }, + reflect.Uint: func(v reflect.Value) any { return uint(v.Uint()) }, + reflect.Uint8: func(v reflect.Value) any { return uint8(v.Uint()) }, + reflect.Uint16: func(v reflect.Value) any { return uint16(v.Uint()) }, + reflect.Uint32: func(v reflect.Value) any { return uint32(v.Uint()) }, + reflect.Uint64: func(v reflect.Value) any { return v.Uint() }, + reflect.Float32: func(v reflect.Value) any { return float32(v.Float()) }, + reflect.Float64: func(v reflect.Value) any { return v.Float() }, +} + +// resolveAlias attempts to resolve a named type to its underlying basic type (if possible). +// +// Pointers are expected to be indirected by this point. 
+func resolveAlias(i any) (any, bool) { + if i == nil { + return nil, false + } + + t := reflect.TypeOf(i) + + // Not a named type + if t.Name() == "" || slices.Contains(kindNames, t.Name()) { + return i, false + } + + resolve, ok := kinds[t.Kind()] + if !ok { // Not a supported kind + return i, false + } + + v := reflect.ValueOf(i) + + return resolve(v), true +} diff --git a/vendor/github.com/spf13/cast/basic.go b/vendor/github.com/spf13/cast/basic.go new file mode 100644 index 000000000..fa330e207 --- /dev/null +++ b/vendor/github.com/spf13/cast/basic.go @@ -0,0 +1,131 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "fmt" + "html/template" + "strconv" + "time" +) + +// ToBoolE casts any value to a bool type. +func ToBoolE(i any) (bool, error) { + i, _ = indirect(i) + + switch b := i.(type) { + case bool: + return b, nil + case nil: + return false, nil + case int: + return b != 0, nil + case int8: + return b != 0, nil + case int16: + return b != 0, nil + case int32: + return b != 0, nil + case int64: + return b != 0, nil + case uint: + return b != 0, nil + case uint8: + return b != 0, nil + case uint16: + return b != 0, nil + case uint32: + return b != 0, nil + case uint64: + return b != 0, nil + case float32: + return b != 0, nil + case float64: + return b != 0, nil + case time.Duration: + return b != 0, nil + case string: + return strconv.ParseBool(b) + case json.Number: + v, err := ToInt64E(b) + if err == nil { + return v != 0, nil + } + + return false, fmt.Errorf(errorMsg, i, i, false) + default: + if i, ok := resolveAlias(i); ok { + return ToBoolE(i) + } + + return false, fmt.Errorf(errorMsg, i, i, false) + } +} + +// ToStringE casts any value to a string type. 
+func ToStringE(i any) (string, error) { + switch s := i.(type) { + case string: + return s, nil + case bool: + return strconv.FormatBool(s), nil + case float64: + return strconv.FormatFloat(s, 'f', -1, 64), nil + case float32: + return strconv.FormatFloat(float64(s), 'f', -1, 32), nil + case int: + return strconv.Itoa(s), nil + case int8: + return strconv.FormatInt(int64(s), 10), nil + case int16: + return strconv.FormatInt(int64(s), 10), nil + case int32: + return strconv.FormatInt(int64(s), 10), nil + case int64: + return strconv.FormatInt(s, 10), nil + case uint: + return strconv.FormatUint(uint64(s), 10), nil + case uint8: + return strconv.FormatUint(uint64(s), 10), nil + case uint16: + return strconv.FormatUint(uint64(s), 10), nil + case uint32: + return strconv.FormatUint(uint64(s), 10), nil + case uint64: + return strconv.FormatUint(s, 10), nil + case json.Number: + return s.String(), nil + case []byte: + return string(s), nil + case template.HTML: + return string(s), nil + case template.URL: + return string(s), nil + case template.JS: + return string(s), nil + case template.CSS: + return string(s), nil + case template.HTMLAttr: + return string(s), nil + case nil: + return "", nil + case fmt.Stringer: + return s.String(), nil + case error: + return s.Error(), nil + default: + if i, ok := indirect(i); ok { + return ToStringE(i) + } + + if i, ok := resolveAlias(i); ok { + return ToStringE(i) + } + + return "", fmt.Errorf(errorMsg, i, i, "") + } +} diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go index 386ba80a9..8d85539b3 100644 --- a/vendor/github.com/spf13/cast/cast.go +++ b/vendor/github.com/spf13/cast/cast.go @@ -8,187 +8,77 @@ package cast import "time" -// ToBool casts an interface to a bool type. -func ToBool(i interface{}) bool { - v, _ := ToBoolE(i) - return v +const errorMsg = "unable to cast %#v of type %T to %T" +const errorMsgWith = "unable to cast %#v of type %T to %T: %w" + +// Basic is a type parameter constraint for functions accepting basic types. +// +// It represents the supported basic types this package can cast to. +type Basic interface { + string | bool | Number | time.Time | time.Duration } -// ToTime casts an interface to a time.Time type. -func ToTime(i interface{}) time.Time { - v, _ := ToTimeE(i) - return v +// ToE casts any value to a [Basic] type. 
+func ToE[T Basic](i any) (T, error) { + var t T + + var v any + var err error + + switch any(t).(type) { + case string: + v, err = ToStringE(i) + case bool: + v, err = ToBoolE(i) + case int: + v, err = toNumberE[int](i, parseInt[int]) + case int8: + v, err = toNumberE[int8](i, parseInt[int8]) + case int16: + v, err = toNumberE[int16](i, parseInt[int16]) + case int32: + v, err = toNumberE[int32](i, parseInt[int32]) + case int64: + v, err = toNumberE[int64](i, parseInt[int64]) + case uint: + v, err = toUnsignedNumberE[uint](i, parseUint[uint]) + case uint8: + v, err = toUnsignedNumberE[uint8](i, parseUint[uint8]) + case uint16: + v, err = toUnsignedNumberE[uint16](i, parseUint[uint16]) + case uint32: + v, err = toUnsignedNumberE[uint32](i, parseUint[uint32]) + case uint64: + v, err = toUnsignedNumberE[uint64](i, parseUint[uint64]) + case float32: + v, err = toNumberE[float32](i, parseFloat[float32]) + case float64: + v, err = toNumberE[float64](i, parseFloat[float64]) + case time.Time: + v, err = ToTimeE(i) + case time.Duration: + v, err = ToDurationE(i) + } + + if err != nil { + return t, err + } + + return v.(T), nil } -func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time { - v, _ := ToTimeInDefaultLocationE(i, location) - return v +// Must is a helper that wraps a call to a cast function and panics if the error is non-nil. +func Must[T any](i any, err error) T { + if err != nil { + panic(err) + } + + return i.(T) } -// ToDuration casts an interface to a time.Duration type. -func ToDuration(i interface{}) time.Duration { - v, _ := ToDurationE(i) - return v -} +// To casts any value to a [Basic] type. +func To[T Basic](i any) T { + v, _ := ToE[T](i) -// ToFloat64 casts an interface to a float64 type. -func ToFloat64(i interface{}) float64 { - v, _ := ToFloat64E(i) - return v -} - -// ToFloat32 casts an interface to a float32 type. -func ToFloat32(i interface{}) float32 { - v, _ := ToFloat32E(i) - return v -} - -// ToInt64 casts an interface to an int64 type. -func ToInt64(i interface{}) int64 { - v, _ := ToInt64E(i) - return v -} - -// ToInt32 casts an interface to an int32 type. -func ToInt32(i interface{}) int32 { - v, _ := ToInt32E(i) - return v -} - -// ToInt16 casts an interface to an int16 type. -func ToInt16(i interface{}) int16 { - v, _ := ToInt16E(i) - return v -} - -// ToInt8 casts an interface to an int8 type. -func ToInt8(i interface{}) int8 { - v, _ := ToInt8E(i) - return v -} - -// ToInt casts an interface to an int type. -func ToInt(i interface{}) int { - v, _ := ToIntE(i) - return v -} - -// ToUint casts an interface to a uint type. -func ToUint(i interface{}) uint { - v, _ := ToUintE(i) - return v -} - -// ToUint64 casts an interface to a uint64 type. -func ToUint64(i interface{}) uint64 { - v, _ := ToUint64E(i) - return v -} - -// ToUint32 casts an interface to a uint32 type. -func ToUint32(i interface{}) uint32 { - v, _ := ToUint32E(i) - return v -} - -// ToUint16 casts an interface to a uint16 type. -func ToUint16(i interface{}) uint16 { - v, _ := ToUint16E(i) - return v -} - -// ToUint8 casts an interface to a uint8 type. -func ToUint8(i interface{}) uint8 { - v, _ := ToUint8E(i) - return v -} - -// ToString casts an interface to a string type. -func ToString(i interface{}) string { - v, _ := ToStringE(i) - return v -} - -// ToStringMapString casts an interface to a map[string]string type. 
-func ToStringMapString(i interface{}) map[string]string { - v, _ := ToStringMapStringE(i) - return v -} - -// ToStringMapStringSlice casts an interface to a map[string][]string type. -func ToStringMapStringSlice(i interface{}) map[string][]string { - v, _ := ToStringMapStringSliceE(i) - return v -} - -// ToStringMapBool casts an interface to a map[string]bool type. -func ToStringMapBool(i interface{}) map[string]bool { - v, _ := ToStringMapBoolE(i) - return v -} - -// ToStringMapInt casts an interface to a map[string]int type. -func ToStringMapInt(i interface{}) map[string]int { - v, _ := ToStringMapIntE(i) - return v -} - -// ToStringMapInt64 casts an interface to a map[string]int64 type. -func ToStringMapInt64(i interface{}) map[string]int64 { - v, _ := ToStringMapInt64E(i) - return v -} - -// ToStringMap casts an interface to a map[string]interface{} type. -func ToStringMap(i interface{}) map[string]interface{} { - v, _ := ToStringMapE(i) - return v -} - -// ToSlice casts an interface to a []interface{} type. -func ToSlice(i interface{}) []interface{} { - v, _ := ToSliceE(i) - return v -} - -// ToBoolSlice casts an interface to a []bool type. -func ToBoolSlice(i interface{}) []bool { - v, _ := ToBoolSliceE(i) - return v -} - -// ToStringSlice casts an interface to a []string type. -func ToStringSlice(i interface{}) []string { - v, _ := ToStringSliceE(i) - return v -} - -// ToIntSlice casts an interface to a []int type. -func ToIntSlice(i interface{}) []int { - v, _ := ToIntSliceE(i) - return v -} - -// ToInt64Slice casts an interface to a []int64 type. -func ToInt64Slice(i interface{}) []int64 { - v, _ := ToInt64SliceE(i) - return v -} - -// ToUintSlice casts an interface to a []uint type. -func ToUintSlice(i interface{}) []uint { - v, _ := ToUintSliceE(i) - return v -} - -// ToFloat64Slice casts an interface to a []float64 type. -func ToFloat64Slice(i interface{}) []float64 { - v, _ := ToFloat64SliceE(i) - return v -} - -// ToDurationSlice casts an interface to a []time.Duration type. -func ToDurationSlice(i interface{}) []time.Duration { - v, _ := ToDurationSliceE(i) return v } diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go deleted file mode 100644 index 4d4ca8db1..000000000 --- a/vendor/github.com/spf13/cast/caste.go +++ /dev/null @@ -1,1472 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package cast - -import ( - "encoding/json" - "errors" - "fmt" - "html/template" - "reflect" - "strconv" - "strings" - "time" -) - -var errNegativeNotAllowed = errors.New("unable to cast negative value") - -type float64EProvider interface { - Float64() (float64, error) -} - -type float64Provider interface { - Float64() float64 -} - -// ToTimeE casts an interface to a time.Time type. -func ToTimeE(i interface{}) (tim time.Time, err error) { - return ToTimeInDefaultLocationE(i, time.UTC) -} - -// ToTimeInDefaultLocationE casts an empty interface to time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. 
-func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) { - i = indirect(i) - - switch v := i.(type) { - case time.Time: - return v, nil - case string: - return StringToDateInDefaultLocation(v, location) - case json.Number: - s, err1 := ToInt64E(v) - if err1 != nil { - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } - return time.Unix(s, 0), nil - case int: - return time.Unix(int64(v), 0), nil - case int64: - return time.Unix(v, 0), nil - case int32: - return time.Unix(int64(v), 0), nil - case uint: - return time.Unix(int64(v), 0), nil - case uint64: - return time.Unix(int64(v), 0), nil - case uint32: - return time.Unix(int64(v), 0), nil - default: - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } -} - -// ToDurationE casts an interface to a time.Duration type. -func ToDurationE(i interface{}) (d time.Duration, err error) { - i = indirect(i) - - switch s := i.(type) { - case time.Duration: - return s, nil - case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: - d = time.Duration(ToInt64(s)) - return - case float32, float64: - d = time.Duration(ToFloat64(s)) - return - case string: - if strings.ContainsAny(s, "nsuµmh") { - d, err = time.ParseDuration(s) - } else { - d, err = time.ParseDuration(s + "ns") - } - return - case float64EProvider: - var v float64 - v, err = s.Float64() - d = time.Duration(v) - return - case float64Provider: - d = time.Duration(s.Float64()) - return - default: - err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) - return - } -} - -// ToBoolE casts an interface to a bool type. -func ToBoolE(i interface{}) (bool, error) { - i = indirect(i) - - switch b := i.(type) { - case bool: - return b, nil - case nil: - return false, nil - case int: - return b != 0, nil - case int64: - return b != 0, nil - case int32: - return b != 0, nil - case int16: - return b != 0, nil - case int8: - return b != 0, nil - case uint: - return b != 0, nil - case uint64: - return b != 0, nil - case uint32: - return b != 0, nil - case uint16: - return b != 0, nil - case uint8: - return b != 0, nil - case float64: - return b != 0, nil - case float32: - return b != 0, nil - case time.Duration: - return b != 0, nil - case string: - return strconv.ParseBool(i.(string)) - case json.Number: - v, err := ToInt64E(b) - if err == nil { - return v != 0, nil - } - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - default: - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - } -} - -// ToFloat64E casts an interface to a float64 type. 
-func ToFloat64E(i interface{}) (float64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float64(intv), nil - } - - switch s := i.(type) { - case float64: - return s, nil - case float32: - return float64(s), nil - case int64: - return float64(s), nil - case int32: - return float64(s), nil - case int16: - return float64(s), nil - case int8: - return float64(s), nil - case uint: - return float64(s), nil - case uint64: - return float64(s), nil - case uint32: - return float64(s), nil - case uint16: - return float64(s), nil - case uint8: - return float64(s), nil - case string: - v, err := strconv.ParseFloat(s, 64) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case float64EProvider: - v, err := s.Float64() - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case float64Provider: - return s.Float64(), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - } -} - -// ToFloat32E casts an interface to a float32 type. -func ToFloat32E(i interface{}) (float32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float32(intv), nil - } - - switch s := i.(type) { - case float64: - return float32(s), nil - case float32: - return s, nil - case int64: - return float32(s), nil - case int32: - return float32(s), nil - case int16: - return float32(s), nil - case int8: - return float32(s), nil - case uint: - return float32(s), nil - case uint64: - return float32(s), nil - case uint32: - return float32(s), nil - case uint16: - return float32(s), nil - case uint8: - return float32(s), nil - case string: - v, err := strconv.ParseFloat(s, 32) - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case float64EProvider: - v, err := s.Float64() - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case float64Provider: - return float32(s.Float64()), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - } -} - -// ToInt64E casts an interface to an int64 type. -func ToInt64E(i interface{}) (int64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int64(intv), nil - } - - switch s := i.(type) { - case int64: - return s, nil - case int32: - return int64(s), nil - case int16: - return int64(s), nil - case int8: - return int64(s), nil - case uint: - return int64(s), nil - case uint64: - return int64(s), nil - case uint32: - return int64(s), nil - case uint16: - return int64(s), nil - case uint8: - return int64(s), nil - case float64: - return int64(s), nil - case float32: - return int64(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToInt64E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - } -} - -// ToInt32E casts an interface to an int32 type. 
-func ToInt32E(i interface{}) (int32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int32(intv), nil - } - - switch s := i.(type) { - case int64: - return int32(s), nil - case int32: - return s, nil - case int16: - return int32(s), nil - case int8: - return int32(s), nil - case uint: - return int32(s), nil - case uint64: - return int32(s), nil - case uint32: - return int32(s), nil - case uint16: - return int32(s), nil - case uint8: - return int32(s), nil - case float64: - return int32(s), nil - case float32: - return int32(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - case json.Number: - return ToInt32E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - } -} - -// ToInt16E casts an interface to an int16 type. -func ToInt16E(i interface{}) (int16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int16(intv), nil - } - - switch s := i.(type) { - case int64: - return int16(s), nil - case int32: - return int16(s), nil - case int16: - return s, nil - case int8: - return int16(s), nil - case uint: - return int16(s), nil - case uint64: - return int16(s), nil - case uint32: - return int16(s), nil - case uint16: - return int16(s), nil - case uint8: - return int16(s), nil - case float64: - return int16(s), nil - case float32: - return int16(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - case json.Number: - return ToInt16E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - } -} - -// ToInt8E casts an interface to an int8 type. -func ToInt8E(i interface{}) (int8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int8(intv), nil - } - - switch s := i.(type) { - case int64: - return int8(s), nil - case int32: - return int8(s), nil - case int16: - return int8(s), nil - case int8: - return s, nil - case uint: - return int8(s), nil - case uint64: - return int8(s), nil - case uint32: - return int8(s), nil - case uint16: - return int8(s), nil - case uint8: - return int8(s), nil - case float64: - return int8(s), nil - case float32: - return int8(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - case json.Number: - return ToInt8E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - } -} - -// ToIntE casts an interface to an int type. 
-func ToIntE(i interface{}) (int, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return intv, nil - } - - switch s := i.(type) { - case int64: - return int(s), nil - case int32: - return int(s), nil - case int16: - return int(s), nil - case int8: - return int(s), nil - case uint: - return int(s), nil - case uint64: - return int(s), nil - case uint32: - return int(s), nil - case uint16: - return int(s), nil - case uint8: - return int(s), nil - case float64: - return int(s), nil - case float32: - return int(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToIntE(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) - } -} - -// ToUintE casts an interface to a uint type. -func ToUintE(i interface{}) (uint, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - case json.Number: - return ToUintE(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case uint: - return s, nil - case uint64: - return uint(s), nil - case uint32: - return uint(s), nil - case uint16: - return uint(s), nil - case uint8: - return uint(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - } -} - -// ToUint64E casts an interface to a uint64 type. 
-func ToUint64E(i interface{}) (uint64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint64(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(trimZeroDecimal(s), 0, 0) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - case json.Number: - return ToUint64E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case uint: - return uint64(s), nil - case uint64: - return s, nil - case uint32: - return uint64(s), nil - case uint16: - return uint64(s), nil - case uint8: - return uint64(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - } -} - -// ToUint32E casts an interface to a uint32 type. -func ToUint32E(i interface{}) (uint32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint32(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - case json.Number: - return ToUint32E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case uint: - return uint32(s), nil - case uint64: - return uint32(s), nil - case uint32: - return s, nil - case uint16: - return uint32(s), nil - case uint8: - return uint32(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - } -} - -// ToUint16E casts an interface to a uint16 type. 
-func ToUint16E(i interface{}) (uint16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint16(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - case json.Number: - return ToUint16E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case uint: - return uint16(s), nil - case uint64: - return uint16(s), nil - case uint32: - return uint16(s), nil - case uint16: - return s, nil - case uint8: - return uint16(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - } -} - -// ToUint8E casts an interface to a uint type. -func ToUint8E(i interface{}) (uint8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint8(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - case json.Number: - return ToUint8E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case uint: - return uint8(s), nil - case uint64: - return uint8(s), nil - case uint32: - return uint8(s), nil - case uint16: - return uint8(s), nil - case uint8: - return s, nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - } -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirect returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil). -func indirect(a interface{}) interface{} { - if a == nil { - return nil - } - if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { - // Avoid creating a reflect.Value if it's not a pointer. - return a - } - v := reflect.ValueOf(a) - for v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. 
-// indirectToStringerOrError returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer -// or error, -func indirectToStringerOrError(a interface{}) interface{} { - if a == nil { - return nil - } - - errorType := reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - - v := reflect.ValueOf(a) - for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// ToStringE casts an interface to a string type. -func ToStringE(i interface{}) (string, error) { - i = indirectToStringerOrError(i) - - switch s := i.(type) { - case string: - return s, nil - case bool: - return strconv.FormatBool(s), nil - case float64: - return strconv.FormatFloat(s, 'f', -1, 64), nil - case float32: - return strconv.FormatFloat(float64(s), 'f', -1, 32), nil - case int: - return strconv.Itoa(s), nil - case int64: - return strconv.FormatInt(s, 10), nil - case int32: - return strconv.Itoa(int(s)), nil - case int16: - return strconv.FormatInt(int64(s), 10), nil - case int8: - return strconv.FormatInt(int64(s), 10), nil - case uint: - return strconv.FormatUint(uint64(s), 10), nil - case uint64: - return strconv.FormatUint(uint64(s), 10), nil - case uint32: - return strconv.FormatUint(uint64(s), 10), nil - case uint16: - return strconv.FormatUint(uint64(s), 10), nil - case uint8: - return strconv.FormatUint(uint64(s), 10), nil - case json.Number: - return s.String(), nil - case []byte: - return string(s), nil - case template.HTML: - return string(s), nil - case template.URL: - return string(s), nil - case template.JS: - return string(s), nil - case template.CSS: - return string(s), nil - case template.HTMLAttr: - return string(s), nil - case nil: - return "", nil - case fmt.Stringer: - return s.String(), nil - case error: - return s.Error(), nil - default: - return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) - } -} - -func toMapE[K comparable, V any](i any, keyFn func(any) K, valFn func(any) V) (map[K]V, error) { - m := map[K]V{} - - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to %T", i, i, m) - } - - switch v := i.(type) { - case map[K]V: - return v, nil - - case map[K]any: - for k, val := range v { - m[k] = valFn(val) - } - - return m, nil - - case map[any]V: - for k, val := range v { - m[keyFn(k)] = val - } - - return m, nil - - case map[any]any: - for k, val := range v { - m[keyFn(k)] = valFn(val) - } - - return m, nil - - case string: - err := jsonStringToObject(v, &m) - - return m, err - - default: - return m, fmt.Errorf("unable to cast %#v of type %T to %T", i, i, m) - } -} - -func toStringMapE[T any](i any, fn func(any) T) (map[string]T, error) { - return toMapE(i, ToString, fn) -} - -// ToStringMapStringE casts an interface to a map[string]string type. -func ToStringMapStringE(i any) (map[string]string, error) { - return toStringMapE(i, ToString) -} - -// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
-func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - m := map[string][]string{} - - switch v := i.(type) { - case map[string][]string: - return v, nil - case map[string][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[string]string: - for k, val := range v { - m[ToString(k)] = []string{val} - } - case map[string]interface{}: - for k, val := range v { - switch vt := val.(type) { - case []interface{}: - m[ToString(k)] = ToStringSlice(vt) - case []string: - m[ToString(k)] = vt - default: - m[ToString(k)] = []string{ToString(val)} - } - } - return m, nil - case map[interface{}][]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - key, err := ToStringE(k) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - value, err := ToStringSliceE(val) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - m[key] = value - } - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - return m, nil -} - -// ToStringMapBoolE casts an interface to a map[string]bool type. -func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - return toStringMapE(i, ToBool) -} - -// ToStringMapE casts an interface to a map[string]interface{} type. -func ToStringMapE(i interface{}) (map[string]interface{}, error) { - fn := func(i any) any { return i } - - return toStringMapE(i, fn) -} - -func toStringMapIntE[T int | int64](i any, fn func(any) T, fnE func(any) (T, error)) (map[string]T, error) { - m := map[string]T{} - - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to %T", i, i, m) - } - - switch v := i.(type) { - case map[string]T: - return v, nil - - case map[string]any: - for k, val := range v { - m[k] = fn(val) - } - - return m, nil - - case map[any]T: - for k, val := range v { - m[ToString(k)] = val - } - - return m, nil - - case map[any]any: - for k, val := range v { - m[ToString(k)] = fn(val) - } - - return m, nil - - case string: - err := jsonStringToObject(v, &m) - - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to %T", i, i, m) - } - - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - - for _, keyVal := range v.MapKeys() { - val, err := fnE(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to %T", i, i, m) - } - - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - - return m, nil -} - -// ToStringMapIntE casts an interface to a map[string]int{} type. -func ToStringMapIntE(i any) (map[string]int, error) { - return toStringMapIntE(i, ToInt, ToIntE) -} - -// ToStringMapInt64E casts an interface to a map[string]int64{} type. -func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - return toStringMapIntE(i, ToInt64, ToInt64E) -} - -// ToSliceE casts an interface to a []interface{} type. 
-func ToSliceE(i interface{}) ([]interface{}, error) { - var s []interface{} - - switch v := i.(type) { - case []interface{}: - return append(s, v...), nil - case []map[string]interface{}: - for _, u := range v { - s = append(s, u) - } - return s, nil - default: - return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) - } -} - -func toSliceE[T any](i any, fn func(any) (T, error)) ([]T, error) { - if i == nil { - return []T{}, fmt.Errorf("unable to cast %#v of type %T to %T", i, i, []T{}) - } - - switch v := i.(type) { - case []T: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]T, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := fn(s.Index(j).Interface()) - if err != nil { - return []T{}, fmt.Errorf("unable to cast %#v of type %T to %T", i, i, []T{}) - } - a[j] = val - } - return a, nil - default: - return []T{}, fmt.Errorf("unable to cast %#v of type %T to %T", i, i, []T{}) - } -} - -// ToBoolSliceE casts an interface to a []bool type. -func ToBoolSliceE(i interface{}) ([]bool, error) { - return toSliceE(i, ToBoolE) -} - -// ToStringSliceE casts an interface to a []string type. -func ToStringSliceE(i interface{}) ([]string, error) { - var a []string - - switch v := i.(type) { - case []interface{}: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []string: - return v, nil - case []int8: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []uint8: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []uint: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []uint32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []uint64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case string: - return strings.Fields(v), nil - case []error: - for _, err := range i.([]error) { - a = append(a, err.Error()) - } - return a, nil - case interface{}: - str, err := ToStringE(v) - if err != nil { - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } - return []string{str}, nil - default: - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } -} - -// ToIntSliceE casts an interface to a []int type. -func ToIntSliceE(i interface{}) ([]int, error) { - return toSliceE(i, ToIntE) -} - -// ToUintSliceE casts an interface to a []uint type. -func ToUintSliceE(i interface{}) ([]uint, error) { - return toSliceE(i, ToUintE) -} - -// ToFloat64SliceE casts an interface to a []float64 type. -func ToFloat64SliceE(i interface{}) ([]float64, error) { - return toSliceE(i, ToFloat64E) -} - -// ToInt64SliceE casts an interface to a []int64 type. -func ToInt64SliceE(i interface{}) ([]int64, error) { - return toSliceE(i, ToInt64E) -} - -// ToDurationSliceE casts an interface to a []time.Duration type. 
-func ToDurationSliceE(i interface{}) ([]time.Duration, error) { - return toSliceE(i, ToDurationE) -} - -// StringToDate attempts to parse a string into a time.Time type using a -// predefined list of formats. If no suitable format is found, an error is -// returned. -func StringToDate(s string) (time.Time, error) { - return parseDateWith(s, time.UTC, timeFormats) -} - -// StringToDateInDefaultLocation casts an empty interface to a time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. -func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { - return parseDateWith(s, location, timeFormats) -} - -type timeFormatType int - -const ( - timeFormatNoTimezone timeFormatType = iota - timeFormatNamedTimezone - timeFormatNumericTimezone - timeFormatNumericAndNamedTimezone - timeFormatTimeOnly -) - -type timeFormat struct { - format string - typ timeFormatType -} - -func (f timeFormat) hasTimezone() bool { - // We don't include the formats with only named timezones, see - // https://github.com/golang/go/issues/19694#issuecomment-289103522 - return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone -} - -var timeFormats = []timeFormat{ - // Keep common formats at the top. - {"2006-01-02", timeFormatNoTimezone}, - {time.RFC3339, timeFormatNumericTimezone}, - {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - {time.RFC1123Z, timeFormatNumericTimezone}, - {time.RFC1123, timeFormatNamedTimezone}, - {time.RFC822Z, timeFormatNumericTimezone}, - {time.RFC822, timeFormatNamedTimezone}, - {time.RFC850, timeFormatNamedTimezone}, - {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - {"2006-01-02 15:04:05", timeFormatNoTimezone}, - {time.ANSIC, timeFormatNoTimezone}, - {time.UnixDate, timeFormatNamedTimezone}, - {time.RubyDate, timeFormatNumericTimezone}, - {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"02 Jan 2006", timeFormatNoTimezone}, - {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - {time.Kitchen, timeFormatTimeOnly}, - {time.Stamp, timeFormatTimeOnly}, - {time.StampMilli, timeFormatTimeOnly}, - {time.StampMicro, timeFormatTimeOnly}, - {time.StampNano, timeFormatTimeOnly}, -} - -func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - for _, format := range formats { - if d, e = time.Parse(format.format, s); e == nil { - - // Some time formats have a zone name, but no offset, so it gets - // put in that zone name (not the default one passed in to us), but - // without that zone's offset. So set the location manually. - if format.typ <= timeFormatNamedTimezone { - if location == nil { - location = time.Local - } - year, month, day := d.Date() - hour, min, sec := d.Clock() - d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) - } - - return - } - } - return d, fmt.Errorf("unable to parse date: %s", s) -} - -// jsonStringToObject attempts to unmarshall a string as JSON into -// the object passed as pointer. 
-func jsonStringToObject(s string, v interface{}) error { - data := []byte(s) - return json.Unmarshal(data, v) -} - -// toInt returns the int value of v if v or v's underlying type -// is an int. -// Note that this will return false for int64 etc. types. -func toInt(v interface{}) (int, bool) { - switch v := v.(type) { - case int: - return v, true - case time.Weekday: - return int(v), true - case time.Month: - return int(v), true - default: - return 0, false - } -} - -func trimZeroDecimal(s string) string { - var foundZero bool - for i := len(s); i > 0; i-- { - switch s[i-1] { - case '.': - if foundZero { - return s[:i-1] - } - case '0': - foundZero = true - default: - return s - } - } - return s -} diff --git a/vendor/github.com/spf13/cast/indirect.go b/vendor/github.com/spf13/cast/indirect.go new file mode 100644 index 000000000..093345f73 --- /dev/null +++ b/vendor/github.com/spf13/cast/indirect.go @@ -0,0 +1,37 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "reflect" +) + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. +// indirect returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil). +func indirect(i any) (any, bool) { + if i == nil { + return nil, false + } + + if t := reflect.TypeOf(i); t.Kind() != reflect.Ptr { + // Avoid creating a reflect.Value if it's not a pointer. + return i, false + } + + v := reflect.ValueOf(i) + + for v.Kind() == reflect.Ptr || (v.Kind() == reflect.Interface && v.Elem().Kind() == reflect.Ptr) { + if v.IsNil() { + return nil, true + } + + v = v.Elem() + } + + return v.Interface(), true +} diff --git a/vendor/github.com/spf13/cast/internal/time.go b/vendor/github.com/spf13/cast/internal/time.go new file mode 100644 index 000000000..906e9aece --- /dev/null +++ b/vendor/github.com/spf13/cast/internal/time.go @@ -0,0 +1,79 @@ +package internal + +import ( + "fmt" + "time" +) + +//go:generate stringer -type=TimeFormatType + +type TimeFormatType int + +const ( + TimeFormatNoTimezone TimeFormatType = iota + TimeFormatNamedTimezone + TimeFormatNumericTimezone + TimeFormatNumericAndNamedTimezone + TimeFormatTimeOnly +) + +type TimeFormat struct { + Format string + Typ TimeFormatType +} + +func (f TimeFormat) HasTimezone() bool { + // We don't include the formats with only named timezones, see + // https://github.com/golang/go/issues/19694#issuecomment-289103522 + return f.Typ >= TimeFormatNumericTimezone && f.Typ <= TimeFormatNumericAndNamedTimezone +} + +var TimeFormats = []TimeFormat{ + // Keep common formats at the top. 
+ {"2006-01-02", TimeFormatNoTimezone}, + {time.RFC3339, TimeFormatNumericTimezone}, + {"2006-01-02T15:04:05", TimeFormatNoTimezone}, // iso8601 without timezone + {time.RFC1123Z, TimeFormatNumericTimezone}, + {time.RFC1123, TimeFormatNamedTimezone}, + {time.RFC822Z, TimeFormatNumericTimezone}, + {time.RFC822, TimeFormatNamedTimezone}, + {time.RFC850, TimeFormatNamedTimezone}, + {"2006-01-02 15:04:05.999999999 -0700 MST", TimeFormatNumericAndNamedTimezone}, // Time.String() + {"2006-01-02T15:04:05-0700", TimeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + {"2006-01-02 15:04:05Z0700", TimeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + {"2006-01-02 15:04:05", TimeFormatNoTimezone}, + {time.ANSIC, TimeFormatNoTimezone}, + {time.UnixDate, TimeFormatNamedTimezone}, + {time.RubyDate, TimeFormatNumericTimezone}, + {"2006-01-02 15:04:05Z07:00", TimeFormatNumericTimezone}, + {"02 Jan 2006", TimeFormatNoTimezone}, + {"2006-01-02 15:04:05 -07:00", TimeFormatNumericTimezone}, + {"2006-01-02 15:04:05 -0700", TimeFormatNumericTimezone}, + {time.Kitchen, TimeFormatTimeOnly}, + {time.Stamp, TimeFormatTimeOnly}, + {time.StampMilli, TimeFormatTimeOnly}, + {time.StampMicro, TimeFormatTimeOnly}, + {time.StampNano, TimeFormatTimeOnly}, +} + +func ParseDateWith(s string, location *time.Location, formats []TimeFormat) (d time.Time, e error) { + for _, format := range formats { + if d, e = time.Parse(format.Format, s); e == nil { + + // Some time formats have a zone name, but no offset, so it gets + // put in that zone name (not the default one passed in to us), but + // without that zone's offset. So set the location manually. + if format.Typ <= TimeFormatNamedTimezone { + if location == nil { + location = time.Local + } + year, month, day := d.Date() + hour, min, sec := d.Clock() + d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) + } + + return + } + } + return d, fmt.Errorf("unable to parse date: %s", s) +} diff --git a/vendor/github.com/spf13/cast/internal/timeformattype_string.go b/vendor/github.com/spf13/cast/internal/timeformattype_string.go new file mode 100644 index 000000000..60a29a862 --- /dev/null +++ b/vendor/github.com/spf13/cast/internal/timeformattype_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=TimeFormatType"; DO NOT EDIT. + +package internal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TimeFormatNoTimezone-0] + _ = x[TimeFormatNamedTimezone-1] + _ = x[TimeFormatNumericTimezone-2] + _ = x[TimeFormatNumericAndNamedTimezone-3] + _ = x[TimeFormatTimeOnly-4] +} + +const _TimeFormatType_name = "TimeFormatNoTimezoneTimeFormatNamedTimezoneTimeFormatNumericTimezoneTimeFormatNumericAndNamedTimezoneTimeFormatTimeOnly" + +var _TimeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} + +func (i TimeFormatType) String() string { + if i < 0 || i >= TimeFormatType(len(_TimeFormatType_index)-1) { + return "TimeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _TimeFormatType_name[_TimeFormatType_index[i]:_TimeFormatType_index[i+1]] +} diff --git a/vendor/github.com/spf13/cast/map.go b/vendor/github.com/spf13/cast/map.go new file mode 100644 index 000000000..7d6beb56c --- /dev/null +++ b/vendor/github.com/spf13/cast/map.go @@ -0,0 +1,224 @@ +// Copyright © 2014 Steve Francia . 
+// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "fmt" + "reflect" +) + +func toMapE[K comparable, V any](i any, keyFn func(any) K, valFn func(any) V) (map[K]V, error) { + m := map[K]V{} + + if i == nil { + return nil, fmt.Errorf(errorMsg, i, i, m) + } + + switch v := i.(type) { + case map[K]V: + return v, nil + + case map[K]any: + for k, val := range v { + m[k] = valFn(val) + } + + return m, nil + + case map[any]V: + for k, val := range v { + m[keyFn(k)] = val + } + + return m, nil + + case map[any]any: + for k, val := range v { + m[keyFn(k)] = valFn(val) + } + + return m, nil + + case string: + err := jsonStringToObject(v, &m) + if err != nil { + return nil, err + } + + return m, nil + + default: + return nil, fmt.Errorf(errorMsg, i, i, m) + } +} + +func toStringMapE[T any](i any, fn func(any) T) (map[string]T, error) { + return toMapE(i, ToString, fn) +} + +// ToStringMapStringE casts any value to a map[string]string type. +func ToStringMapStringE(i any) (map[string]string, error) { + return toStringMapE(i, ToString) +} + +// ToStringMapStringSliceE casts any value to a map[string][]string type. +func ToStringMapStringSliceE(i any) (map[string][]string, error) { + m := map[string][]string{} + + switch v := i.(type) { + case map[string][]string: + return v, nil + case map[string][]any: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[string]string: + for k, val := range v { + m[ToString(k)] = []string{val} + } + case map[string]any: + for k, val := range v { + switch vt := val.(type) { + case []any: + m[ToString(k)] = ToStringSlice(vt) + case []string: + m[ToString(k)] = vt + default: + m[ToString(k)] = []string{ToString(val)} + } + } + return m, nil + case map[any][]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[any]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[any][]any: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[any]any: + for k, val := range v { + key, err := ToStringE(k) + if err != nil { + return nil, fmt.Errorf(errorMsg, i, i, m) + } + value, err := ToStringSliceE(val) + if err != nil { + return nil, fmt.Errorf(errorMsg, i, i, m) + } + m[key] = value + } + case string: + err := jsonStringToObject(v, &m) + if err != nil { + return nil, err + } + + return m, nil + default: + return nil, fmt.Errorf(errorMsg, i, i, m) + } + + return m, nil +} + +// ToStringMapBoolE casts any value to a map[string]bool type. +func ToStringMapBoolE(i any) (map[string]bool, error) { + return toStringMapE(i, ToBool) +} + +// ToStringMapE casts any value to a map[string]any type. 
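The map helpers above funnel most conversions through the generic toMapE: an already-typed map is returned as-is, map[any]any keys and values are converted element by element, and a plain string is treated as a JSON object. A minimal sketch of how that behaves through the exported wrappers (illustrative only, not part of the vendored diff):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// map[any]any: keys and values are stringified element by element.
	m := cast.ToStringMapString(map[any]any{"port": 8080, "debug": true})
	fmt.Println(m) // map[debug:true port:8080]

	// A string is treated as JSON and unmarshalled via jsonStringToObject.
	s, err := cast.ToStringMapStringSliceE(`{"tags": ["a", "b"]}`)
	fmt.Println(s, err) // map[tags:[a b]] <nil>

	// Unsupported inputs surface an error from the *E variant.
	if _, err := cast.ToStringMapStringE(42); err != nil {
		fmt.Println("error:", err)
	}
}
```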
+func ToStringMapE(i any) (map[string]any, error) { + fn := func(i any) any { return i } + + return toStringMapE(i, fn) +} + +func toStringMapIntE[T int | int64](i any, fn func(any) T, fnE func(any) (T, error)) (map[string]T, error) { + m := map[string]T{} + + if i == nil { + return nil, fmt.Errorf(errorMsg, i, i, m) + } + + switch v := i.(type) { + case map[string]T: + return v, nil + + case map[string]any: + for k, val := range v { + m[k] = fn(val) + } + + return m, nil + + case map[any]T: + for k, val := range v { + m[ToString(k)] = val + } + + return m, nil + + case map[any]any: + for k, val := range v { + m[ToString(k)] = fn(val) + } + + return m, nil + + case string: + err := jsonStringToObject(v, &m) + if err != nil { + return nil, err + } + + return m, nil + } + + if reflect.TypeOf(i).Kind() != reflect.Map { + return nil, fmt.Errorf(errorMsg, i, i, m) + } + + mVal := reflect.ValueOf(m) + v := reflect.ValueOf(i) + + for _, keyVal := range v.MapKeys() { + val, err := fnE(v.MapIndex(keyVal).Interface()) + if err != nil { + return m, fmt.Errorf(errorMsg, i, i, m) + } + + mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) + } + + return m, nil +} + +// ToStringMapIntE casts any value to a map[string]int type. +func ToStringMapIntE(i any) (map[string]int, error) { + return toStringMapIntE(i, ToInt, ToIntE) +} + +// ToStringMapInt64E casts any value to a map[string]int64 type. +func ToStringMapInt64E(i any) (map[string]int64, error) { + return toStringMapIntE(i, ToInt64, ToInt64E) +} + +// jsonStringToObject attempts to unmarshall a string as JSON into +// the object passed as pointer. +func jsonStringToObject(s string, v any) error { + data := []byte(s) + return json.Unmarshal(data, v) +} diff --git a/vendor/github.com/spf13/cast/number.go b/vendor/github.com/spf13/cast/number.go new file mode 100644 index 000000000..a58dc4d1e --- /dev/null +++ b/vendor/github.com/spf13/cast/number.go @@ -0,0 +1,549 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "time" +) + +var errNegativeNotAllowed = errors.New("unable to cast negative value") + +type float64EProvider interface { + Float64() (float64, error) +} + +type float64Provider interface { + Float64() float64 +} + +// Number is a type parameter constraint for functions accepting number types. +// +// It represents the supported number types this package can cast to. +type Number interface { + int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | float32 | float64 +} + +type integer interface { + int | int8 | int16 | int32 | int64 +} + +type unsigned interface { + uint | uint8 | uint16 | uint32 | uint64 +} + +type float interface { + float32 | float64 +} + +// ToNumberE casts any value to a [Number] type. 
+func ToNumberE[T Number](i any) (T, error) { + var t T + + switch any(t).(type) { + case int: + return toNumberE[T](i, parseNumber[T]) + case int8: + return toNumberE[T](i, parseNumber[T]) + case int16: + return toNumberE[T](i, parseNumber[T]) + case int32: + return toNumberE[T](i, parseNumber[T]) + case int64: + return toNumberE[T](i, parseNumber[T]) + case uint: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint8: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint16: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint32: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint64: + return toUnsignedNumberE[T](i, parseNumber[T]) + case float32: + return toNumberE[T](i, parseNumber[T]) + case float64: + return toNumberE[T](i, parseNumber[T]) + default: + return 0, fmt.Errorf("unknown number type: %T", t) + } +} + +// ToNumber casts any value to a [Number] type. +func ToNumber[T Number](i any) T { + v, _ := ToNumberE[T](i) + + return v +} + +// toNumber's semantics differ from other "to" functions. +// It returns false as the second parameter if the conversion fails. +// This is to signal other callers that they should proceed with their own conversions. +func toNumber[T Number](i any) (T, bool) { + i, _ = indirect(i) + + switch s := i.(type) { + case T: + return s, true + case int: + return T(s), true + case int8: + return T(s), true + case int16: + return T(s), true + case int32: + return T(s), true + case int64: + return T(s), true + case uint: + return T(s), true + case uint8: + return T(s), true + case uint16: + return T(s), true + case uint32: + return T(s), true + case uint64: + return T(s), true + case float32: + return T(s), true + case float64: + return T(s), true + case bool: + if s { + return 1, true + } + + return 0, true + case nil: + return 0, true + case time.Weekday: + return T(s), true + case time.Month: + return T(s), true + } + + return 0, false +} + +func toNumberE[T Number](i any, parseFn func(string) (T, error)) (T, error) { + n, ok := toNumber[T](i) + if ok { + return n, nil + } + + i, _ = indirect(i) + + switch s := i.(type) { + case string: + if s == "" { + return 0, nil + } + + v, err := parseFn(s) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case json.Number: + if s == "" { + return 0, nil + } + + v, err := parseFn(string(s)) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case float64EProvider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + v, err := s.Float64() + if err != nil { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + return T(v), nil + case float64Provider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + return T(s.Float64()), nil + default: + if i, ok := resolveAlias(i); ok { + return toNumberE(i, parseFn) + } + + return 0, fmt.Errorf(errorMsg, i, i, n) + } +} + +func toUnsignedNumber[T Number](i any) (T, bool, bool) { + i, _ = indirect(i) + + switch s := i.(type) { + case T: + return s, true, true + case int: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int8: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int16: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int32: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int64: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case uint: + return T(s), true, true 
+ case uint8: + return T(s), true, true + case uint16: + return T(s), true, true + case uint32: + return T(s), true, true + case uint64: + return T(s), true, true + case float32: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case float64: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case bool: + if s { + return 1, true, true + } + + return 0, true, true + case nil: + return 0, true, true + case time.Weekday: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case time.Month: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + } + + return 0, true, false +} + +func toUnsignedNumberE[T Number](i any, parseFn func(string) (T, error)) (T, error) { + n, valid, ok := toUnsignedNumber[T](i) + if ok { + return n, nil + } + + i, _ = indirect(i) + + if !valid { + return 0, errNegativeNotAllowed + } + + switch s := i.(type) { + case string: + if s == "" { + return 0, nil + } + + v, err := parseFn(s) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case json.Number: + if s == "" { + return 0, nil + } + + v, err := parseFn(string(s)) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case float64EProvider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + v, err := s.Float64() + if err != nil { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + if v < 0 { + return 0, errNegativeNotAllowed + } + + return T(v), nil + case float64Provider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + v := s.Float64() + + if v < 0 { + return 0, errNegativeNotAllowed + } + + return T(v), nil + default: + if i, ok := resolveAlias(i); ok { + return toUnsignedNumberE(i, parseFn) + } + + return 0, fmt.Errorf(errorMsg, i, i, n) + } +} + +func parseNumber[T Number](s string) (T, error) { + var t T + + switch any(t).(type) { + case int: + v, err := parseInt[int](s) + + return T(v), err + case int8: + v, err := parseInt[int8](s) + + return T(v), err + case int16: + v, err := parseInt[int16](s) + + return T(v), err + case int32: + v, err := parseInt[int32](s) + + return T(v), err + case int64: + v, err := parseInt[int64](s) + + return T(v), err + case uint: + v, err := parseUint[uint](s) + + return T(v), err + case uint8: + v, err := parseUint[uint8](s) + + return T(v), err + case uint16: + v, err := parseUint[uint16](s) + + return T(v), err + case uint32: + v, err := parseUint[uint32](s) + + return T(v), err + case uint64: + v, err := parseUint[uint64](s) + + return T(v), err + case float32: + v, err := strconv.ParseFloat(s, 32) + + return T(v), err + case float64: + v, err := strconv.ParseFloat(s, 64) + + return T(v), err + + default: + return 0, fmt.Errorf("unknown number type: %T", t) + } +} + +func parseInt[T integer](s string) (T, error) { + v, err := strconv.ParseInt(trimDecimal(s), 0, 0) + if err != nil { + return 0, err + } + + return T(v), nil +} + +func parseUint[T unsigned](s string) (T, error) { + v, err := strconv.ParseUint(strings.TrimLeft(trimDecimal(s), "+"), 0, 0) + if err != nil { + return 0, err + } + + return T(v), nil +} + +func parseFloat[T float](s string) (T, error) { + var t T + + var v any + var err error + + switch any(t).(type) { + case float32: + n, e := strconv.ParseFloat(s, 32) + + v = float32(n) + err = e + case float64: + n, e := strconv.ParseFloat(s, 64) + + v = float64(n) + err = e + } + + return v.(T), err +} + +// ToFloat64E casts an 
interface to a float64 type. +func ToFloat64E(i any) (float64, error) { + return toNumberE[float64](i, parseFloat[float64]) +} + +// ToFloat32E casts an interface to a float32 type. +func ToFloat32E(i any) (float32, error) { + return toNumberE[float32](i, parseFloat[float32]) +} + +// ToInt64E casts an interface to an int64 type. +func ToInt64E(i any) (int64, error) { + return toNumberE[int64](i, parseInt[int64]) +} + +// ToInt32E casts an interface to an int32 type. +func ToInt32E(i any) (int32, error) { + return toNumberE[int32](i, parseInt[int32]) +} + +// ToInt16E casts an interface to an int16 type. +func ToInt16E(i any) (int16, error) { + return toNumberE[int16](i, parseInt[int16]) +} + +// ToInt8E casts an interface to an int8 type. +func ToInt8E(i any) (int8, error) { + return toNumberE[int8](i, parseInt[int8]) +} + +// ToIntE casts an interface to an int type. +func ToIntE(i any) (int, error) { + return toNumberE[int](i, parseInt[int]) +} + +// ToUintE casts an interface to a uint type. +func ToUintE(i any) (uint, error) { + return toUnsignedNumberE[uint](i, parseUint[uint]) +} + +// ToUint64E casts an interface to a uint64 type. +func ToUint64E(i any) (uint64, error) { + return toUnsignedNumberE[uint64](i, parseUint[uint64]) +} + +// ToUint32E casts an interface to a uint32 type. +func ToUint32E(i any) (uint32, error) { + return toUnsignedNumberE[uint32](i, parseUint[uint32]) +} + +// ToUint16E casts an interface to a uint16 type. +func ToUint16E(i any) (uint16, error) { + return toUnsignedNumberE[uint16](i, parseUint[uint16]) +} + +// ToUint8E casts an interface to a uint type. +func ToUint8E(i any) (uint8, error) { + return toUnsignedNumberE[uint8](i, parseUint[uint8]) +} + +func trimZeroDecimal(s string) string { + var foundZero bool + for i := len(s); i > 0; i-- { + switch s[i-1] { + case '.': + if foundZero { + return s[:i-1] + } + case '0': + foundZero = true + default: + return s + } + } + return s +} + +var stringNumberRe = regexp.MustCompile(`^([-+]?\d*)(\.\d*)?$`) + +// see [BenchmarkDecimal] for details about the implementation +func trimDecimal(s string) string { + if !strings.Contains(s, ".") { + return s + } + + matches := stringNumberRe.FindStringSubmatch(s) + if matches != nil { + // matches[1] is the captured integer part with sign + s = matches[1] + + // handle special cases + switch s { + case "-", "+": + s += "0" + case "": + s = "0" + } + + return s + } + + return s +} diff --git a/vendor/github.com/spf13/cast/slice.go b/vendor/github.com/spf13/cast/slice.go new file mode 100644 index 000000000..e6a8328c6 --- /dev/null +++ b/vendor/github.com/spf13/cast/slice.go @@ -0,0 +1,106 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "fmt" + "reflect" + "strings" +) + +// ToSliceE casts any value to a []any type. 
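number.go now routes every signed, unsigned and float conversion through toNumberE/toUnsignedNumberE: pointers are dereferenced first via indirect, strings go through parseNumber (trimDecimal strips a trailing fractional part for integer targets), empty strings and nil become zero, and negative inputs to unsigned targets fail with errNegativeNotAllowed. A small sketch of the resulting behaviour through the public API (illustrative, not part of the diff):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Integer targets accept strings with a zero fractional part;
	// trimDecimal strips it before strconv parsing.
	fmt.Println(cast.ToInt("8.00")) // 8

	// json.Number and empty strings are handled explicitly.
	fmt.Println(cast.ToInt(json.Number("42"))) // 42
	fmt.Println(cast.ToInt(""))                // 0

	// Pointers are dereferenced via indirect before conversion.
	f := 1.5
	fmt.Println(cast.ToFloat64(&f)) // 1.5

	// Unsigned targets reject negative values instead of wrapping.
	if _, err := cast.ToUintE(-1); err != nil {
		fmt.Println(err) // unable to cast negative value
	}
}
```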
+func ToSliceE(i any) ([]any, error) { + i, _ = indirect(i) + + var s []any + + switch v := i.(type) { + case []any: + // TODO: use slices.Clone + return append(s, v...), nil + case []map[string]any: + for _, u := range v { + s = append(s, u) + } + + return s, nil + default: + return s, fmt.Errorf(errorMsg, i, i, s) + } +} + +func toSliceE[T Basic](i any) ([]T, error) { + v, ok, err := toSliceEOk[T](i) + if err != nil { + return nil, err + } + + if !ok { + return nil, fmt.Errorf(errorMsg, i, i, []T{}) + } + + return v, nil +} + +func toSliceEOk[T Basic](i any) ([]T, bool, error) { + i, _ = indirect(i) + if i == nil { + return nil, true, fmt.Errorf(errorMsg, i, i, []T{}) + } + + switch v := i.(type) { + case []T: + // TODO: clone slice + return v, true, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]T, s.Len()) + + for j := 0; j < s.Len(); j++ { + val, err := ToE[T](s.Index(j).Interface()) + if err != nil { + return nil, true, fmt.Errorf(errorMsg, i, i, []T{}) + } + + a[j] = val + } + + return a, true, nil + default: + return nil, false, nil + } +} + +// ToStringSliceE casts any value to a []string type. +func ToStringSliceE(i any) ([]string, error) { + if a, ok, err := toSliceEOk[string](i); ok { + if err != nil { + return nil, err + } + + return a, nil + } + + var a []string + + switch v := i.(type) { + case string: + return strings.Fields(v), nil + case any: + str, err := ToStringE(v) + if err != nil { + return nil, fmt.Errorf(errorMsg, i, i, a) + } + + return []string{str}, nil + default: + return nil, fmt.Errorf(errorMsg, i, i, a) + } +} diff --git a/vendor/github.com/spf13/cast/time.go b/vendor/github.com/spf13/cast/time.go new file mode 100644 index 000000000..744cd5acc --- /dev/null +++ b/vendor/github.com/spf13/cast/time.go @@ -0,0 +1,116 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/spf13/cast/internal" +) + +// ToTimeE any value to a [time.Time] type. +func ToTimeE(i any) (time.Time, error) { + return ToTimeInDefaultLocationE(i, time.UTC) +} + +// ToTimeInDefaultLocationE casts an empty interface to [time.Time], +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. +func ToTimeInDefaultLocationE(i any, location *time.Location) (tim time.Time, err error) { + i, _ = indirect(i) + + switch v := i.(type) { + case time.Time: + return v, nil + case string: + return StringToDateInDefaultLocation(v, location) + case json.Number: + // Originally this used ToInt64E, but adding string float conversion broke ToTime. + // the behavior of ToTime would have changed if we continued using it. + // For now, using json.Number's own Int64 method should be good enough to preserve backwards compatibility. 
+ v = json.Number(trimZeroDecimal(string(v))) + s, err1 := v.Int64() + if err1 != nil { + return time.Time{}, fmt.Errorf(errorMsg, i, i, time.Time{}) + } + return time.Unix(s, 0), nil + case int: + return time.Unix(int64(v), 0), nil + case int32: + return time.Unix(int64(v), 0), nil + case int64: + return time.Unix(v, 0), nil + case uint: + return time.Unix(int64(v), 0), nil + case uint32: + return time.Unix(int64(v), 0), nil + case uint64: + return time.Unix(int64(v), 0), nil + case nil: + return time.Time{}, nil + default: + return time.Time{}, fmt.Errorf(errorMsg, i, i, time.Time{}) + } +} + +// ToDurationE casts any value to a [time.Duration] type. +func ToDurationE(i any) (time.Duration, error) { + i, _ = indirect(i) + + switch s := i.(type) { + case time.Duration: + return s, nil + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + v, err := ToInt64E(s) + if err != nil { + // TODO: once there is better error handling, this should be easier + return 0, errors.New(strings.ReplaceAll(err.Error(), " int64", "time.Duration")) + } + + return time.Duration(v), nil + case float32, float64, float64EProvider, float64Provider: + v, err := ToFloat64E(s) + if err != nil { + // TODO: once there is better error handling, this should be easier + return 0, errors.New(strings.ReplaceAll(err.Error(), " float64", "time.Duration")) + } + + return time.Duration(v), nil + case string: + if !strings.ContainsAny(s, "nsuµmh") { + return time.ParseDuration(s + "ns") + } + + return time.ParseDuration(s) + case nil: + return time.Duration(0), nil + default: + if i, ok := resolveAlias(i); ok { + return ToDurationE(i) + } + + return 0, fmt.Errorf(errorMsg, i, i, time.Duration(0)) + } +} + +// StringToDate attempts to parse a string into a [time.Time] type using a +// predefined list of formats. +// +// If no suitable format is found, an error is returned. +func StringToDate(s string) (time.Time, error) { + return internal.ParseDateWith(s, time.UTC, internal.TimeFormats) +} + +// StringToDateInDefaultLocation casts an empty interface to a [time.Time], +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. +func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { + return internal.ParseDateWith(s, location, internal.TimeFormats) +} diff --git a/vendor/github.com/spf13/cast/timeformattype_string.go b/vendor/github.com/spf13/cast/timeformattype_string.go deleted file mode 100644 index 1524fc82c..000000000 --- a/vendor/github.com/spf13/cast/timeformattype_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type timeFormatType"; DO NOT EDIT. - -package cast - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
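time.go keeps the earlier parsing strategy: ParseDateWith walks the ordered TimeFormats list and, for layouts without an offset, rebuilds the result in the supplied location; ToDurationE treats unit-less strings the same as raw integers, i.e. as nanoseconds. Roughly, through the public API (illustrative sketch, not part of the diff):

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// Layouts without a timezone are re-anchored in the given location
	// (UTC when going through plain StringToDate / ToTime).
	loc := time.FixedZone("UTC+2", 2*60*60)
	t, err := cast.StringToDateInDefaultLocation("2006-01-02 15:04:05", loc)
	fmt.Println(t, err) // 2006-01-02 15:04:05 +0200 UTC+2 <nil>

	// Strings without a unit suffix are parsed as nanoseconds, matching
	// the integer and float cases.
	fmt.Println(cast.ToDuration("1500ms")) // 1.5s
	fmt.Println(cast.ToDuration("100"))    // 100ns
	fmt.Println(cast.ToDuration(100))      // 100ns
}
```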
- var x [1]struct{} - _ = x[timeFormatNoTimezone-0] - _ = x[timeFormatNamedTimezone-1] - _ = x[timeFormatNumericTimezone-2] - _ = x[timeFormatNumericAndNamedTimezone-3] - _ = x[timeFormatTimeOnly-4] -} - -const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" - -var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} - -func (i timeFormatType) String() string { - if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { - return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] -} diff --git a/vendor/github.com/spf13/cast/zz_generated.go b/vendor/github.com/spf13/cast/zz_generated.go new file mode 100644 index 000000000..ce3ec0f78 --- /dev/null +++ b/vendor/github.com/spf13/cast/zz_generated.go @@ -0,0 +1,261 @@ +// Code generated by cast generator. DO NOT EDIT. + +package cast + +import "time" + +// ToBool casts any value to a(n) bool type. +func ToBool(i any) bool { + v, _ := ToBoolE(i) + return v +} + +// ToString casts any value to a(n) string type. +func ToString(i any) string { + v, _ := ToStringE(i) + return v +} + +// ToTime casts any value to a(n) time.Time type. +func ToTime(i any) time.Time { + v, _ := ToTimeE(i) + return v +} + +// ToTimeInDefaultLocation casts any value to a(n) time.Time type. +func ToTimeInDefaultLocation(i any, location *time.Location) time.Time { + v, _ := ToTimeInDefaultLocationE(i, location) + return v +} + +// ToDuration casts any value to a(n) time.Duration type. +func ToDuration(i any) time.Duration { + v, _ := ToDurationE(i) + return v +} + +// ToInt casts any value to a(n) int type. +func ToInt(i any) int { + v, _ := ToIntE(i) + return v +} + +// ToInt8 casts any value to a(n) int8 type. +func ToInt8(i any) int8 { + v, _ := ToInt8E(i) + return v +} + +// ToInt16 casts any value to a(n) int16 type. +func ToInt16(i any) int16 { + v, _ := ToInt16E(i) + return v +} + +// ToInt32 casts any value to a(n) int32 type. +func ToInt32(i any) int32 { + v, _ := ToInt32E(i) + return v +} + +// ToInt64 casts any value to a(n) int64 type. +func ToInt64(i any) int64 { + v, _ := ToInt64E(i) + return v +} + +// ToUint casts any value to a(n) uint type. +func ToUint(i any) uint { + v, _ := ToUintE(i) + return v +} + +// ToUint8 casts any value to a(n) uint8 type. +func ToUint8(i any) uint8 { + v, _ := ToUint8E(i) + return v +} + +// ToUint16 casts any value to a(n) uint16 type. +func ToUint16(i any) uint16 { + v, _ := ToUint16E(i) + return v +} + +// ToUint32 casts any value to a(n) uint32 type. +func ToUint32(i any) uint32 { + v, _ := ToUint32E(i) + return v +} + +// ToUint64 casts any value to a(n) uint64 type. +func ToUint64(i any) uint64 { + v, _ := ToUint64E(i) + return v +} + +// ToFloat32 casts any value to a(n) float32 type. +func ToFloat32(i any) float32 { + v, _ := ToFloat32E(i) + return v +} + +// ToFloat64 casts any value to a(n) float64 type. +func ToFloat64(i any) float64 { + v, _ := ToFloat64E(i) + return v +} + +// ToStringMapString casts any value to a(n) map[string]string type. +func ToStringMapString(i any) map[string]string { + v, _ := ToStringMapStringE(i) + return v +} + +// ToStringMapStringSlice casts any value to a(n) map[string][]string type. +func ToStringMapStringSlice(i any) map[string][]string { + v, _ := ToStringMapStringSliceE(i) + return v +} + +// ToStringMapBool casts any value to a(n) map[string]bool type. 
+func ToStringMapBool(i any) map[string]bool { + v, _ := ToStringMapBoolE(i) + return v +} + +// ToStringMapInt casts any value to a(n) map[string]int type. +func ToStringMapInt(i any) map[string]int { + v, _ := ToStringMapIntE(i) + return v +} + +// ToStringMapInt64 casts any value to a(n) map[string]int64 type. +func ToStringMapInt64(i any) map[string]int64 { + v, _ := ToStringMapInt64E(i) + return v +} + +// ToStringMap casts any value to a(n) map[string]any type. +func ToStringMap(i any) map[string]any { + v, _ := ToStringMapE(i) + return v +} + +// ToSlice casts any value to a(n) []any type. +func ToSlice(i any) []any { + v, _ := ToSliceE(i) + return v +} + +// ToBoolSlice casts any value to a(n) []bool type. +func ToBoolSlice(i any) []bool { + v, _ := ToBoolSliceE(i) + return v +} + +// ToStringSlice casts any value to a(n) []string type. +func ToStringSlice(i any) []string { + v, _ := ToStringSliceE(i) + return v +} + +// ToIntSlice casts any value to a(n) []int type. +func ToIntSlice(i any) []int { + v, _ := ToIntSliceE(i) + return v +} + +// ToInt64Slice casts any value to a(n) []int64 type. +func ToInt64Slice(i any) []int64 { + v, _ := ToInt64SliceE(i) + return v +} + +// ToUintSlice casts any value to a(n) []uint type. +func ToUintSlice(i any) []uint { + v, _ := ToUintSliceE(i) + return v +} + +// ToFloat64Slice casts any value to a(n) []float64 type. +func ToFloat64Slice(i any) []float64 { + v, _ := ToFloat64SliceE(i) + return v +} + +// ToDurationSlice casts any value to a(n) []time.Duration type. +func ToDurationSlice(i any) []time.Duration { + v, _ := ToDurationSliceE(i) + return v +} + +// ToBoolSliceE casts any value to a(n) []bool type. +func ToBoolSliceE(i any) ([]bool, error) { + return toSliceE[bool](i) +} + +// ToDurationSliceE casts any value to a(n) []time.Duration type. +func ToDurationSliceE(i any) ([]time.Duration, error) { + return toSliceE[time.Duration](i) +} + +// ToIntSliceE casts any value to a(n) []int type. +func ToIntSliceE(i any) ([]int, error) { + return toSliceE[int](i) +} + +// ToInt8SliceE casts any value to a(n) []int8 type. +func ToInt8SliceE(i any) ([]int8, error) { + return toSliceE[int8](i) +} + +// ToInt16SliceE casts any value to a(n) []int16 type. +func ToInt16SliceE(i any) ([]int16, error) { + return toSliceE[int16](i) +} + +// ToInt32SliceE casts any value to a(n) []int32 type. +func ToInt32SliceE(i any) ([]int32, error) { + return toSliceE[int32](i) +} + +// ToInt64SliceE casts any value to a(n) []int64 type. +func ToInt64SliceE(i any) ([]int64, error) { + return toSliceE[int64](i) +} + +// ToUintSliceE casts any value to a(n) []uint type. +func ToUintSliceE(i any) ([]uint, error) { + return toSliceE[uint](i) +} + +// ToUint8SliceE casts any value to a(n) []uint8 type. +func ToUint8SliceE(i any) ([]uint8, error) { + return toSliceE[uint8](i) +} + +// ToUint16SliceE casts any value to a(n) []uint16 type. +func ToUint16SliceE(i any) ([]uint16, error) { + return toSliceE[uint16](i) +} + +// ToUint32SliceE casts any value to a(n) []uint32 type. +func ToUint32SliceE(i any) ([]uint32, error) { + return toSliceE[uint32](i) +} + +// ToUint64SliceE casts any value to a(n) []uint64 type. +func ToUint64SliceE(i any) ([]uint64, error) { + return toSliceE[uint64](i) +} + +// ToFloat32SliceE casts any value to a(n) []float32 type. +func ToFloat32SliceE(i any) ([]float32, error) { + return toSliceE[float32](i) +} + +// ToFloat64SliceE casts any value to a(n) []float64 type. 
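slice.go plus the generated wrappers in zz_generated.go follow one pattern: toSliceE converts any slice or array element by element, ToStringSliceE additionally splits a bare string on whitespace, and each non-E function simply discards the error and returns the zero value. For example (illustrative, not from the diff):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// A bare string is split on whitespace.
	fmt.Println(cast.ToStringSlice("a b  c")) // [a b c]

	// Other slices and arrays are converted element by element.
	fmt.Println(cast.ToIntSlice([]string{"1", "2", "3"})) // [1 2 3]

	// The generated non-E wrappers drop the error and return the zero value.
	fmt.Println(cast.ToInt64Slice("not a number list")) // []

	if _, err := cast.ToInt64SliceE("not a number list"); err != nil {
		fmt.Println("error:", err)
	}
}
```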
+func ToFloat64SliceE(i any) ([]float64, error) { + return toSliceE[float64](i) +} diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md index b8c1829bb..d50e90e45 100644 --- a/vendor/github.com/ugorji/go/codec/README.md +++ b/vendor/github.com/ugorji/go/codec/README.md @@ -80,6 +80,32 @@ Rich Feature Set includes: rpc server/client codec to support msgpack-rpc protocol defined at: https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +# Supported build tags + +We gain performance by code-generating fast-paths for slices and maps of built-in types, +and monomorphizing generic code explicitly so we gain inlining and de-virtualization benefits. + +The results are 20-40% performance improvements. + +Building and running is configured using build tags as below. + +At runtime: + +- codec.safe: run in safe mode (not using unsafe optimizations) +- codec.notmono: use generics code (bypassing performance-boosting monomorphized code) +- codec.notfastpath: skip fast path code for slices and maps of built-in types (number, bool, string, bytes) + +Each of these "runtime" tags have a convenience synonym i.e. safe, notmono, notfastpath. +Pls use these mostly during development - use codec.XXX in your go files. + +Build only: + +- codec.build: used to generate fastpath and monomorphization code + +Test only: + +- codec.notmammoth: skip the mammoth generated tests + # Extension Support Users can register a function to handle the encoding or decoding of their custom @@ -219,6 +245,12 @@ You can run the tag 'codec.safe' to run tests or build in safe mode. e.g. go test -tags "alltests codec.safe" -run Suite ``` +You can run the tag 'codec.notmono' to build bypassing the monomorphized code e.g. + +``` + go test -tags codec.notmono -run Json +``` + # Running Benchmarks ``` diff --git a/vendor/github.com/ugorji/go/codec/base.fastpath.generated.go b/vendor/github.com/ugorji/go/codec/base.fastpath.generated.go new file mode 100644 index 000000000..d1fbba460 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/base.fastpath.generated.go @@ -0,0 +1,259 @@ +//go:build !notfastpath && !codec.notfastpath + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from fastpath.go.tmpl - DO NOT EDIT. + +package codec + +// Fast path functions try to create a fast path encode or decode implementation +// for common maps and slices. +// +// We define the functions and register them in this single file +// so as not to pollute the encode.go and decode.go, and create a dependency in there. +// This file can be omitted without causing a build failure. +// +// The advantage of fast paths is: +// - Many calls bypass reflection altogether +// +// Currently support +// - slice of all builtin types (numeric, bool, string, []byte) +// - maps of builtin types to builtin or interface{} type, EXCEPT FOR +// keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{} +// AND values of type type int8/16/32, uint16/32 +// This should provide adequate "typical" implementations. +// +// Note that fast track decode functions must handle values for which an address cannot be obtained. +// For example: +// m2 := map[string]int{} +// p2 := []interface{}{m2} +// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. 
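The fast-path file registers hand-written encode/decode routines for the builtin slice and map types listed in its init function, so common values such as map[string]int or []interface{} never hit reflection; the codec.safe/codec.notmono/codec.notfastpath build tags documented in the README swap these implementations out without touching calling code. A minimal round-trip sketch through the public API (NewEncoderBytes, NewDecoderBytes and JsonHandle are the package's long-standing entry points, not shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle

	// map[string]int is one of the registered fast-path types, so this
	// encode/decode pair runs through the generated code, not reflection.
	in := map[string]int{"a": 1, "b": 2}

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(in); err != nil {
		panic(err)
	}

	out := map[string]int{}
	if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[a:1 b:2]
}
```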
+// + +import ( + "reflect" + "slices" + "sort" +) + +const fastpathEnabled = true + +type fastpathARtid [56]uintptr + +type fastpathRtRtid struct { + rtid uintptr + rt reflect.Type +} +type fastpathARtRtid [56]fastpathRtRtid + +var ( + fastpathAvRtidArr fastpathARtid + fastpathAvRtRtidArr fastpathARtRtid + fastpathAvRtid = fastpathAvRtidArr[:] + fastpathAvRtRtid = fastpathAvRtRtidArr[:] +) + +func fastpathAvIndex(rtid uintptr) (i uint, ok bool) { + return searchRtids(fastpathAvRtid, rtid) +} + +func init() { + var i uint = 0 + fn := func(v interface{}) { + xrt := reflect.TypeOf(v) + xrtid := rt2id(xrt) + xptrtid := rt2id(reflect.PointerTo(xrt)) + fastpathAvRtid[i] = xrtid + fastpathAvRtRtid[i] = fastpathRtRtid{rtid: xrtid, rt: xrt} + encBuiltinRtids = append(encBuiltinRtids, xrtid, xptrtid) + decBuiltinRtids = append(decBuiltinRtids, xrtid, xptrtid) + i++ + } + + fn([]interface{}(nil)) + fn([]string(nil)) + fn([][]byte(nil)) + fn([]float32(nil)) + fn([]float64(nil)) + fn([]uint8(nil)) + fn([]uint64(nil)) + fn([]int(nil)) + fn([]int32(nil)) + fn([]int64(nil)) + fn([]bool(nil)) + + fn(map[string]interface{}(nil)) + fn(map[string]string(nil)) + fn(map[string][]byte(nil)) + fn(map[string]uint8(nil)) + fn(map[string]uint64(nil)) + fn(map[string]int(nil)) + fn(map[string]int32(nil)) + fn(map[string]float64(nil)) + fn(map[string]bool(nil)) + fn(map[uint8]interface{}(nil)) + fn(map[uint8]string(nil)) + fn(map[uint8][]byte(nil)) + fn(map[uint8]uint8(nil)) + fn(map[uint8]uint64(nil)) + fn(map[uint8]int(nil)) + fn(map[uint8]int32(nil)) + fn(map[uint8]float64(nil)) + fn(map[uint8]bool(nil)) + fn(map[uint64]interface{}(nil)) + fn(map[uint64]string(nil)) + fn(map[uint64][]byte(nil)) + fn(map[uint64]uint8(nil)) + fn(map[uint64]uint64(nil)) + fn(map[uint64]int(nil)) + fn(map[uint64]int32(nil)) + fn(map[uint64]float64(nil)) + fn(map[uint64]bool(nil)) + fn(map[int]interface{}(nil)) + fn(map[int]string(nil)) + fn(map[int][]byte(nil)) + fn(map[int]uint8(nil)) + fn(map[int]uint64(nil)) + fn(map[int]int(nil)) + fn(map[int]int32(nil)) + fn(map[int]float64(nil)) + fn(map[int]bool(nil)) + fn(map[int32]interface{}(nil)) + fn(map[int32]string(nil)) + fn(map[int32][]byte(nil)) + fn(map[int32]uint8(nil)) + fn(map[int32]uint64(nil)) + fn(map[int32]int(nil)) + fn(map[int32]int32(nil)) + fn(map[int32]float64(nil)) + fn(map[int32]bool(nil)) + + sort.Slice(fastpathAvRtid, func(i, j int) bool { return fastpathAvRtid[i] < fastpathAvRtid[j] }) + sort.Slice(fastpathAvRtRtid, func(i, j int) bool { return fastpathAvRtRtid[i].rtid < fastpathAvRtRtid[j].rtid }) + slices.Sort(encBuiltinRtids) + slices.Sort(decBuiltinRtids) +} + +func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { + switch v := iv.(type) { + case *[]interface{}: + *v = nil + case *[]string: + *v = nil + case *[][]byte: + *v = nil + case *[]float32: + *v = nil + case *[]float64: + *v = nil + case *[]uint8: + *v = nil + case *[]uint64: + *v = nil + case *[]int: + *v = nil + case *[]int32: + *v = nil + case *[]int64: + *v = nil + case *[]bool: + *v = nil + + case *map[string]interface{}: + *v = nil + case *map[string]string: + *v = nil + case *map[string][]byte: + *v = nil + case *map[string]uint8: + *v = nil + case *map[string]uint64: + *v = nil + case *map[string]int: + *v = nil + case *map[string]int32: + *v = nil + case *map[string]float64: + *v = nil + case *map[string]bool: + *v = nil + case *map[uint8]interface{}: + *v = nil + case *map[uint8]string: + *v = nil + case *map[uint8][]byte: + *v = nil + case *map[uint8]uint8: + *v = nil + case 
*map[uint8]uint64: + *v = nil + case *map[uint8]int: + *v = nil + case *map[uint8]int32: + *v = nil + case *map[uint8]float64: + *v = nil + case *map[uint8]bool: + *v = nil + case *map[uint64]interface{}: + *v = nil + case *map[uint64]string: + *v = nil + case *map[uint64][]byte: + *v = nil + case *map[uint64]uint8: + *v = nil + case *map[uint64]uint64: + *v = nil + case *map[uint64]int: + *v = nil + case *map[uint64]int32: + *v = nil + case *map[uint64]float64: + *v = nil + case *map[uint64]bool: + *v = nil + case *map[int]interface{}: + *v = nil + case *map[int]string: + *v = nil + case *map[int][]byte: + *v = nil + case *map[int]uint8: + *v = nil + case *map[int]uint64: + *v = nil + case *map[int]int: + *v = nil + case *map[int]int32: + *v = nil + case *map[int]float64: + *v = nil + case *map[int]bool: + *v = nil + case *map[int32]interface{}: + *v = nil + case *map[int32]string: + *v = nil + case *map[int32][]byte: + *v = nil + case *map[int32]uint8: + *v = nil + case *map[int32]uint64: + *v = nil + case *map[int32]int: + *v = nil + case *map[int32]int32: + *v = nil + case *map[int32]float64: + *v = nil + case *map[int32]bool: + *v = nil + + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} diff --git a/vendor/github.com/ugorji/go/codec/base.fastpath.notmono.generated.go b/vendor/github.com/ugorji/go/codec/base.fastpath.notmono.generated.go new file mode 100644 index 000000000..572383463 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/base.fastpath.notmono.generated.go @@ -0,0 +1,6259 @@ +//go:build !notfastpath && !codec.notfastpath && (notmono || codec.notmono) + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from fastpath.notmono.go.tmpl - DO NOT EDIT. 
+ +package codec + +import ( + "reflect" + "slices" + "sort" +) + +type fastpathE[T encDriver] struct { + rtid uintptr + rt reflect.Type + encfn func(*encoder[T], *encFnInfo, reflect.Value) +} +type fastpathD[T decDriver] struct { + rtid uintptr + rt reflect.Type + decfn func(*decoder[T], *decFnInfo, reflect.Value) +} +type fastpathEs[T encDriver] [56]fastpathE[T] +type fastpathDs[T decDriver] [56]fastpathD[T] + +type fastpathET[T encDriver] struct{} +type fastpathDT[T decDriver] struct{} + +func (helperEncDriver[T]) fastpathEList() *fastpathEs[T] { + var i uint = 0 + var s fastpathEs[T] + fn := func(v interface{}, fe func(*encoder[T], *encFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathE[T]{rt2id(xrt), xrt, fe} + i++ + } + + fn([]interface{}(nil), (*encoder[T]).fastpathEncSliceIntfR) + fn([]string(nil), (*encoder[T]).fastpathEncSliceStringR) + fn([][]byte(nil), (*encoder[T]).fastpathEncSliceBytesR) + fn([]float32(nil), (*encoder[T]).fastpathEncSliceFloat32R) + fn([]float64(nil), (*encoder[T]).fastpathEncSliceFloat64R) + fn([]uint8(nil), (*encoder[T]).fastpathEncSliceUint8R) + fn([]uint64(nil), (*encoder[T]).fastpathEncSliceUint64R) + fn([]int(nil), (*encoder[T]).fastpathEncSliceIntR) + fn([]int32(nil), (*encoder[T]).fastpathEncSliceInt32R) + fn([]int64(nil), (*encoder[T]).fastpathEncSliceInt64R) + fn([]bool(nil), (*encoder[T]).fastpathEncSliceBoolR) + + fn(map[string]interface{}(nil), (*encoder[T]).fastpathEncMapStringIntfR) + fn(map[string]string(nil), (*encoder[T]).fastpathEncMapStringStringR) + fn(map[string][]byte(nil), (*encoder[T]).fastpathEncMapStringBytesR) + fn(map[string]uint8(nil), (*encoder[T]).fastpathEncMapStringUint8R) + fn(map[string]uint64(nil), (*encoder[T]).fastpathEncMapStringUint64R) + fn(map[string]int(nil), (*encoder[T]).fastpathEncMapStringIntR) + fn(map[string]int32(nil), (*encoder[T]).fastpathEncMapStringInt32R) + fn(map[string]float64(nil), (*encoder[T]).fastpathEncMapStringFloat64R) + fn(map[string]bool(nil), (*encoder[T]).fastpathEncMapStringBoolR) + fn(map[uint8]interface{}(nil), (*encoder[T]).fastpathEncMapUint8IntfR) + fn(map[uint8]string(nil), (*encoder[T]).fastpathEncMapUint8StringR) + fn(map[uint8][]byte(nil), (*encoder[T]).fastpathEncMapUint8BytesR) + fn(map[uint8]uint8(nil), (*encoder[T]).fastpathEncMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*encoder[T]).fastpathEncMapUint8Uint64R) + fn(map[uint8]int(nil), (*encoder[T]).fastpathEncMapUint8IntR) + fn(map[uint8]int32(nil), (*encoder[T]).fastpathEncMapUint8Int32R) + fn(map[uint8]float64(nil), (*encoder[T]).fastpathEncMapUint8Float64R) + fn(map[uint8]bool(nil), (*encoder[T]).fastpathEncMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*encoder[T]).fastpathEncMapUint64IntfR) + fn(map[uint64]string(nil), (*encoder[T]).fastpathEncMapUint64StringR) + fn(map[uint64][]byte(nil), (*encoder[T]).fastpathEncMapUint64BytesR) + fn(map[uint64]uint8(nil), (*encoder[T]).fastpathEncMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*encoder[T]).fastpathEncMapUint64Uint64R) + fn(map[uint64]int(nil), (*encoder[T]).fastpathEncMapUint64IntR) + fn(map[uint64]int32(nil), (*encoder[T]).fastpathEncMapUint64Int32R) + fn(map[uint64]float64(nil), (*encoder[T]).fastpathEncMapUint64Float64R) + fn(map[uint64]bool(nil), (*encoder[T]).fastpathEncMapUint64BoolR) + fn(map[int]interface{}(nil), (*encoder[T]).fastpathEncMapIntIntfR) + fn(map[int]string(nil), (*encoder[T]).fastpathEncMapIntStringR) + fn(map[int][]byte(nil), (*encoder[T]).fastpathEncMapIntBytesR) + fn(map[int]uint8(nil), 
(*encoder[T]).fastpathEncMapIntUint8R) + fn(map[int]uint64(nil), (*encoder[T]).fastpathEncMapIntUint64R) + fn(map[int]int(nil), (*encoder[T]).fastpathEncMapIntIntR) + fn(map[int]int32(nil), (*encoder[T]).fastpathEncMapIntInt32R) + fn(map[int]float64(nil), (*encoder[T]).fastpathEncMapIntFloat64R) + fn(map[int]bool(nil), (*encoder[T]).fastpathEncMapIntBoolR) + fn(map[int32]interface{}(nil), (*encoder[T]).fastpathEncMapInt32IntfR) + fn(map[int32]string(nil), (*encoder[T]).fastpathEncMapInt32StringR) + fn(map[int32][]byte(nil), (*encoder[T]).fastpathEncMapInt32BytesR) + fn(map[int32]uint8(nil), (*encoder[T]).fastpathEncMapInt32Uint8R) + fn(map[int32]uint64(nil), (*encoder[T]).fastpathEncMapInt32Uint64R) + fn(map[int32]int(nil), (*encoder[T]).fastpathEncMapInt32IntR) + fn(map[int32]int32(nil), (*encoder[T]).fastpathEncMapInt32Int32R) + fn(map[int32]float64(nil), (*encoder[T]).fastpathEncMapInt32Float64R) + fn(map[int32]bool(nil), (*encoder[T]).fastpathEncMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperDecDriver[T]) fastpathDList() *fastpathDs[T] { + var i uint = 0 + var s fastpathDs[T] + fn := func(v interface{}, fd func(*decoder[T], *decFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathD[T]{rt2id(xrt), xrt, fd} + i++ + } + + fn([]interface{}(nil), (*decoder[T]).fastpathDecSliceIntfR) + fn([]string(nil), (*decoder[T]).fastpathDecSliceStringR) + fn([][]byte(nil), (*decoder[T]).fastpathDecSliceBytesR) + fn([]float32(nil), (*decoder[T]).fastpathDecSliceFloat32R) + fn([]float64(nil), (*decoder[T]).fastpathDecSliceFloat64R) + fn([]uint8(nil), (*decoder[T]).fastpathDecSliceUint8R) + fn([]uint64(nil), (*decoder[T]).fastpathDecSliceUint64R) + fn([]int(nil), (*decoder[T]).fastpathDecSliceIntR) + fn([]int32(nil), (*decoder[T]).fastpathDecSliceInt32R) + fn([]int64(nil), (*decoder[T]).fastpathDecSliceInt64R) + fn([]bool(nil), (*decoder[T]).fastpathDecSliceBoolR) + + fn(map[string]interface{}(nil), (*decoder[T]).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*decoder[T]).fastpathDecMapStringStringR) + fn(map[string][]byte(nil), (*decoder[T]).fastpathDecMapStringBytesR) + fn(map[string]uint8(nil), (*decoder[T]).fastpathDecMapStringUint8R) + fn(map[string]uint64(nil), (*decoder[T]).fastpathDecMapStringUint64R) + fn(map[string]int(nil), (*decoder[T]).fastpathDecMapStringIntR) + fn(map[string]int32(nil), (*decoder[T]).fastpathDecMapStringInt32R) + fn(map[string]float64(nil), (*decoder[T]).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*decoder[T]).fastpathDecMapStringBoolR) + fn(map[uint8]interface{}(nil), (*decoder[T]).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*decoder[T]).fastpathDecMapUint8StringR) + fn(map[uint8][]byte(nil), (*decoder[T]).fastpathDecMapUint8BytesR) + fn(map[uint8]uint8(nil), (*decoder[T]).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*decoder[T]).fastpathDecMapUint8Uint64R) + fn(map[uint8]int(nil), (*decoder[T]).fastpathDecMapUint8IntR) + fn(map[uint8]int32(nil), (*decoder[T]).fastpathDecMapUint8Int32R) + fn(map[uint8]float64(nil), (*decoder[T]).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*decoder[T]).fastpathDecMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*decoder[T]).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*decoder[T]).fastpathDecMapUint64StringR) + fn(map[uint64][]byte(nil), (*decoder[T]).fastpathDecMapUint64BytesR) + fn(map[uint64]uint8(nil), (*decoder[T]).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint64(nil), 
(*decoder[T]).fastpathDecMapUint64Uint64R) + fn(map[uint64]int(nil), (*decoder[T]).fastpathDecMapUint64IntR) + fn(map[uint64]int32(nil), (*decoder[T]).fastpathDecMapUint64Int32R) + fn(map[uint64]float64(nil), (*decoder[T]).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*decoder[T]).fastpathDecMapUint64BoolR) + fn(map[int]interface{}(nil), (*decoder[T]).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*decoder[T]).fastpathDecMapIntStringR) + fn(map[int][]byte(nil), (*decoder[T]).fastpathDecMapIntBytesR) + fn(map[int]uint8(nil), (*decoder[T]).fastpathDecMapIntUint8R) + fn(map[int]uint64(nil), (*decoder[T]).fastpathDecMapIntUint64R) + fn(map[int]int(nil), (*decoder[T]).fastpathDecMapIntIntR) + fn(map[int]int32(nil), (*decoder[T]).fastpathDecMapIntInt32R) + fn(map[int]float64(nil), (*decoder[T]).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*decoder[T]).fastpathDecMapIntBoolR) + fn(map[int32]interface{}(nil), (*decoder[T]).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*decoder[T]).fastpathDecMapInt32StringR) + fn(map[int32][]byte(nil), (*decoder[T]).fastpathDecMapInt32BytesR) + fn(map[int32]uint8(nil), (*decoder[T]).fastpathDecMapInt32Uint8R) + fn(map[int32]uint64(nil), (*decoder[T]).fastpathDecMapInt32Uint64R) + fn(map[int32]int(nil), (*decoder[T]).fastpathDecMapInt32IntR) + fn(map[int32]int32(nil), (*decoder[T]).fastpathDecMapInt32Int32R) + fn(map[int32]float64(nil), (*decoder[T]).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*decoder[T]).fastpathDecMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +// -- encode + +// -- -- fast path type switch +func (helperEncDriver[T]) fastpathEncodeTypeSwitch(iv interface{}, e *encoder[T]) bool { + var ft fastpathET[T] + switch v := iv.(type) { + case []interface{}: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntfV(v, e) + } + case []string: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceStringV(v, e) + } + case [][]byte: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBytesV(v, e) + } + case []float32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat32V(v, e) + } + case []float64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat64V(v, e) + } + case []uint8: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint8V(v, e) + } + case []uint64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint64V(v, e) + } + case []int: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntV(v, e) + } + case []int32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt32V(v, e) + } + case []int64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt64V(v, e) + } + case []bool: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBoolV(v, e) + } + case map[string]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntfV(v, e) + } + case map[string]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringStringV(v, e) + } + case map[string][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBytesV(v, e) + } + case map[string]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint8V(v, e) + } + case map[string]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint64V(v, e) + } + case map[string]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntV(v, e) + } + case map[string]int32: + if v == nil { + 
e.e.writeNilMap() + } else { + ft.EncMapStringInt32V(v, e) + } + case map[string]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringFloat64V(v, e) + } + case map[string]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBoolV(v, e) + } + case map[uint8]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntfV(v, e) + } + case map[uint8]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8StringV(v, e) + } + case map[uint8][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BytesV(v, e) + } + case map[uint8]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint8V(v, e) + } + case map[uint8]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint64V(v, e) + } + case map[uint8]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntV(v, e) + } + case map[uint8]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Int32V(v, e) + } + case map[uint8]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Float64V(v, e) + } + case map[uint8]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BoolV(v, e) + } + case map[uint64]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntfV(v, e) + } + case map[uint64]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64StringV(v, e) + } + case map[uint64][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BytesV(v, e) + } + case map[uint64]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint8V(v, e) + } + case map[uint64]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint64V(v, e) + } + case map[uint64]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntV(v, e) + } + case map[uint64]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Int32V(v, e) + } + case map[uint64]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Float64V(v, e) + } + case map[uint64]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BoolV(v, e) + } + case map[int]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntfV(v, e) + } + case map[int]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntStringV(v, e) + } + case map[int][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBytesV(v, e) + } + case map[int]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint8V(v, e) + } + case map[int]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint64V(v, e) + } + case map[int]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntV(v, e) + } + case map[int]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntInt32V(v, e) + } + case map[int]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntFloat64V(v, e) + } + case map[int]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBoolV(v, e) + } + case map[int32]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntfV(v, e) + } + case map[int32]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32StringV(v, e) + } + case map[int32][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BytesV(v, e) + } + case map[int32]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint8V(v, e) + } + case map[int32]uint64: + if v == nil 
{ + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint64V(v, e) + } + case map[int32]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntV(v, e) + } + case map[int32]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Int32V(v, e) + } + case map[int32]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Float64V(v, e) + } + case map[int32]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BoolV(v, e) + } + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +// -- -- fast path functions +func (e *encoder[T]) fastpathEncSliceIntfR(f *encFnInfo, rv reflect.Value) { + var ft fastpathET[T] + var v []interface{} + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]interface{}) + } + if f.ti.mbs { + ft.EncAsMapSliceIntfV(v, e) + return + } + ft.EncSliceIntfV(v, e) +} +func (fastpathET[T]) EncSliceIntfV(v []interface{}, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathET[T]) EncAsMapSliceIntfV(v []interface{}, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) + for j := range v { + if j&1 == 0 { // if j%2 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoder[T]) fastpathEncSliceStringR(f *encFnInfo, rv reflect.Value) { + var ft fastpathET[T] + var v []string + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]string) + } + if f.ti.mbs { + ft.EncAsMapSliceStringV(v, e) + return + } + ft.EncSliceStringV(v, e) +} +func (fastpathET[T]) EncSliceStringV(v []string, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathET[T]) EncAsMapSliceStringV(v []string, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) + for j := range v { + if j&1 == 0 { // if j%2 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoder[T]) fastpathEncSliceBytesR(f *encFnInfo, rv reflect.Value) { + var ft fastpathET[T] + var v [][]byte + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([][]byte) + } + if f.ti.mbs { + ft.EncAsMapSliceBytesV(v, e) + return + } + ft.EncSliceBytesV(v, e) +} +func (fastpathET[T]) EncSliceBytesV(v [][]byte, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathET[T]) EncAsMapSliceBytesV(v [][]byte, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + 
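// Standalone illustrative sketch (not part of the vendored change): the
// EncAsMapSlice...V helpers above encode a slice as alternating key/value
// pairs, calling haltOnMbsOddLen to reject odd lengths and writing
// len(v) >> 1 map entries. This sketch applies the same rule to []string,
// building an ordinary map; the function name is invented for the example.
package main

import (
	"errors"
	"fmt"
)

// sliceAsMap interprets v as k1, v1, k2, v2, ... and returns the pairs.
func sliceAsMap(v []string) (map[string]string, error) {
	if len(v)&1 == 1 { // pairs need an even number of elements
		return nil, errors.New("slice-as-map: odd number of elements")
	}
	m := make(map[string]string, len(v)>>1) // len(v)/2 entries
	for j := 0; j < len(v); j += 2 {
		m[v[j]] = v[j+1]
	}
	return m, nil
}

func main() {
	m, err := sliceAsMap([]string{"a", "1", "b", "2"})
	fmt.Println(m, err) // map[a:1 b:2] <nil>

	_, err = sliceAsMap([]string{"a", "1", "b"})
	fmt.Println(err) // slice-as-map: odd number of elements
}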
e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) + for j := range v { + if j&1 == 0 { // if j%2 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoder[T]) fastpathEncSliceFloat32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathET[T] + var v []float32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float32) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat32V(v, e) + return + } + ft.EncSliceFloat32V(v, e) +} +func (fastpathET[T]) EncSliceFloat32V(v []float32, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathET[T]) EncAsMapSliceFloat32V(v []float32, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) + for j := range v { + if j&1 == 0 { // if j%2 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoder[T]) fastpathEncSliceFloat64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathET[T] + var v []float64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float64) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat64V(v, e) + return + } + ft.EncSliceFloat64V(v, e) +} +func (fastpathET[T]) EncSliceFloat64V(v []float64, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathET[T]) EncAsMapSliceFloat64V(v []float64, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) + for j := range v { + if j&1 == 0 { // if j%2 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoder[T]) fastpathEncSliceUint8R(f *encFnInfo, rv reflect.Value) { + var ft fastpathET[T] + var v []uint8 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint8) + } + if f.ti.mbs { + ft.EncAsMapSliceUint8V(v, e) + return + } + ft.EncSliceUint8V(v, e) +} +func (fastpathET[T]) EncSliceUint8V(v []uint8, e *encoder[T]) { + e.e.EncodeStringBytesRaw(v) +} +func (fastpathET[T]) EncAsMapSliceUint8V(v []uint8, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) + for j := range v { + if j&1 == 0 { // if j%2 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(uint64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoder[T]) fastpathEncSliceUint64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathET[T] + var v []uint64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint64) + } + if f.ti.mbs { + ft.EncAsMapSliceUint64V(v, e) + return + } + ft.EncSliceUint64V(v, e) +} +func (fastpathET[T]) EncSliceUint64V(v 
[]uint64, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathET[T]) EncAsMapSliceUint64V(v []uint64, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) + for j := range v { + if j&1 == 0 { // if j%2 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoder[T]) fastpathEncSliceIntR(f *encFnInfo, rv reflect.Value) { + var ft fastpathET[T] + var v []int + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int) + } + if f.ti.mbs { + ft.EncAsMapSliceIntV(v, e) + return + } + ft.EncSliceIntV(v, e) +} +func (fastpathET[T]) EncSliceIntV(v []int, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathET[T]) EncAsMapSliceIntV(v []int, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) + for j := range v { + if j&1 == 0 { // if j%2 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoder[T]) fastpathEncSliceInt32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathET[T] + var v []int32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int32) + } + if f.ti.mbs { + ft.EncAsMapSliceInt32V(v, e) + return + } + ft.EncSliceInt32V(v, e) +} +func (fastpathET[T]) EncSliceInt32V(v []int32, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathET[T]) EncAsMapSliceInt32V(v []int32, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) + for j := range v { + if j&1 == 0 { // if j%2 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoder[T]) fastpathEncSliceInt64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathET[T] + var v []int64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int64) + } + if f.ti.mbs { + ft.EncAsMapSliceInt64V(v, e) + return + } + ft.EncSliceInt64V(v, e) +} +func (fastpathET[T]) EncSliceInt64V(v []int64, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathET[T]) EncAsMapSliceInt64V(v []int64, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) + for j := range v { 
+ if j&1 == 0 { // if j%2 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoder[T]) fastpathEncSliceBoolR(f *encFnInfo, rv reflect.Value) { + var ft fastpathET[T] + var v []bool + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]bool) + } + if f.ti.mbs { + ft.EncAsMapSliceBoolV(v, e) + return + } + ft.EncSliceBoolV(v, e) +} +func (fastpathET[T]) EncSliceBoolV(v []bool, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathET[T]) EncAsMapSliceBoolV(v []bool, e *encoder[T]) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) + for j := range v { + if j&1 == 0 { // if j%2 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoder[T]) fastpathEncMapStringIntfR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (fastpathET[T]) EncMapStringIntfV(v map[string]interface{}, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapStringStringR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (fastpathET[T]) EncMapStringStringV(v map[string]string, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapStringBytesR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e) +} +func (fastpathET[T]) EncMapStringBytesV(v map[string][]byte, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + 
e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapStringUint8R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (fastpathET[T]) EncMapStringUint8V(v map[string]uint8, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapStringUint64R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (fastpathET[T]) EncMapStringUint64V(v map[string]uint64, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapStringIntR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (fastpathET[T]) EncMapStringIntV(v map[string]int, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapStringInt32R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (fastpathET[T]) EncMapStringInt32V(v map[string]int32, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapStringFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (fastpathET[T]) EncMapStringFloat64V(v 
map[string]float64, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapStringBoolR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (fastpathET[T]) EncMapStringBoolV(v map[string]bool, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint8IntfR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (fastpathET[T]) EncMapUint8IntfV(v map[uint8]interface{}, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint8StringR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (fastpathET[T]) EncMapUint8StringV(v map[uint8]string, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint8BytesR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e) +} +func (fastpathET[T]) EncMapUint8BytesV(v map[uint8][]byte, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { 
+ e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint8Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (fastpathET[T]) EncMapUint8Uint8V(v map[uint8]uint8, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint8Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (fastpathET[T]) EncMapUint8Uint64V(v map[uint8]uint64, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint8IntR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (fastpathET[T]) EncMapUint8IntV(v map[uint8]int, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint8Int32R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (fastpathET[T]) EncMapUint8Int32V(v map[uint8]int32, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 
+ e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint8Float64R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (fastpathET[T]) EncMapUint8Float64V(v map[uint8]float64, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint8BoolR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (fastpathET[T]) EncMapUint8BoolV(v map[uint8]bool, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint64IntfR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (fastpathET[T]) EncMapUint64IntfV(v map[uint64]interface{}, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint64StringR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (fastpathET[T]) EncMapUint64StringV(v map[uint64]string, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint64BytesR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e) +} +func (fastpathET[T]) EncMapUint64BytesV(v map[uint64][]byte, e *encoder[T]) { 
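// Standalone illustrative sketch (not part of the vendored change): every
// EncMap...V helper above branches on e.h.Canonical, copying the keys into
// a slice, sorting them with slices.Sort, and emitting entries in key order
// so the output is deterministic; the non-canonical branch just ranges over
// the map. This sketch shows the same choice for map[string]int with a toy
// text encoding.
package main

import (
	"fmt"
	"slices"
	"strings"
)

func encodeMap(v map[string]int, canonical bool) string {
	var b strings.Builder
	if canonical {
		keys := make([]string, 0, len(v))
		for k := range v {
			keys = append(keys, k)
		}
		slices.Sort(keys) // fixed key order gives byte-for-byte stable output
		for _, k := range keys {
			fmt.Fprintf(&b, "%s=%d;", k, v[k])
		}
	} else {
		for k, val := range v { // map iteration order is unspecified
			fmt.Fprintf(&b, "%s=%d;", k, val)
		}
	}
	return b.String()
}

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}
	fmt.Println(encodeMap(m, true))  // always "a=1;b=2;c=3;"
	fmt.Println(encodeMap(m, false)) // same entries, order may vary per run
}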
+ if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint64Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (fastpathET[T]) EncMapUint64Uint8V(v map[uint64]uint8, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint64Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (fastpathET[T]) EncMapUint64Uint64V(v map[uint64]uint64, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint64IntR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (fastpathET[T]) EncMapUint64IntV(v map[uint64]int, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint64Int32R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (fastpathET[T]) EncMapUint64Int32V(v map[uint64]int32, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 
+ for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint64Float64R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (fastpathET[T]) EncMapUint64Float64V(v map[uint64]float64, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapUint64BoolR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (fastpathET[T]) EncMapUint64BoolV(v map[uint64]bool, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapIntIntfR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (fastpathET[T]) EncMapIntIntfV(v map[int]interface{}, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapIntStringR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (fastpathET[T]) EncMapIntStringV(v map[int]string, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapIntBytesR(f *encFnInfo, rv reflect.Value) { + 
fastpathET[T]{}.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e) +} +func (fastpathET[T]) EncMapIntBytesV(v map[int][]byte, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapIntUint8R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (fastpathET[T]) EncMapIntUint8V(v map[int]uint8, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapIntUint64R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (fastpathET[T]) EncMapIntUint64V(v map[int]uint64, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapIntIntR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (fastpathET[T]) EncMapIntIntV(v map[int]int, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapIntInt32R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (fastpathET[T]) EncMapIntInt32V(v map[int]int32, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + 
e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapIntFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (fastpathET[T]) EncMapIntFloat64V(v map[int]float64, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapIntBoolR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (fastpathET[T]) EncMapIntBoolV(v map[int]bool, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapInt32IntfR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (fastpathET[T]) EncMapInt32IntfV(v map[int32]interface{}, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapInt32StringR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (fastpathET[T]) EncMapInt32StringV(v map[int32]string, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + 
e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapInt32BytesR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e) +} +func (fastpathET[T]) EncMapInt32BytesV(v map[int32][]byte, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapInt32Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (fastpathET[T]) EncMapInt32Uint8V(v map[int32]uint8, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapInt32Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (fastpathET[T]) EncMapInt32Uint64V(v map[int32]uint64, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapInt32IntR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (fastpathET[T]) EncMapInt32IntV(v map[int32]int, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapInt32Int32R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (fastpathET[T]) EncMapInt32Int32V(v map[int32]int32, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + 
} + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapInt32Float64R(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (fastpathET[T]) EncMapInt32Float64V(v map[int32]float64, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoder[T]) fastpathEncMapInt32BoolR(f *encFnInfo, rv reflect.Value) { + fastpathET[T]{}.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (fastpathET[T]) EncMapInt32BoolV(v map[int32]bool, e *encoder[T]) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +// -- decode + +// -- -- fast path type switch +func (helperDecDriver[T]) fastpathDecodeTypeSwitch(iv interface{}, d *decoder[T]) bool { + var ft fastpathDT[T] + var changed bool + var containerLen int + switch v := iv.(type) { + case []interface{}: + ft.DecSliceIntfN(v, d) + case *[]interface{}: + var v2 []interface{} + if v2, changed = ft.DecSliceIntfY(*v, d); changed { + *v = v2 + } + case []string: + ft.DecSliceStringN(v, d) + case *[]string: + var v2 []string + if v2, changed = ft.DecSliceStringY(*v, d); changed { + *v = v2 + } + case [][]byte: + ft.DecSliceBytesN(v, d) + case *[][]byte: + var v2 [][]byte + if v2, changed = ft.DecSliceBytesY(*v, d); changed { + *v = v2 + } + case []float32: + ft.DecSliceFloat32N(v, d) + case *[]float32: + var v2 []float32 + if v2, changed = ft.DecSliceFloat32Y(*v, d); changed { + *v = v2 + } + case []float64: + ft.DecSliceFloat64N(v, d) + case *[]float64: + var v2 []float64 + if v2, changed = ft.DecSliceFloat64Y(*v, d); changed { + *v = v2 + } + case []uint8: + ft.DecSliceUint8N(v, d) + case *[]uint8: + var v2 []uint8 + if v2, changed = ft.DecSliceUint8Y(*v, d); changed { + *v = v2 + } + case []uint64: + ft.DecSliceUint64N(v, d) + case *[]uint64: + var v2 []uint64 + if v2, changed = ft.DecSliceUint64Y(*v, d); changed { + *v = v2 + } + case []int: + ft.DecSliceIntN(v, d) + case *[]int: + var v2 []int + if v2, changed = ft.DecSliceIntY(*v, d); changed { + *v = v2 
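// Standalone illustrative sketch (not part of the vendored change): the
// decode type switch above pairs each slice type with two helpers, an "N"
// variant that fills a slice value in place and a "Y" variant used for the
// *[]T cases, which may grow or replace the slice and reports changed so
// the caller stores it back with *v = v2. This sketch shows that contract
// for plain ints; the decIntsY name is invented for the example.
package main

import "fmt"

// decIntsY copies src into dst, reusing dst's backing array when it has
// enough capacity. It returns the resulting slice and whether the caller
// must write it back (length or backing array changed).
func decIntsY(dst, src []int) (out []int, changed bool) {
	if cap(dst) >= len(src) {
		out = dst[:len(src)]
		changed = len(out) != len(dst)
	} else {
		out = make([]int, len(src))
		changed = true
	}
	copy(out, src)
	return out, changed
}

func main() {
	v := make([]int, 0, 4)
	if v2, changed := decIntsY(v, []int{1, 2, 3}); changed {
		v = v2 // same write-back the *[]T cases perform
	}
	fmt.Println(v) // [1 2 3]

	if v2, changed := decIntsY(v, []int{9, 8, 7, 6, 5}); changed {
		v = v2 // capacity exceeded, a new backing array was allocated
	}
	fmt.Println(v) // [9 8 7 6 5]
}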
+ } + case []int32: + ft.DecSliceInt32N(v, d) + case *[]int32: + var v2 []int32 + if v2, changed = ft.DecSliceInt32Y(*v, d); changed { + *v = v2 + } + case []int64: + ft.DecSliceInt64N(v, d) + case *[]int64: + var v2 []int64 + if v2, changed = ft.DecSliceInt64Y(*v, d); changed { + *v = v2 + } + case []bool: + ft.DecSliceBoolN(v, d) + case *[]bool: + var v2 []bool + if v2, changed = ft.DecSliceBoolY(*v, d); changed { + *v = v2 + } + case map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + 
ft.DecMapStringIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + 
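// Standalone illustrative sketch (not part of the vendored change): the
// *map cases above size their make() calls with
// decInferLen(containerLen, d.maxInitLen(), n), where the declared container
// length is only a hint and the last argument looks like a per-entry size
// estimate. inferLen below captures that idea, clamping the pre-allocation
// by a byte budget so a hostile length prefix cannot force a huge make();
// the exact formula is an assumption, not the library's.
package main

import "fmt"

// inferLen returns a safe initial capacity for a container whose encoded
// stream claims n entries, given an estimated per-entry size in bytes and a
// cap on how many bytes to pre-allocate up front.
func inferLen(n, maxBytes, unitSize int) int {
	if n <= 0 {
		return 8 // unknown or bogus length: start small and let it grow
	}
	if maxBytes <= 0 {
		maxBytes = 1024
	}
	if unitSize <= 0 {
		unitSize = 1
	}
	if n*unitSize > maxBytes {
		return maxBytes / unitSize // never trust a huge declared length
	}
	return n
}

func main() {
	fmt.Println(inferLen(4, 1024, 24))         // 4: small claims are taken at face value
	fmt.Println(inferLen(1_000_000, 1024, 24)) // 42: clamped by the byte budget
	m := make(map[string]int, inferLen(4, 1024, 24))
	m["ok"] = 1
	fmt.Println(len(m)) // 1
}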
if *v == nil { + *v = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]string: + if 
containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { 
+ if containerLen != 0 { + ft.DecMapUint64BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); 
containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*v, containerLen, d) + } + d.mapEnd() + 
} + case map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*v, containerLen, d) + } + d.mapEnd() + } + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +// -- -- fast path functions + +func (d *decoder[T]) fastpathDecSliceIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]interface{}) + if vv, changed := ft.DecSliceIntfY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []interface{} + rvGetSlice4Array(rv, &v) + ft.DecSliceIntfN(v, d) + default: + ft.DecSliceIntfN(rv2i(rv).([]interface{}), d) + } +} +func (fastpathDT[T]) DecSliceIntfY(v []interface{}, d *decoder[T]) (v2 []interface{}, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = 
d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []interface{}) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]interface{}, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + d.decode(&v[uint(j)]) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]interface{}{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDT[T]) DecSliceIntfN(v []interface{}, d *decoder[T]) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + d.decode(&v[uint(j)]) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoder[T]) fastpathDecSliceStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]string) + if vv, changed := ft.DecSliceStringY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []string + rvGetSlice4Array(rv, &v) + ft.DecSliceStringN(v, d) + default: + ft.DecSliceStringN(rv2i(rv).([]string), d) + } +} +func (fastpathDT[T]) DecSliceStringY(v []string, d *decoder[T]) (v2 []string, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []string) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]string, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, "")) + } + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]string{}) + } + if isArray { + d.arrayEnd() + } else { + 
d.mapEnd() + } + return v, changed +} +func (fastpathDT[T]) DecSliceStringN(v []string, d *decoder[T]) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoder[T]) fastpathDecSliceBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[][]byte) + if vv, changed := ft.DecSliceBytesY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v [][]byte + rvGetSlice4Array(rv, &v) + ft.DecSliceBytesN(v, d) + default: + ft.DecSliceBytesN(rv2i(rv).([][]byte), d) + } +} +func (fastpathDT[T]) DecSliceBytesY(v [][]byte, d *decoder[T]) (v2 [][]byte, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst [][]byte) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 24)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([][]byte, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([][]byte{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDT[T]) DecSliceBytesN(v [][]byte, d *decoder[T]) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func 
(d *decoder[T]) fastpathDecSliceFloat32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float32) + if vv, changed := ft.DecSliceFloat32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float32 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat32N(v, d) + default: + ft.DecSliceFloat32N(rv2i(rv).([]float32), d) + } +} +func (fastpathDT[T]) DecSliceFloat32Y(v []float32, d *decoder[T]) (v2 []float32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = float32(d.d.DecodeFloat32()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDT[T]) DecSliceFloat32N(v []float32, d *decoder[T]) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = float32(d.d.DecodeFloat32()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoder[T]) fastpathDecSliceFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float64) + if vv, changed := ft.DecSliceFloat64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float64 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat64N(v, d) + default: + ft.DecSliceFloat64N(rv2i(rv).([]float64), d) + } +} +func (fastpathDT[T]) DecSliceFloat64Y(v []float64, d *decoder[T]) (v2 []float64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var 
j int + fnv := func(dst []float64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeFloat64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDT[T]) DecSliceFloat64N(v []float64, d *decoder[T]) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeFloat64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoder[T]) fastpathDecSliceUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint8) + if vv, changed := ft.DecSliceUint8Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint8 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint8N(v, d) + default: + ft.DecSliceUint8N(rv2i(rv).([]uint8), d) + } +} +func (fastpathDT[T]) DecSliceUint8Y(v []uint8, d *decoder[T]) (v2 []uint8, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + if ctyp != valueTypeMap { + var dbi dBytesIntoState + v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false) + return v2, dbi != dBytesIntoParamOut + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint8) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint8, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint8{}) + } + d.mapEnd() + return v, changed +} +func (fastpathDT[T]) DecSliceUint8N(v []uint8, d *decoder[T]) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + if ctyp != valueTypeMap { + d.decodeBytesInto(v[:len(v):len(v)], true) + return + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + for j := 0; 
d.containerNext(j, containerLenS, hasLen); j++ { + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + d.mapEnd() +} + +func (d *decoder[T]) fastpathDecSliceUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint64) + if vv, changed := ft.DecSliceUint64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint64 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint64N(v, d) + default: + ft.DecSliceUint64N(rv2i(rv).([]uint64), d) + } +} +func (fastpathDT[T]) DecSliceUint64Y(v []uint64, d *decoder[T]) (v2 []uint64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeUint64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDT[T]) DecSliceUint64N(v []uint64, d *decoder[T]) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeUint64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoder[T]) fastpathDecSliceIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int) + if vv, changed := ft.DecSliceIntY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int + rvGetSlice4Array(rv, &v) + ft.DecSliceIntN(v, d) + default: + ft.DecSliceIntN(rv2i(rv).([]int), d) + } +} +func (fastpathDT[T]) DecSliceIntY(v []int, d *decoder[T]) (v2 []int, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } 
else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDT[T]) DecSliceIntN(v []int, d *decoder[T]) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoder[T]) fastpathDecSliceInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int32) + if vv, changed := ft.DecSliceInt32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int32 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt32N(v, d) + default: + ft.DecSliceInt32N(rv2i(rv).([]int32), d) + } +} +func (fastpathDT[T]) DecSliceInt32Y(v []int32, d *decoder[T]) (v2 []int32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 
&& v == nil { + fnv([]int32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDT[T]) DecSliceInt32N(v []int32, d *decoder[T]) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoder[T]) fastpathDecSliceInt64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int64) + if vv, changed := ft.DecSliceInt64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int64 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt64N(v, d) + default: + ft.DecSliceInt64N(rv2i(rv).([]int64), d) + } +} +func (fastpathDT[T]) DecSliceInt64Y(v []int64, d *decoder[T]) (v2 []int64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeInt64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDT[T]) DecSliceInt64N(v []int64, d *decoder[T]) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeInt64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + 
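
The hunks above regenerate the slice fast paths in the vendored ugorji/go/codec decoder; the hunks that follow finish that set (the bool slice helpers) and continue with the generated map fast paths such as fastpathDecMapStringUint64R. For context, none of these helpers are called directly: they are reached through the package's normal public API whenever the destination is one of the concrete slice/map types listed here. A minimal usage sketch, assuming only the standard entry points of github.com/ugorji/go/codec (the handle choice and the payload/out names are illustrative, not part of this patch):

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    func main() {
        var h codec.JsonHandle // any Handle (JSON, msgpack, cbor, ...) works the same way
        payload := []byte(`{"a":1,"b":2}`)

        // Decoding into a concrete map[string]uint64 is (roughly) what the generated
        // fast path above accelerates, avoiding per-element reflection.
        out := make(map[string]uint64)
        if err := codec.NewDecoderBytes(payload, &h).Decode(&out); err != nil {
            panic(err)
        }
        fmt.Println(out) // map[a:1 b:2]
    }
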
+func (d *decoder[T]) fastpathDecSliceBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]bool) + if vv, changed := ft.DecSliceBoolY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []bool + rvGetSlice4Array(rv, &v) + ft.DecSliceBoolN(v, d) + default: + ft.DecSliceBoolN(rv2i(rv).([]bool), d) + } +} +func (fastpathDT[T]) DecSliceBoolY(v []bool, d *decoder[T]) (v2 []bool, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []bool) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]bool, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, false)) + } + v[uint(j)] = d.d.DecodeBool() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]bool{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDT[T]) DecSliceBoolN(v []bool, d *decoder[T]) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeBool() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} +func (d *decoder[T]) fastpathDecMapStringIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]interface{}) + if *vp == nil { + *vp = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntfL(rv2i(rv).(map[string]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapStringIntfL(v map[string]interface{}, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := 
d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoder[T]) fastpathDecMapStringStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]string) + if *vp == nil { + *vp = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringStringL(rv2i(rv).(map[string]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapStringStringL(v map[string]string, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoder[T]) fastpathDecMapStringBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string][]byte) + if *vp == nil { + *vp = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBytesL(rv2i(rv).(map[string][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapStringBytesL(v map[string][]byte, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[string][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoder[T]) fastpathDecMapStringUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint8) + if *vp == nil { + *vp = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint8L(rv2i(rv).(map[string]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapStringUint8L(v map[string]uint8, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoder[T]) fastpathDecMapStringUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint64) + if *vp == nil { + *vp = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { 
+ ft.DecMapStringUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint64L(rv2i(rv).(map[string]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapStringUint64L(v map[string]uint64, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoder[T]) fastpathDecMapStringIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int) + if *vp == nil { + *vp = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntL(rv2i(rv).(map[string]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapStringIntL(v map[string]int, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoder[T]) fastpathDecMapStringInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int32) + if *vp == nil { + *vp = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringInt32L(rv2i(rv).(map[string]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapStringInt32L(v map[string]int32, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoder[T]) fastpathDecMapStringFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]float64) + if *vp == nil { + *vp = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringFloat64L(rv2i(rv).(map[string]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapStringFloat64L(v map[string]float64, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func 
(d *decoder[T]) fastpathDecMapStringBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]bool) + if *vp == nil { + *vp = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBoolL(rv2i(rv).(map[string]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapStringBoolL(v map[string]bool, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoder[T]) fastpathDecMapUint8IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]interface{}) + if *vp == nil { + *vp = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntfL(rv2i(rv).(map[uint8]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint8IntfL(v map[uint8]interface{}, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoder[T]) fastpathDecMapUint8StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]string) + if *vp == nil { + *vp = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8StringL(rv2i(rv).(map[uint8]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint8StringL(v map[uint8]string, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoder[T]) fastpathDecMapUint8BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8][]byte) + if *vp == nil { + *vp = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BytesL(rv2i(rv).(map[uint8][]byte), containerLen, d) + } + d.mapEnd() 
+} +func (fastpathDT[T]) DecMapUint8BytesL(v map[uint8][]byte, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoder[T]) fastpathDecMapUint8Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint8) + if *vp == nil { + *vp = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint8L(rv2i(rv).(map[uint8]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint8Uint8L(v map[uint8]uint8, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoder[T]) fastpathDecMapUint8Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint64) + if *vp == nil { + *vp = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint64L(rv2i(rv).(map[uint8]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint8Uint64L(v map[uint8]uint64, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoder[T]) fastpathDecMapUint8IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int) + if *vp == nil { + *vp = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntL(rv2i(rv).(map[uint8]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint8IntL(v map[uint8]int, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoder[T]) fastpathDecMapUint8Int32R(f *decFnInfo, rv reflect.Value) { + var ft 
fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int32) + if *vp == nil { + *vp = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Int32L(rv2i(rv).(map[uint8]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint8Int32L(v map[uint8]int32, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoder[T]) fastpathDecMapUint8Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]float64) + if *vp == nil { + *vp = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Float64L(rv2i(rv).(map[uint8]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint8Float64L(v map[uint8]float64, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoder[T]) fastpathDecMapUint8BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]bool) + if *vp == nil { + *vp = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BoolL(rv2i(rv).(map[uint8]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint8BoolL(v map[uint8]bool, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoder[T]) fastpathDecMapUint64IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]interface{}) + if *vp == nil { + *vp = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntfL(rv2i(rv).(map[uint64]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint64IntfL(v map[uint64]interface{}, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]interface{} given stream length: ", 
int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoder[T]) fastpathDecMapUint64StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]string) + if *vp == nil { + *vp = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64StringL(rv2i(rv).(map[uint64]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint64StringL(v map[uint64]string, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoder[T]) fastpathDecMapUint64BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64][]byte) + if *vp == nil { + *vp = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BytesL(rv2i(rv).(map[uint64][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint64BytesL(v map[uint64][]byte, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoder[T]) fastpathDecMapUint64Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint8) + if *vp == nil { + *vp = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint8L(rv2i(rv).(map[uint64]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint64Uint8L(v map[uint64]uint8, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoder[T]) fastpathDecMapUint64Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := 
rv2i(rv).(*map[uint64]uint64) + if *vp == nil { + *vp = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint64L(rv2i(rv).(map[uint64]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint64Uint64L(v map[uint64]uint64, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoder[T]) fastpathDecMapUint64IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int) + if *vp == nil { + *vp = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint64IntL(v map[uint64]int, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoder[T]) fastpathDecMapUint64Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int32) + if *vp == nil { + *vp = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint64Int32L(v map[uint64]int32, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoder[T]) fastpathDecMapUint64Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]float64) + if *vp == nil { + *vp = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := 
d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoder[T]) fastpathDecMapUint64BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]bool) + if *vp == nil { + *vp = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoder[T]) fastpathDecMapIntIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]interface{}) + if *vp == nil { + *vp = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoder[T]) fastpathDecMapIntStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]string) + if *vp == nil { + *vp = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapIntStringL(v map[int]string, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoder[T]) fastpathDecMapIntBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int][]byte) + if *vp == nil { + *vp = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, 
d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapIntBytesL(v map[int][]byte, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoder[T]) fastpathDecMapIntUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint8) + if *vp == nil { + *vp = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapIntUint8L(v map[int]uint8, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoder[T]) fastpathDecMapIntUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint64) + if *vp == nil { + *vp = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapIntUint64L(v map[int]uint64, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoder[T]) fastpathDecMapIntIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int) + if *vp == nil { + *vp = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapIntIntL(v map[int]int, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoder[T]) fastpathDecMapIntInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + 
containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int32) + if *vp == nil { + *vp = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapIntInt32L(v map[int]int32, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoder[T]) fastpathDecMapIntFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]float64) + if *vp == nil { + *vp = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapIntFloat64L(v map[int]float64, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoder[T]) fastpathDecMapIntBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]bool) + if *vp == nil { + *vp = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapIntBoolL(v map[int]bool, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoder[T]) fastpathDecMapInt32IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]interface{}) + if *vp == nil { + *vp = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := 
!d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoder[T]) fastpathDecMapInt32StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]string) + if *vp == nil { + *vp = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapInt32StringL(v map[int32]string, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoder[T]) fastpathDecMapInt32BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32][]byte) + if *vp == nil { + *vp = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoder[T]) fastpathDecMapInt32Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint8) + if *vp == nil { + *vp = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoder[T]) fastpathDecMapInt32Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := 
rv2i(rv).(*map[int32]uint64) + if *vp == nil { + *vp = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoder[T]) fastpathDecMapInt32IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int) + if *vp == nil { + *vp = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapInt32IntL(v map[int32]int, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoder[T]) fastpathDecMapInt32Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int32) + if *vp == nil { + *vp = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapInt32Int32L(v map[int32]int32, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoder[T]) fastpathDecMapInt32Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]float64) + if *vp == nil { + *vp = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + 
d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoder[T]) fastpathDecMapInt32BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]bool) + if *vp == nil { + *vp = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) DecMapInt32BoolL(v map[int32]bool, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/base.notfastpath.go similarity index 62% rename from vendor/github.com/ugorji/go/codec/fast-path.not.go rename to vendor/github.com/ugorji/go/codec/base.notfastpath.go index 3fcc8fd3a..b4c41fb9f 100644 --- a/vendor/github.com/ugorji/go/codec/fast-path.not.go +++ b/vendor/github.com/ugorji/go/codec/base.notfastpath.go @@ -2,7 +2,6 @@ // Use of this source code is governed by a MIT license found in the LICENSE file. //go:build notfastpath || codec.notfastpath -// +build notfastpath codec.notfastpath package codec @@ -18,24 +17,18 @@ const fastpathEnabled = false // This tag disables fastpath during build, allowing for faster build, test execution, // short-program runs, etc. -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } - // func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } // func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false } -type fastpathT struct{} -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*Encoder, *codecFnInfo, reflect.Value) - decfn func(*Decoder, *codecFnInfo, reflect.Value) +func fastpathAvIndex(rtid uintptr) (uint, bool) { return 0, false } + +type fastpathRtRtid struct { + rtid uintptr + rt reflect.Type } -type fastpathA [0]fastpathE -func fastpathAvIndex(rtid uintptr) int { return -1 } +type fastpathARtRtid [0]fastpathRtRtid -var fastpathAv fastpathA -var fastpathTV fastpathT +var fastpathAvRtRtid fastpathARtRtid diff --git a/vendor/github.com/ugorji/go/codec/base.notfastpath.notmono.go b/vendor/github.com/ugorji/go/codec/base.notfastpath.notmono.go new file mode 100644 index 000000000..a692849b5 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/base.notfastpath.notmono.go @@ -0,0 +1,26 @@ +//go:build notfastpath || (codec.notfastpath && (notmono || codec.notmono)) + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
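The renamed stub above (base.notfastpath.go, together with the new base.notfastpath.notmono.go whose header appears just before this point) is only compiled when the `notfastpath` or `codec.notfastpath` build tag is set; its lookups return nothing, so encode/decode of the listed types falls back to reflection instead of the generated fast paths. A minimal round trip over one of the fast-path types is sketched below — the handle choice, variable names, and values are illustrative, not part of this change. Built normally it exercises the generated tables; built with `go build -tags codec.notfastpath` it compiles the stubs instead, trading encode/decode speed for faster builds and a smaller binary.

```go
// Illustrative only: map[string]int is one of the types covered by the
// generated fast paths. With `-tags codec.notfastpath` (or `notfastpath`)
// the same calls take the reflection-based path via the stub files above.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := new(codec.SimpleHandle) // any Handle works; SimpleHandle keeps the example short

	in := map[string]int{"a": 1, "b": 2}
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, h).Encode(in); err != nil {
		panic(err)
	}

	out := map[string]int{}
	if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[a:1 b:2]
}
```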
+ +package codec + +import "reflect" + +// type fastpathT struct{} +type fastpathE[T encDriver] struct { + rt reflect.Type + encfn func(*encoder[T], *encFnInfo, reflect.Value) +} +type fastpathD[T decDriver] struct { + rt reflect.Type + decfn func(*decoder[T], *decFnInfo, reflect.Value) +} +type fastpathEs[T encDriver] [0]fastpathE[T] +type fastpathDs[T decDriver] [0]fastpathD[T] + +func (helperEncDriver[T]) fastpathEncodeTypeSwitch(iv interface{}, e *encoder[T]) bool { return false } +func (helperDecDriver[T]) fastpathDecodeTypeSwitch(iv interface{}, d *decoder[T]) bool { return false } + +func (helperEncDriver[T]) fastpathEList() (v *fastpathEs[T]) { return } +func (helperDecDriver[T]) fastpathDList() (v *fastpathDs[T]) { return } diff --git a/vendor/github.com/ugorji/go/codec/binc.base.go b/vendor/github.com/ugorji/go/codec/binc.base.go new file mode 100644 index 000000000..29da5acdb --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/binc.base.go @@ -0,0 +1,194 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" + "time" +) + +// Symbol management: +// - symbols are stored in a symbol map during encoding and decoding. +// - the symbols persist until the (En|De)coder ResetXXX method is called. + +const bincDoPrune = true + +// vd as low 4 bits (there are 16 slots) +const ( + bincVdSpecial byte = iota + bincVdPosInt + bincVdNegInt + bincVdFloat + + bincVdString + bincVdByteArray + bincVdArray + bincVdMap + + bincVdTimestamp + bincVdSmallInt + _ // bincVdUnicodeOther + bincVdSymbol + + _ // bincVdDecimal + _ // open slot + _ // open slot + bincVdCustomExt = 0x0f +) + +const ( + bincSpNil byte = iota + bincSpFalse + bincSpTrue + bincSpNan + bincSpPosInf + bincSpNegInf + bincSpZeroFloat + bincSpZero + bincSpNegOne +) + +const ( + _ byte = iota // bincFlBin16 + bincFlBin32 + _ // bincFlBin32e + bincFlBin64 + _ // bincFlBin64e + // others not currently supported +) + +const bincBdNil = 0 // bincVdSpecial<<4 | bincSpNil // staticcheck barfs on this (SA4016) + +var ( + bincdescSpecialVsNames = map[byte]string{ + bincSpNil: "nil", + bincSpFalse: "false", + bincSpTrue: "true", + bincSpNan: "float", + bincSpPosInf: "float", + bincSpNegInf: "float", + bincSpZeroFloat: "float", + bincSpZero: "uint", + bincSpNegOne: "int", + } + bincdescVdNames = map[byte]string{ + bincVdSpecial: "special", + bincVdSmallInt: "uint", + bincVdPosInt: "uint", + bincVdFloat: "float", + bincVdSymbol: "string", + bincVdString: "string", + bincVdByteArray: "bytes", + bincVdTimestamp: "time", + bincVdCustomExt: "ext", + bincVdArray: "array", + bincVdMap: "map", + } +) + +func bincdescbd(bd byte) (s string) { + return bincdesc(bd>>4, bd&0x0f) +} + +func bincdesc(vd, vs byte) (s string) { + if vd == bincVdSpecial { + s = bincdescSpecialVsNames[vs] + } else { + s = bincdescVdNames[vd] + } + if s == "" { + s = "unknown" + } + return +} + +type bincEncState struct { + m map[string]uint16 // symbols +} + +// func (e *bincEncState) restoreState(v interface{}) { e.m = v.(map[string]uint16) } +// func (e bincEncState) captureState() interface{} { return e.m } +// func (e *bincEncState) resetState() { e.m = nil } +// func (e *bincEncState) reset() { e.resetState() } +func (e *bincEncState) reset() { e.m = nil } + +type bincDecState struct { + bdRead bool + bd byte + vd byte + vs byte + + _ bool + // MARKER: consider using binary search here instead of a map (ie bincDecSymbol) + s 
map[uint16][]byte +} + +// func (x bincDecState) captureState() interface{} { return x } +// func (x *bincDecState) resetState() { *x = bincDecState{} } +// func (x *bincDecState) reset() { x.resetState() } +// func (x *bincDecState) restoreState(v interface{}) { *x = v.(bincDecState) } +func (x *bincDecState) reset() { *x = bincDecState{} } + +//------------------------------------ + +// BincHandle is a Handle for the Binc Schema-Free Encoding Format +// defined at https://github.com/ugorji/binc . +// +// BincHandle currently supports all Binc features with the following EXCEPTIONS: +// - only integers up to 64 bits of precision are supported. +// big integers are unsupported. +// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). +// extended precision and decimal IEEE 754 floats are unsupported. +// - Only UTF-8 strings supported. +// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +// +// Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. +type BincHandle struct { + binaryEncodingType + notJsonType + // noElemSeparators + BasicHandle + + // AsSymbols defines what should be encoded as symbols. + // + // Encoding as symbols can reduce the encoded size significantly. + // + // However, during decoding, each string to be encoded as a symbol must + // be checked to see if it has been seen before. Consequently, encoding time + // will increase if using symbols, because string comparisons has a clear cost. + // + // Values: + // - 0: default: library uses best judgement + // - 1: use symbols + // - 2: do not use symbols + AsSymbols uint8 + + // AsSymbols: may later on introduce more options ... + // - m: map keys + // - s: struct fields + // - n: none + // - a: all: same as m, s, ... + + // _ [7]uint64 // padding (cache-aligned) +} + +// Name returns the name of the handle: binc +func (h *BincHandle) Name() string { return "binc" } + +func (h *BincHandle) desc(bd byte) string { return bincdesc(bd>>4, bd&0x0f) } + +// SetBytesExt sets an extension +func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, makeExt(ext)) +} + +// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} + +func bincEncodeTime(t time.Time) []byte { + return customEncodeTime(t) +} + +func bincDecodeTime(bs []byte) (tt time.Time, err error) { + return customDecodeTime(bs) +} diff --git a/vendor/github.com/ugorji/go/codec/binc.fastpath.mono.generated.go b/vendor/github.com/ugorji/go/codec/binc.fastpath.mono.generated.go new file mode 100644 index 000000000..7721027e1 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/binc.fastpath.mono.generated.go @@ -0,0 +1,12482 @@ +//go:build !notmono && !codec.notmono && !notfastpath && !codec.notfastpath + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
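The binc.base.go file above documents BincHandle and its AsSymbols knob (0: library default, 1: encode as symbols, 2: do not), where symbol encoding can shrink output when the same strings recur at the cost of extra bookkeeping. A hedged usage sketch follows; the struct, values, and buffer names are invented for the example, while BincHandle, AsSymbols, and the Encoder/Decoder constructors come from the codec package itself.

```go
// Illustrative sketch: encode and decode with BincHandle, opting into symbols.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type note struct {
	Tag  string
	Body string
}

func main() {
	h := new(codec.BincHandle)
	h.AsSymbols = 1 // 0: library default, 1: use symbols, 2: do not use symbols

	in := []note{{Tag: "todo", Body: "ship it"}, {Tag: "todo", Body: "again"}}

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, h).Encode(in); err != nil {
		panic(err)
	}

	var out []note
	if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
		panic(err)
	}
	// Repeated strings (here the "Tag" field names and "todo" values) may be
	// written once and referenced as symbols, reducing len(buf).
	fmt.Println(len(buf), out)
}
```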
+ +package codec + +import ( + "reflect" + "slices" + "sort" +) + +type fastpathEBincBytes struct { + rtid uintptr + rt reflect.Type + encfn func(*encoderBincBytes, *encFnInfo, reflect.Value) +} +type fastpathDBincBytes struct { + rtid uintptr + rt reflect.Type + decfn func(*decoderBincBytes, *decFnInfo, reflect.Value) +} +type fastpathEsBincBytes [56]fastpathEBincBytes +type fastpathDsBincBytes [56]fastpathDBincBytes +type fastpathETBincBytes struct{} +type fastpathDTBincBytes struct{} + +func (helperEncDriverBincBytes) fastpathEList() *fastpathEsBincBytes { + var i uint = 0 + var s fastpathEsBincBytes + fn := func(v interface{}, fe func(*encoderBincBytes, *encFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathEBincBytes{rt2id(xrt), xrt, fe} + i++ + } + + fn([]interface{}(nil), (*encoderBincBytes).fastpathEncSliceIntfR) + fn([]string(nil), (*encoderBincBytes).fastpathEncSliceStringR) + fn([][]byte(nil), (*encoderBincBytes).fastpathEncSliceBytesR) + fn([]float32(nil), (*encoderBincBytes).fastpathEncSliceFloat32R) + fn([]float64(nil), (*encoderBincBytes).fastpathEncSliceFloat64R) + fn([]uint8(nil), (*encoderBincBytes).fastpathEncSliceUint8R) + fn([]uint64(nil), (*encoderBincBytes).fastpathEncSliceUint64R) + fn([]int(nil), (*encoderBincBytes).fastpathEncSliceIntR) + fn([]int32(nil), (*encoderBincBytes).fastpathEncSliceInt32R) + fn([]int64(nil), (*encoderBincBytes).fastpathEncSliceInt64R) + fn([]bool(nil), (*encoderBincBytes).fastpathEncSliceBoolR) + + fn(map[string]interface{}(nil), (*encoderBincBytes).fastpathEncMapStringIntfR) + fn(map[string]string(nil), (*encoderBincBytes).fastpathEncMapStringStringR) + fn(map[string][]byte(nil), (*encoderBincBytes).fastpathEncMapStringBytesR) + fn(map[string]uint8(nil), (*encoderBincBytes).fastpathEncMapStringUint8R) + fn(map[string]uint64(nil), (*encoderBincBytes).fastpathEncMapStringUint64R) + fn(map[string]int(nil), (*encoderBincBytes).fastpathEncMapStringIntR) + fn(map[string]int32(nil), (*encoderBincBytes).fastpathEncMapStringInt32R) + fn(map[string]float64(nil), (*encoderBincBytes).fastpathEncMapStringFloat64R) + fn(map[string]bool(nil), (*encoderBincBytes).fastpathEncMapStringBoolR) + fn(map[uint8]interface{}(nil), (*encoderBincBytes).fastpathEncMapUint8IntfR) + fn(map[uint8]string(nil), (*encoderBincBytes).fastpathEncMapUint8StringR) + fn(map[uint8][]byte(nil), (*encoderBincBytes).fastpathEncMapUint8BytesR) + fn(map[uint8]uint8(nil), (*encoderBincBytes).fastpathEncMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*encoderBincBytes).fastpathEncMapUint8Uint64R) + fn(map[uint8]int(nil), (*encoderBincBytes).fastpathEncMapUint8IntR) + fn(map[uint8]int32(nil), (*encoderBincBytes).fastpathEncMapUint8Int32R) + fn(map[uint8]float64(nil), (*encoderBincBytes).fastpathEncMapUint8Float64R) + fn(map[uint8]bool(nil), (*encoderBincBytes).fastpathEncMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*encoderBincBytes).fastpathEncMapUint64IntfR) + fn(map[uint64]string(nil), (*encoderBincBytes).fastpathEncMapUint64StringR) + fn(map[uint64][]byte(nil), (*encoderBincBytes).fastpathEncMapUint64BytesR) + fn(map[uint64]uint8(nil), (*encoderBincBytes).fastpathEncMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*encoderBincBytes).fastpathEncMapUint64Uint64R) + fn(map[uint64]int(nil), (*encoderBincBytes).fastpathEncMapUint64IntR) + fn(map[uint64]int32(nil), (*encoderBincBytes).fastpathEncMapUint64Int32R) + fn(map[uint64]float64(nil), (*encoderBincBytes).fastpathEncMapUint64Float64R) + fn(map[uint64]bool(nil), (*encoderBincBytes).fastpathEncMapUint64BoolR) + 
fn(map[int]interface{}(nil), (*encoderBincBytes).fastpathEncMapIntIntfR) + fn(map[int]string(nil), (*encoderBincBytes).fastpathEncMapIntStringR) + fn(map[int][]byte(nil), (*encoderBincBytes).fastpathEncMapIntBytesR) + fn(map[int]uint8(nil), (*encoderBincBytes).fastpathEncMapIntUint8R) + fn(map[int]uint64(nil), (*encoderBincBytes).fastpathEncMapIntUint64R) + fn(map[int]int(nil), (*encoderBincBytes).fastpathEncMapIntIntR) + fn(map[int]int32(nil), (*encoderBincBytes).fastpathEncMapIntInt32R) + fn(map[int]float64(nil), (*encoderBincBytes).fastpathEncMapIntFloat64R) + fn(map[int]bool(nil), (*encoderBincBytes).fastpathEncMapIntBoolR) + fn(map[int32]interface{}(nil), (*encoderBincBytes).fastpathEncMapInt32IntfR) + fn(map[int32]string(nil), (*encoderBincBytes).fastpathEncMapInt32StringR) + fn(map[int32][]byte(nil), (*encoderBincBytes).fastpathEncMapInt32BytesR) + fn(map[int32]uint8(nil), (*encoderBincBytes).fastpathEncMapInt32Uint8R) + fn(map[int32]uint64(nil), (*encoderBincBytes).fastpathEncMapInt32Uint64R) + fn(map[int32]int(nil), (*encoderBincBytes).fastpathEncMapInt32IntR) + fn(map[int32]int32(nil), (*encoderBincBytes).fastpathEncMapInt32Int32R) + fn(map[int32]float64(nil), (*encoderBincBytes).fastpathEncMapInt32Float64R) + fn(map[int32]bool(nil), (*encoderBincBytes).fastpathEncMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperDecDriverBincBytes) fastpathDList() *fastpathDsBincBytes { + var i uint = 0 + var s fastpathDsBincBytes + fn := func(v interface{}, fd func(*decoderBincBytes, *decFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathDBincBytes{rt2id(xrt), xrt, fd} + i++ + } + + fn([]interface{}(nil), (*decoderBincBytes).fastpathDecSliceIntfR) + fn([]string(nil), (*decoderBincBytes).fastpathDecSliceStringR) + fn([][]byte(nil), (*decoderBincBytes).fastpathDecSliceBytesR) + fn([]float32(nil), (*decoderBincBytes).fastpathDecSliceFloat32R) + fn([]float64(nil), (*decoderBincBytes).fastpathDecSliceFloat64R) + fn([]uint8(nil), (*decoderBincBytes).fastpathDecSliceUint8R) + fn([]uint64(nil), (*decoderBincBytes).fastpathDecSliceUint64R) + fn([]int(nil), (*decoderBincBytes).fastpathDecSliceIntR) + fn([]int32(nil), (*decoderBincBytes).fastpathDecSliceInt32R) + fn([]int64(nil), (*decoderBincBytes).fastpathDecSliceInt64R) + fn([]bool(nil), (*decoderBincBytes).fastpathDecSliceBoolR) + + fn(map[string]interface{}(nil), (*decoderBincBytes).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*decoderBincBytes).fastpathDecMapStringStringR) + fn(map[string][]byte(nil), (*decoderBincBytes).fastpathDecMapStringBytesR) + fn(map[string]uint8(nil), (*decoderBincBytes).fastpathDecMapStringUint8R) + fn(map[string]uint64(nil), (*decoderBincBytes).fastpathDecMapStringUint64R) + fn(map[string]int(nil), (*decoderBincBytes).fastpathDecMapStringIntR) + fn(map[string]int32(nil), (*decoderBincBytes).fastpathDecMapStringInt32R) + fn(map[string]float64(nil), (*decoderBincBytes).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*decoderBincBytes).fastpathDecMapStringBoolR) + fn(map[uint8]interface{}(nil), (*decoderBincBytes).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*decoderBincBytes).fastpathDecMapUint8StringR) + fn(map[uint8][]byte(nil), (*decoderBincBytes).fastpathDecMapUint8BytesR) + fn(map[uint8]uint8(nil), (*decoderBincBytes).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*decoderBincBytes).fastpathDecMapUint8Uint64R) + fn(map[uint8]int(nil), (*decoderBincBytes).fastpathDecMapUint8IntR) + 
fn(map[uint8]int32(nil), (*decoderBincBytes).fastpathDecMapUint8Int32R) + fn(map[uint8]float64(nil), (*decoderBincBytes).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*decoderBincBytes).fastpathDecMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*decoderBincBytes).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*decoderBincBytes).fastpathDecMapUint64StringR) + fn(map[uint64][]byte(nil), (*decoderBincBytes).fastpathDecMapUint64BytesR) + fn(map[uint64]uint8(nil), (*decoderBincBytes).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*decoderBincBytes).fastpathDecMapUint64Uint64R) + fn(map[uint64]int(nil), (*decoderBincBytes).fastpathDecMapUint64IntR) + fn(map[uint64]int32(nil), (*decoderBincBytes).fastpathDecMapUint64Int32R) + fn(map[uint64]float64(nil), (*decoderBincBytes).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*decoderBincBytes).fastpathDecMapUint64BoolR) + fn(map[int]interface{}(nil), (*decoderBincBytes).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*decoderBincBytes).fastpathDecMapIntStringR) + fn(map[int][]byte(nil), (*decoderBincBytes).fastpathDecMapIntBytesR) + fn(map[int]uint8(nil), (*decoderBincBytes).fastpathDecMapIntUint8R) + fn(map[int]uint64(nil), (*decoderBincBytes).fastpathDecMapIntUint64R) + fn(map[int]int(nil), (*decoderBincBytes).fastpathDecMapIntIntR) + fn(map[int]int32(nil), (*decoderBincBytes).fastpathDecMapIntInt32R) + fn(map[int]float64(nil), (*decoderBincBytes).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*decoderBincBytes).fastpathDecMapIntBoolR) + fn(map[int32]interface{}(nil), (*decoderBincBytes).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*decoderBincBytes).fastpathDecMapInt32StringR) + fn(map[int32][]byte(nil), (*decoderBincBytes).fastpathDecMapInt32BytesR) + fn(map[int32]uint8(nil), (*decoderBincBytes).fastpathDecMapInt32Uint8R) + fn(map[int32]uint64(nil), (*decoderBincBytes).fastpathDecMapInt32Uint64R) + fn(map[int32]int(nil), (*decoderBincBytes).fastpathDecMapInt32IntR) + fn(map[int32]int32(nil), (*decoderBincBytes).fastpathDecMapInt32Int32R) + fn(map[int32]float64(nil), (*decoderBincBytes).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*decoderBincBytes).fastpathDecMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperEncDriverBincBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderBincBytes) bool { + var ft fastpathETBincBytes + switch v := iv.(type) { + case []interface{}: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntfV(v, e) + } + case []string: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceStringV(v, e) + } + case [][]byte: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBytesV(v, e) + } + case []float32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat32V(v, e) + } + case []float64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat64V(v, e) + } + case []uint8: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint8V(v, e) + } + case []uint64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint64V(v, e) + } + case []int: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntV(v, e) + } + case []int32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt32V(v, e) + } + case []int64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt64V(v, e) + } + case []bool: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBoolV(v, e) + } + case 
map[string]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntfV(v, e) + } + case map[string]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringStringV(v, e) + } + case map[string][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBytesV(v, e) + } + case map[string]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint8V(v, e) + } + case map[string]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint64V(v, e) + } + case map[string]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntV(v, e) + } + case map[string]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringInt32V(v, e) + } + case map[string]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringFloat64V(v, e) + } + case map[string]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBoolV(v, e) + } + case map[uint8]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntfV(v, e) + } + case map[uint8]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8StringV(v, e) + } + case map[uint8][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BytesV(v, e) + } + case map[uint8]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint8V(v, e) + } + case map[uint8]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint64V(v, e) + } + case map[uint8]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntV(v, e) + } + case map[uint8]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Int32V(v, e) + } + case map[uint8]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Float64V(v, e) + } + case map[uint8]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BoolV(v, e) + } + case map[uint64]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntfV(v, e) + } + case map[uint64]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64StringV(v, e) + } + case map[uint64][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BytesV(v, e) + } + case map[uint64]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint8V(v, e) + } + case map[uint64]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint64V(v, e) + } + case map[uint64]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntV(v, e) + } + case map[uint64]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Int32V(v, e) + } + case map[uint64]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Float64V(v, e) + } + case map[uint64]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BoolV(v, e) + } + case map[int]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntfV(v, e) + } + case map[int]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntStringV(v, e) + } + case map[int][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBytesV(v, e) + } + case map[int]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint8V(v, e) + } + case map[int]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint64V(v, e) + } + case map[int]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntV(v, e) + } + case map[int]int32: + if v == nil { + e.e.writeNilMap() + } else { + 
ft.EncMapIntInt32V(v, e) + } + case map[int]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntFloat64V(v, e) + } + case map[int]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBoolV(v, e) + } + case map[int32]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntfV(v, e) + } + case map[int32]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32StringV(v, e) + } + case map[int32][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BytesV(v, e) + } + case map[int32]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint8V(v, e) + } + case map[int32]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint64V(v, e) + } + case map[int32]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntV(v, e) + } + case map[int32]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Int32V(v, e) + } + case map[int32]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Float64V(v, e) + } + case map[int32]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BoolV(v, e) + } + default: + _ = v + return false + } + return true +} + +func (e *encoderBincBytes) fastpathEncSliceIntfR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincBytes + var v []interface{} + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]interface{}) + } + if f.ti.mbs { + ft.EncAsMapSliceIntfV(v, e) + return + } + ft.EncSliceIntfV(v, e) +} +func (fastpathETBincBytes) EncSliceIntfV(v []interface{}, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincBytes) EncAsMapSliceIntfV(v []interface{}, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincBytes) fastpathEncSliceStringR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincBytes + var v []string + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]string) + } + if f.ti.mbs { + ft.EncAsMapSliceStringV(v, e) + return + } + ft.EncSliceStringV(v, e) +} +func (fastpathETBincBytes) EncSliceStringV(v []string, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincBytes) EncAsMapSliceStringV(v []string, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincBytes) fastpathEncSliceBytesR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincBytes + var v [][]byte + if rv.Kind() == reflect.Array { + 
rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([][]byte) + } + if f.ti.mbs { + ft.EncAsMapSliceBytesV(v, e) + return + } + ft.EncSliceBytesV(v, e) +} +func (fastpathETBincBytes) EncSliceBytesV(v [][]byte, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincBytes) EncAsMapSliceBytesV(v [][]byte, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincBytes) fastpathEncSliceFloat32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincBytes + var v []float32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float32) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat32V(v, e) + return + } + ft.EncSliceFloat32V(v, e) +} +func (fastpathETBincBytes) EncSliceFloat32V(v []float32, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincBytes) EncAsMapSliceFloat32V(v []float32, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincBytes) fastpathEncSliceFloat64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincBytes + var v []float64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float64) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat64V(v, e) + return + } + ft.EncSliceFloat64V(v, e) +} +func (fastpathETBincBytes) EncSliceFloat64V(v []float64, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincBytes) EncAsMapSliceFloat64V(v []float64, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincBytes) fastpathEncSliceUint8R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincBytes + var v []uint8 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint8) + } + if f.ti.mbs { + ft.EncAsMapSliceUint8V(v, e) + return + } + ft.EncSliceUint8V(v, e) +} +func (fastpathETBincBytes) EncSliceUint8V(v []uint8, e *encoderBincBytes) { + e.e.EncodeStringBytesRaw(v) +} +func (fastpathETBincBytes) EncAsMapSliceUint8V(v []uint8, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + 
for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(uint64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincBytes) fastpathEncSliceUint64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincBytes + var v []uint64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint64) + } + if f.ti.mbs { + ft.EncAsMapSliceUint64V(v, e) + return + } + ft.EncSliceUint64V(v, e) +} +func (fastpathETBincBytes) EncSliceUint64V(v []uint64, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincBytes) EncAsMapSliceUint64V(v []uint64, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincBytes) fastpathEncSliceIntR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincBytes + var v []int + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int) + } + if f.ti.mbs { + ft.EncAsMapSliceIntV(v, e) + return + } + ft.EncSliceIntV(v, e) +} +func (fastpathETBincBytes) EncSliceIntV(v []int, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincBytes) EncAsMapSliceIntV(v []int, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincBytes) fastpathEncSliceInt32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincBytes + var v []int32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int32) + } + if f.ti.mbs { + ft.EncAsMapSliceInt32V(v, e) + return + } + ft.EncSliceInt32V(v, e) +} +func (fastpathETBincBytes) EncSliceInt32V(v []int32, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincBytes) EncAsMapSliceInt32V(v []int32, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincBytes) fastpathEncSliceInt64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincBytes + var v []int64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int64) + } + if f.ti.mbs { + ft.EncAsMapSliceInt64V(v, e) + return + } + 
ft.EncSliceInt64V(v, e) +} +func (fastpathETBincBytes) EncSliceInt64V(v []int64, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincBytes) EncAsMapSliceInt64V(v []int64, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincBytes) fastpathEncSliceBoolR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincBytes + var v []bool + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]bool) + } + if f.ti.mbs { + ft.EncAsMapSliceBoolV(v, e) + return + } + ft.EncSliceBoolV(v, e) +} +func (fastpathETBincBytes) EncSliceBoolV(v []bool, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincBytes) EncAsMapSliceBoolV(v []bool, e *encoderBincBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincBytes) fastpathEncMapStringIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (fastpathETBincBytes) EncMapStringIntfV(v map[string]interface{}, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapStringStringR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (fastpathETBincBytes) EncMapStringStringV(v map[string]string, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapStringBytesR(f *encFnInfo, rv reflect.Value) { + 
fastpathETBincBytes{}.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e) +} +func (fastpathETBincBytes) EncMapStringBytesV(v map[string][]byte, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapStringUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (fastpathETBincBytes) EncMapStringUint8V(v map[string]uint8, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapStringUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (fastpathETBincBytes) EncMapStringUint64V(v map[string]uint64, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapStringIntR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (fastpathETBincBytes) EncMapStringIntV(v map[string]int, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapStringInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (fastpathETBincBytes) EncMapStringInt32V(v map[string]int32, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapStringFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (fastpathETBincBytes) EncMapStringFloat64V(v map[string]float64, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapStringBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (fastpathETBincBytes) EncMapStringBoolV(v map[string]bool, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint8IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (fastpathETBincBytes) EncMapUint8IntfV(v map[uint8]interface{}, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint8StringR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (fastpathETBincBytes) EncMapUint8StringV(v map[uint8]string, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey 
+ e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint8BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e) +} +func (fastpathETBincBytes) EncMapUint8BytesV(v map[uint8][]byte, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint8Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (fastpathETBincBytes) EncMapUint8Uint8V(v map[uint8]uint8, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint8Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (fastpathETBincBytes) EncMapUint8Uint64V(v map[uint8]uint64, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint8IntR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (fastpathETBincBytes) EncMapUint8IntV(v map[uint8]int, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + 
e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint8Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (fastpathETBincBytes) EncMapUint8Int32V(v map[uint8]int32, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint8Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (fastpathETBincBytes) EncMapUint8Float64V(v map[uint8]float64, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint8BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (fastpathETBincBytes) EncMapUint8BoolV(v map[uint8]bool, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint64IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (fastpathETBincBytes) EncMapUint64IntfV(v map[uint64]interface{}, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint64StringR(f 
*encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (fastpathETBincBytes) EncMapUint64StringV(v map[uint64]string, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint64BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e) +} +func (fastpathETBincBytes) EncMapUint64BytesV(v map[uint64][]byte, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint64Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (fastpathETBincBytes) EncMapUint64Uint8V(v map[uint64]uint8, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint64Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (fastpathETBincBytes) EncMapUint64Uint64V(v map[uint64]uint64, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint64IntR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (fastpathETBincBytes) EncMapUint64IntV(v map[uint64]int, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + 
e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint64Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (fastpathETBincBytes) EncMapUint64Int32V(v map[uint64]int32, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint64Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (fastpathETBincBytes) EncMapUint64Float64V(v map[uint64]float64, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapUint64BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (fastpathETBincBytes) EncMapUint64BoolV(v map[uint64]bool, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapIntIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (fastpathETBincBytes) EncMapIntIntfV(v map[int]interface{}, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if 
!e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapIntStringR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (fastpathETBincBytes) EncMapIntStringV(v map[int]string, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapIntBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e) +} +func (fastpathETBincBytes) EncMapIntBytesV(v map[int][]byte, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapIntUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (fastpathETBincBytes) EncMapIntUint8V(v map[int]uint8, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapIntUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (fastpathETBincBytes) EncMapIntUint64V(v map[int]uint64, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + 
} + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapIntIntR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (fastpathETBincBytes) EncMapIntIntV(v map[int]int, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapIntInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (fastpathETBincBytes) EncMapIntInt32V(v map[int]int32, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapIntFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (fastpathETBincBytes) EncMapIntFloat64V(v map[int]float64, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapIntBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (fastpathETBincBytes) EncMapIntBoolV(v map[int]bool, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapInt32IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (fastpathETBincBytes) EncMapInt32IntfV(v map[int32]interface{}, e *encoderBincBytes) { + if 
len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapInt32StringR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (fastpathETBincBytes) EncMapInt32StringV(v map[int32]string, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapInt32BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e) +} +func (fastpathETBincBytes) EncMapInt32BytesV(v map[int32][]byte, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapInt32Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (fastpathETBincBytes) EncMapInt32Uint8V(v map[int32]uint8, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapInt32Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (fastpathETBincBytes) EncMapInt32Uint64V(v map[int32]uint64, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v 
{ + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapInt32IntR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (fastpathETBincBytes) EncMapInt32IntV(v map[int32]int, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapInt32Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (fastpathETBincBytes) EncMapInt32Int32V(v map[int32]int32, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapInt32Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (fastpathETBincBytes) EncMapInt32Float64V(v map[int32]float64, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincBytes) fastpathEncMapInt32BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETBincBytes{}.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (fastpathETBincBytes) EncMapInt32BoolV(v map[int32]bool, e *encoderBincBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = 
containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (helperDecDriverBincBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderBincBytes) bool { + var ft fastpathDTBincBytes + var changed bool + var containerLen int + switch v := iv.(type) { + case []interface{}: + ft.DecSliceIntfN(v, d) + case *[]interface{}: + var v2 []interface{} + if v2, changed = ft.DecSliceIntfY(*v, d); changed { + *v = v2 + } + case []string: + ft.DecSliceStringN(v, d) + case *[]string: + var v2 []string + if v2, changed = ft.DecSliceStringY(*v, d); changed { + *v = v2 + } + case [][]byte: + ft.DecSliceBytesN(v, d) + case *[][]byte: + var v2 [][]byte + if v2, changed = ft.DecSliceBytesY(*v, d); changed { + *v = v2 + } + case []float32: + ft.DecSliceFloat32N(v, d) + case *[]float32: + var v2 []float32 + if v2, changed = ft.DecSliceFloat32Y(*v, d); changed { + *v = v2 + } + case []float64: + ft.DecSliceFloat64N(v, d) + case *[]float64: + var v2 []float64 + if v2, changed = ft.DecSliceFloat64Y(*v, d); changed { + *v = v2 + } + case []uint8: + ft.DecSliceUint8N(v, d) + case *[]uint8: + var v2 []uint8 + if v2, changed = ft.DecSliceUint8Y(*v, d); changed { + *v = v2 + } + case []uint64: + ft.DecSliceUint64N(v, d) + case *[]uint64: + var v2 []uint64 + if v2, changed = ft.DecSliceUint64Y(*v, d); changed { + *v = v2 + } + case []int: + ft.DecSliceIntN(v, d) + case *[]int: + var v2 []int + if v2, changed = ft.DecSliceIntY(*v, d); changed { + *v = v2 + } + case []int32: + ft.DecSliceInt32N(v, d) + case *[]int32: + var v2 []int32 + if v2, changed = ft.DecSliceInt32Y(*v, d); changed { + *v = v2 + } + case []int64: + ft.DecSliceInt64N(v, d) + case *[]int64: + var v2 []int64 + if v2, changed = ft.DecSliceInt64Y(*v, d); changed { + *v = v2 + } + case []bool: + ft.DecSliceBoolN(v, d) + case *[]bool: + var v2 []bool + if v2, changed = ft.DecSliceBoolY(*v, d); changed { + *v = v2 + } + case map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*v, 
containerLen, d) + } + d.mapEnd() + } + case map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = 
make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]float64: + if containerLen = 
d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if 
containerLen != 0 { + ft.DecMapUint64IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint8: + if 
containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*v, containerLen, d) + } + 
d.mapEnd() + } + case map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]float64, decInferLen(containerLen, 
d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*v, containerLen, d) + } + d.mapEnd() + } + default: + _ = v + return false + } + return true +} + +func (d *decoderBincBytes) fastpathDecSliceIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]interface{}) + if vv, changed := ft.DecSliceIntfY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []interface{} + rvGetSlice4Array(rv, &v) + ft.DecSliceIntfN(v, d) + default: + ft.DecSliceIntfN(rv2i(rv).([]interface{}), d) + } +} +func (fastpathDTBincBytes) DecSliceIntfY(v []interface{}, d *decoderBincBytes) (v2 []interface{}, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []interface{}) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]interface{}, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + d.decode(&v[uint(j)]) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]interface{}{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincBytes) DecSliceIntfN(v []interface{}, d *decoderBincBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + d.decode(&v[uint(j)]) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincBytes) fastpathDecSliceStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]string) + if vv, changed := ft.DecSliceStringY(*v, 
d); changed { + *v = vv + } + case reflect.Array: + var v []string + rvGetSlice4Array(rv, &v) + ft.DecSliceStringN(v, d) + default: + ft.DecSliceStringN(rv2i(rv).([]string), d) + } +} +func (fastpathDTBincBytes) DecSliceStringY(v []string, d *decoderBincBytes) (v2 []string, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []string) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]string, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, "")) + } + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]string{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincBytes) DecSliceStringN(v []string, d *decoderBincBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincBytes) fastpathDecSliceBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[][]byte) + if vv, changed := ft.DecSliceBytesY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v [][]byte + rvGetSlice4Array(rv, &v) + ft.DecSliceBytesN(v, d) + default: + ft.DecSliceBytesN(rv2i(rv).([][]byte), d) + } +} +func (fastpathDTBincBytes) DecSliceBytesY(v [][]byte, d *decoderBincBytes) (v2 [][]byte, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst [][]byte) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } 
else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 24)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([][]byte, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([][]byte{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincBytes) DecSliceBytesN(v [][]byte, d *decoderBincBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincBytes) fastpathDecSliceFloat32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float32) + if vv, changed := ft.DecSliceFloat32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float32 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat32N(v, d) + default: + ft.DecSliceFloat32N(rv2i(rv).([]float32), d) + } +} +func (fastpathDTBincBytes) DecSliceFloat32Y(v []float32, d *decoderBincBytes) (v2 []float32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = float32(d.d.DecodeFloat32()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincBytes) DecSliceFloat32N(v []float32, d *decoderBincBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == 
valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = float32(d.d.DecodeFloat32()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincBytes) fastpathDecSliceFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float64) + if vv, changed := ft.DecSliceFloat64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float64 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat64N(v, d) + default: + ft.DecSliceFloat64N(rv2i(rv).([]float64), d) + } +} +func (fastpathDTBincBytes) DecSliceFloat64Y(v []float64, d *decoderBincBytes) (v2 []float64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeFloat64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincBytes) DecSliceFloat64N(v []float64, d *decoderBincBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeFloat64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincBytes) fastpathDecSliceUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint8) + if vv, changed := ft.DecSliceUint8Y(*v, d); changed { + *v 
= vv + } + case reflect.Array: + var v []uint8 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint8N(v, d) + default: + ft.DecSliceUint8N(rv2i(rv).([]uint8), d) + } +} +func (fastpathDTBincBytes) DecSliceUint8Y(v []uint8, d *decoderBincBytes) (v2 []uint8, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + if ctyp != valueTypeMap { + var dbi dBytesIntoState + v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false) + return v2, dbi != dBytesIntoParamOut + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint8) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint8, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint8{}) + } + d.mapEnd() + return v, changed +} +func (fastpathDTBincBytes) DecSliceUint8N(v []uint8, d *decoderBincBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + if ctyp != valueTypeMap { + d.decodeBytesInto(v[:len(v):len(v)], true) + return + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + d.mapEnd() +} + +func (d *decoderBincBytes) fastpathDecSliceUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint64) + if vv, changed := ft.DecSliceUint64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint64 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint64N(v, d) + default: + ft.DecSliceUint64N(rv2i(rv).([]uint64), d) + } +} +func (fastpathDTBincBytes) DecSliceUint64Y(v []uint64, d *decoderBincBytes) (v2 []uint64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeUint64() + } + if 
j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincBytes) DecSliceUint64N(v []uint64, d *decoderBincBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeUint64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincBytes) fastpathDecSliceIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int) + if vv, changed := ft.DecSliceIntY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int + rvGetSlice4Array(rv, &v) + ft.DecSliceIntN(v, d) + default: + ft.DecSliceIntN(rv2i(rv).([]int), d) + } +} +func (fastpathDTBincBytes) DecSliceIntY(v []int, d *decoderBincBytes) (v2 []int, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincBytes) DecSliceIntN(v []int, d *decoderBincBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } else { + 
d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincBytes) fastpathDecSliceInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int32) + if vv, changed := ft.DecSliceInt32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int32 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt32N(v, d) + default: + ft.DecSliceInt32N(rv2i(rv).([]int32), d) + } +} +func (fastpathDTBincBytes) DecSliceInt32Y(v []int32, d *decoderBincBytes) (v2 []int32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincBytes) DecSliceInt32N(v []int32, d *decoderBincBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincBytes) fastpathDecSliceInt64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int64) + if vv, changed := ft.DecSliceInt64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int64 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt64N(v, d) + default: + ft.DecSliceInt64N(rv2i(rv).([]int64), d) + } +} +func (fastpathDTBincBytes) DecSliceInt64Y(v []int64, d *decoderBincBytes) (v2 []int64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = 
d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeInt64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincBytes) DecSliceInt64N(v []int64, d *decoderBincBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeInt64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincBytes) fastpathDecSliceBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]bool) + if vv, changed := ft.DecSliceBoolY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []bool + rvGetSlice4Array(rv, &v) + ft.DecSliceBoolN(v, d) + default: + ft.DecSliceBoolN(rv2i(rv).([]bool), d) + } +} +func (fastpathDTBincBytes) DecSliceBoolY(v []bool, d *decoderBincBytes) (v2 []bool, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []bool) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]bool, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, false)) + } + v[uint(j)] = d.d.DecodeBool() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]bool{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + 
return v, changed +} +func (fastpathDTBincBytes) DecSliceBoolN(v []bool, d *decoderBincBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeBool() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} +func (d *decoderBincBytes) fastpathDecMapStringIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]interface{}) + if *vp == nil { + *vp = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntfL(rv2i(rv).(map[string]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapStringIntfL(v map[string]interface{}, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderBincBytes) fastpathDecMapStringStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]string) + if *vp == nil { + *vp = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringStringL(rv2i(rv).(map[string]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapStringStringL(v map[string]string, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderBincBytes) fastpathDecMapStringBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string][]byte) + if *vp == nil { + *vp = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBytesL(rv2i(rv).(map[string][]byte), containerLen, d) + } + 
d.mapEnd() +} +func (fastpathDTBincBytes) DecMapStringBytesL(v map[string][]byte, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderBincBytes) fastpathDecMapStringUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint8) + if *vp == nil { + *vp = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint8L(rv2i(rv).(map[string]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapStringUint8L(v map[string]uint8, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderBincBytes) fastpathDecMapStringUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint64) + if *vp == nil { + *vp = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint64L(rv2i(rv).(map[string]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapStringUint64L(v map[string]uint64, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderBincBytes) fastpathDecMapStringIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int) + if *vp == nil { + *vp = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntL(rv2i(rv).(map[string]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapStringIntL(v map[string]int, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), 
intBitsize)) + } +} +func (d *decoderBincBytes) fastpathDecMapStringInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int32) + if *vp == nil { + *vp = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringInt32L(rv2i(rv).(map[string]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapStringInt32L(v map[string]int32, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderBincBytes) fastpathDecMapStringFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]float64) + if *vp == nil { + *vp = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringFloat64L(rv2i(rv).(map[string]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapStringFloat64L(v map[string]float64, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderBincBytes) fastpathDecMapStringBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]bool) + if *vp == nil { + *vp = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBoolL(rv2i(rv).(map[string]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapStringBoolL(v map[string]bool, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderBincBytes) fastpathDecMapUint8IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]interface{}) + if *vp == nil { + *vp = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntfL(rv2i(rv).(map[uint8]interface{}), containerLen, d) + } + d.mapEnd() +} +func 
(fastpathDTBincBytes) DecMapUint8IntfL(v map[uint8]interface{}, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderBincBytes) fastpathDecMapUint8StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]string) + if *vp == nil { + *vp = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8StringL(rv2i(rv).(map[uint8]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint8StringL(v map[uint8]string, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderBincBytes) fastpathDecMapUint8BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8][]byte) + if *vp == nil { + *vp = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BytesL(rv2i(rv).(map[uint8][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint8BytesL(v map[uint8][]byte, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderBincBytes) fastpathDecMapUint8Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint8) + if *vp == nil { + *vp = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint8L(rv2i(rv).(map[uint8]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint8Uint8L(v map[uint8]uint8, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 
0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderBincBytes) fastpathDecMapUint8Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint64) + if *vp == nil { + *vp = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint64L(rv2i(rv).(map[uint8]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint8Uint64L(v map[uint8]uint64, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderBincBytes) fastpathDecMapUint8IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int) + if *vp == nil { + *vp = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntL(rv2i(rv).(map[uint8]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint8IntL(v map[uint8]int, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderBincBytes) fastpathDecMapUint8Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int32) + if *vp == nil { + *vp = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Int32L(rv2i(rv).(map[uint8]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint8Int32L(v map[uint8]int32, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderBincBytes) fastpathDecMapUint8Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]float64) + if *vp == nil { + *vp = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 
{ + ft.DecMapUint8Float64L(rv2i(rv).(map[uint8]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint8Float64L(v map[uint8]float64, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderBincBytes) fastpathDecMapUint8BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]bool) + if *vp == nil { + *vp = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BoolL(rv2i(rv).(map[uint8]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint8BoolL(v map[uint8]bool, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderBincBytes) fastpathDecMapUint64IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]interface{}) + if *vp == nil { + *vp = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntfL(rv2i(rv).(map[uint64]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint64IntfL(v map[uint64]interface{}, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderBincBytes) fastpathDecMapUint64StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]string) + if *vp == nil { + *vp = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64StringL(rv2i(rv).(map[uint64]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint64StringL(v map[uint64]string, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + 
d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderBincBytes) fastpathDecMapUint64BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64][]byte) + if *vp == nil { + *vp = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BytesL(rv2i(rv).(map[uint64][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint64BytesL(v map[uint64][]byte, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderBincBytes) fastpathDecMapUint64Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint8) + if *vp == nil { + *vp = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint8L(rv2i(rv).(map[uint64]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint64Uint8L(v map[uint64]uint8, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderBincBytes) fastpathDecMapUint64Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint64) + if *vp == nil { + *vp = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint64L(rv2i(rv).(map[uint64]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint64Uint64L(v map[uint64]uint64, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderBincBytes) fastpathDecMapUint64IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int) + if *vp == nil { + *vp = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + 
ft.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint64IntL(v map[uint64]int, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderBincBytes) fastpathDecMapUint64Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int32) + if *vp == nil { + *vp = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint64Int32L(v map[uint64]int32, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderBincBytes) fastpathDecMapUint64Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]float64) + if *vp == nil { + *vp = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderBincBytes) fastpathDecMapUint64BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]bool) + if *vp == nil { + *vp = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderBincBytes) fastpathDecMapIntIntfR(f *decFnInfo, rv reflect.Value) { + var ft 
fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]interface{}) + if *vp == nil { + *vp = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderBincBytes) fastpathDecMapIntStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]string) + if *vp == nil { + *vp = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapIntStringL(v map[int]string, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderBincBytes) fastpathDecMapIntBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int][]byte) + if *vp == nil { + *vp = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapIntBytesL(v map[int][]byte, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderBincBytes) fastpathDecMapIntUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint8) + if *vp == nil { + *vp = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + 
ft.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapIntUint8L(v map[int]uint8, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderBincBytes) fastpathDecMapIntUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint64) + if *vp == nil { + *vp = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapIntUint64L(v map[int]uint64, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderBincBytes) fastpathDecMapIntIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int) + if *vp == nil { + *vp = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapIntIntL(v map[int]int, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderBincBytes) fastpathDecMapIntInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int32) + if *vp == nil { + *vp = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapIntInt32L(v map[int]int32, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderBincBytes) fastpathDecMapIntFloat64R(f 
*decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]float64) + if *vp == nil { + *vp = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapIntFloat64L(v map[int]float64, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderBincBytes) fastpathDecMapIntBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]bool) + if *vp == nil { + *vp = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapIntBoolL(v map[int]bool, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderBincBytes) fastpathDecMapInt32IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]interface{}) + if *vp == nil { + *vp = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderBincBytes) fastpathDecMapInt32StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]string) + if *vp == nil { + *vp = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d) + } + d.mapEnd() +} +func 
(fastpathDTBincBytes) DecMapInt32StringL(v map[int32]string, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderBincBytes) fastpathDecMapInt32BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32][]byte) + if *vp == nil { + *vp = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderBincBytes) fastpathDecMapInt32Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint8) + if *vp == nil { + *vp = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderBincBytes) fastpathDecMapInt32Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint64) + if *vp == nil { + *vp = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d 
*decoderBincBytes) fastpathDecMapInt32IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int) + if *vp == nil { + *vp = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapInt32IntL(v map[int32]int, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderBincBytes) fastpathDecMapInt32Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int32) + if *vp == nil { + *vp = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapInt32Int32L(v map[int32]int32, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderBincBytes) fastpathDecMapInt32Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]float64) + if *vp == nil { + *vp = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderBincBytes) fastpathDecMapInt32BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]bool) + if *vp == nil { + *vp = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincBytes) DecMapInt32BoolL(v 
map[int32]bool, containerLen int, d *decoderBincBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} + +type fastpathEBincIO struct { + rtid uintptr + rt reflect.Type + encfn func(*encoderBincIO, *encFnInfo, reflect.Value) +} +type fastpathDBincIO struct { + rtid uintptr + rt reflect.Type + decfn func(*decoderBincIO, *decFnInfo, reflect.Value) +} +type fastpathEsBincIO [56]fastpathEBincIO +type fastpathDsBincIO [56]fastpathDBincIO +type fastpathETBincIO struct{} +type fastpathDTBincIO struct{} + +func (helperEncDriverBincIO) fastpathEList() *fastpathEsBincIO { + var i uint = 0 + var s fastpathEsBincIO + fn := func(v interface{}, fe func(*encoderBincIO, *encFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathEBincIO{rt2id(xrt), xrt, fe} + i++ + } + + fn([]interface{}(nil), (*encoderBincIO).fastpathEncSliceIntfR) + fn([]string(nil), (*encoderBincIO).fastpathEncSliceStringR) + fn([][]byte(nil), (*encoderBincIO).fastpathEncSliceBytesR) + fn([]float32(nil), (*encoderBincIO).fastpathEncSliceFloat32R) + fn([]float64(nil), (*encoderBincIO).fastpathEncSliceFloat64R) + fn([]uint8(nil), (*encoderBincIO).fastpathEncSliceUint8R) + fn([]uint64(nil), (*encoderBincIO).fastpathEncSliceUint64R) + fn([]int(nil), (*encoderBincIO).fastpathEncSliceIntR) + fn([]int32(nil), (*encoderBincIO).fastpathEncSliceInt32R) + fn([]int64(nil), (*encoderBincIO).fastpathEncSliceInt64R) + fn([]bool(nil), (*encoderBincIO).fastpathEncSliceBoolR) + + fn(map[string]interface{}(nil), (*encoderBincIO).fastpathEncMapStringIntfR) + fn(map[string]string(nil), (*encoderBincIO).fastpathEncMapStringStringR) + fn(map[string][]byte(nil), (*encoderBincIO).fastpathEncMapStringBytesR) + fn(map[string]uint8(nil), (*encoderBincIO).fastpathEncMapStringUint8R) + fn(map[string]uint64(nil), (*encoderBincIO).fastpathEncMapStringUint64R) + fn(map[string]int(nil), (*encoderBincIO).fastpathEncMapStringIntR) + fn(map[string]int32(nil), (*encoderBincIO).fastpathEncMapStringInt32R) + fn(map[string]float64(nil), (*encoderBincIO).fastpathEncMapStringFloat64R) + fn(map[string]bool(nil), (*encoderBincIO).fastpathEncMapStringBoolR) + fn(map[uint8]interface{}(nil), (*encoderBincIO).fastpathEncMapUint8IntfR) + fn(map[uint8]string(nil), (*encoderBincIO).fastpathEncMapUint8StringR) + fn(map[uint8][]byte(nil), (*encoderBincIO).fastpathEncMapUint8BytesR) + fn(map[uint8]uint8(nil), (*encoderBincIO).fastpathEncMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*encoderBincIO).fastpathEncMapUint8Uint64R) + fn(map[uint8]int(nil), (*encoderBincIO).fastpathEncMapUint8IntR) + fn(map[uint8]int32(nil), (*encoderBincIO).fastpathEncMapUint8Int32R) + fn(map[uint8]float64(nil), (*encoderBincIO).fastpathEncMapUint8Float64R) + fn(map[uint8]bool(nil), (*encoderBincIO).fastpathEncMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*encoderBincIO).fastpathEncMapUint64IntfR) + fn(map[uint64]string(nil), (*encoderBincIO).fastpathEncMapUint64StringR) + fn(map[uint64][]byte(nil), (*encoderBincIO).fastpathEncMapUint64BytesR) + fn(map[uint64]uint8(nil), (*encoderBincIO).fastpathEncMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*encoderBincIO).fastpathEncMapUint64Uint64R) + fn(map[uint64]int(nil), (*encoderBincIO).fastpathEncMapUint64IntR) + fn(map[uint64]int32(nil), 
(*encoderBincIO).fastpathEncMapUint64Int32R) + fn(map[uint64]float64(nil), (*encoderBincIO).fastpathEncMapUint64Float64R) + fn(map[uint64]bool(nil), (*encoderBincIO).fastpathEncMapUint64BoolR) + fn(map[int]interface{}(nil), (*encoderBincIO).fastpathEncMapIntIntfR) + fn(map[int]string(nil), (*encoderBincIO).fastpathEncMapIntStringR) + fn(map[int][]byte(nil), (*encoderBincIO).fastpathEncMapIntBytesR) + fn(map[int]uint8(nil), (*encoderBincIO).fastpathEncMapIntUint8R) + fn(map[int]uint64(nil), (*encoderBincIO).fastpathEncMapIntUint64R) + fn(map[int]int(nil), (*encoderBincIO).fastpathEncMapIntIntR) + fn(map[int]int32(nil), (*encoderBincIO).fastpathEncMapIntInt32R) + fn(map[int]float64(nil), (*encoderBincIO).fastpathEncMapIntFloat64R) + fn(map[int]bool(nil), (*encoderBincIO).fastpathEncMapIntBoolR) + fn(map[int32]interface{}(nil), (*encoderBincIO).fastpathEncMapInt32IntfR) + fn(map[int32]string(nil), (*encoderBincIO).fastpathEncMapInt32StringR) + fn(map[int32][]byte(nil), (*encoderBincIO).fastpathEncMapInt32BytesR) + fn(map[int32]uint8(nil), (*encoderBincIO).fastpathEncMapInt32Uint8R) + fn(map[int32]uint64(nil), (*encoderBincIO).fastpathEncMapInt32Uint64R) + fn(map[int32]int(nil), (*encoderBincIO).fastpathEncMapInt32IntR) + fn(map[int32]int32(nil), (*encoderBincIO).fastpathEncMapInt32Int32R) + fn(map[int32]float64(nil), (*encoderBincIO).fastpathEncMapInt32Float64R) + fn(map[int32]bool(nil), (*encoderBincIO).fastpathEncMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperDecDriverBincIO) fastpathDList() *fastpathDsBincIO { + var i uint = 0 + var s fastpathDsBincIO + fn := func(v interface{}, fd func(*decoderBincIO, *decFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathDBincIO{rt2id(xrt), xrt, fd} + i++ + } + + fn([]interface{}(nil), (*decoderBincIO).fastpathDecSliceIntfR) + fn([]string(nil), (*decoderBincIO).fastpathDecSliceStringR) + fn([][]byte(nil), (*decoderBincIO).fastpathDecSliceBytesR) + fn([]float32(nil), (*decoderBincIO).fastpathDecSliceFloat32R) + fn([]float64(nil), (*decoderBincIO).fastpathDecSliceFloat64R) + fn([]uint8(nil), (*decoderBincIO).fastpathDecSliceUint8R) + fn([]uint64(nil), (*decoderBincIO).fastpathDecSliceUint64R) + fn([]int(nil), (*decoderBincIO).fastpathDecSliceIntR) + fn([]int32(nil), (*decoderBincIO).fastpathDecSliceInt32R) + fn([]int64(nil), (*decoderBincIO).fastpathDecSliceInt64R) + fn([]bool(nil), (*decoderBincIO).fastpathDecSliceBoolR) + + fn(map[string]interface{}(nil), (*decoderBincIO).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*decoderBincIO).fastpathDecMapStringStringR) + fn(map[string][]byte(nil), (*decoderBincIO).fastpathDecMapStringBytesR) + fn(map[string]uint8(nil), (*decoderBincIO).fastpathDecMapStringUint8R) + fn(map[string]uint64(nil), (*decoderBincIO).fastpathDecMapStringUint64R) + fn(map[string]int(nil), (*decoderBincIO).fastpathDecMapStringIntR) + fn(map[string]int32(nil), (*decoderBincIO).fastpathDecMapStringInt32R) + fn(map[string]float64(nil), (*decoderBincIO).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*decoderBincIO).fastpathDecMapStringBoolR) + fn(map[uint8]interface{}(nil), (*decoderBincIO).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*decoderBincIO).fastpathDecMapUint8StringR) + fn(map[uint8][]byte(nil), (*decoderBincIO).fastpathDecMapUint8BytesR) + fn(map[uint8]uint8(nil), (*decoderBincIO).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*decoderBincIO).fastpathDecMapUint8Uint64R) + fn(map[uint8]int(nil), 
(*decoderBincIO).fastpathDecMapUint8IntR) + fn(map[uint8]int32(nil), (*decoderBincIO).fastpathDecMapUint8Int32R) + fn(map[uint8]float64(nil), (*decoderBincIO).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*decoderBincIO).fastpathDecMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*decoderBincIO).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*decoderBincIO).fastpathDecMapUint64StringR) + fn(map[uint64][]byte(nil), (*decoderBincIO).fastpathDecMapUint64BytesR) + fn(map[uint64]uint8(nil), (*decoderBincIO).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*decoderBincIO).fastpathDecMapUint64Uint64R) + fn(map[uint64]int(nil), (*decoderBincIO).fastpathDecMapUint64IntR) + fn(map[uint64]int32(nil), (*decoderBincIO).fastpathDecMapUint64Int32R) + fn(map[uint64]float64(nil), (*decoderBincIO).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*decoderBincIO).fastpathDecMapUint64BoolR) + fn(map[int]interface{}(nil), (*decoderBincIO).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*decoderBincIO).fastpathDecMapIntStringR) + fn(map[int][]byte(nil), (*decoderBincIO).fastpathDecMapIntBytesR) + fn(map[int]uint8(nil), (*decoderBincIO).fastpathDecMapIntUint8R) + fn(map[int]uint64(nil), (*decoderBincIO).fastpathDecMapIntUint64R) + fn(map[int]int(nil), (*decoderBincIO).fastpathDecMapIntIntR) + fn(map[int]int32(nil), (*decoderBincIO).fastpathDecMapIntInt32R) + fn(map[int]float64(nil), (*decoderBincIO).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*decoderBincIO).fastpathDecMapIntBoolR) + fn(map[int32]interface{}(nil), (*decoderBincIO).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*decoderBincIO).fastpathDecMapInt32StringR) + fn(map[int32][]byte(nil), (*decoderBincIO).fastpathDecMapInt32BytesR) + fn(map[int32]uint8(nil), (*decoderBincIO).fastpathDecMapInt32Uint8R) + fn(map[int32]uint64(nil), (*decoderBincIO).fastpathDecMapInt32Uint64R) + fn(map[int32]int(nil), (*decoderBincIO).fastpathDecMapInt32IntR) + fn(map[int32]int32(nil), (*decoderBincIO).fastpathDecMapInt32Int32R) + fn(map[int32]float64(nil), (*decoderBincIO).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*decoderBincIO).fastpathDecMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperEncDriverBincIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderBincIO) bool { + var ft fastpathETBincIO + switch v := iv.(type) { + case []interface{}: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntfV(v, e) + } + case []string: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceStringV(v, e) + } + case [][]byte: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBytesV(v, e) + } + case []float32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat32V(v, e) + } + case []float64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat64V(v, e) + } + case []uint8: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint8V(v, e) + } + case []uint64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint64V(v, e) + } + case []int: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntV(v, e) + } + case []int32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt32V(v, e) + } + case []int64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt64V(v, e) + } + case []bool: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBoolV(v, e) + } + case map[string]interface{}: + if v == nil { + e.e.writeNilMap() + } else { 
+ ft.EncMapStringIntfV(v, e) + } + case map[string]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringStringV(v, e) + } + case map[string][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBytesV(v, e) + } + case map[string]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint8V(v, e) + } + case map[string]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint64V(v, e) + } + case map[string]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntV(v, e) + } + case map[string]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringInt32V(v, e) + } + case map[string]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringFloat64V(v, e) + } + case map[string]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBoolV(v, e) + } + case map[uint8]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntfV(v, e) + } + case map[uint8]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8StringV(v, e) + } + case map[uint8][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BytesV(v, e) + } + case map[uint8]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint8V(v, e) + } + case map[uint8]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint64V(v, e) + } + case map[uint8]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntV(v, e) + } + case map[uint8]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Int32V(v, e) + } + case map[uint8]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Float64V(v, e) + } + case map[uint8]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BoolV(v, e) + } + case map[uint64]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntfV(v, e) + } + case map[uint64]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64StringV(v, e) + } + case map[uint64][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BytesV(v, e) + } + case map[uint64]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint8V(v, e) + } + case map[uint64]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint64V(v, e) + } + case map[uint64]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntV(v, e) + } + case map[uint64]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Int32V(v, e) + } + case map[uint64]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Float64V(v, e) + } + case map[uint64]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BoolV(v, e) + } + case map[int]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntfV(v, e) + } + case map[int]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntStringV(v, e) + } + case map[int][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBytesV(v, e) + } + case map[int]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint8V(v, e) + } + case map[int]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint64V(v, e) + } + case map[int]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntV(v, e) + } + case map[int]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntInt32V(v, e) + } + case map[int]float64: + if v == nil { + 
e.e.writeNilMap() + } else { + ft.EncMapIntFloat64V(v, e) + } + case map[int]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBoolV(v, e) + } + case map[int32]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntfV(v, e) + } + case map[int32]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32StringV(v, e) + } + case map[int32][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BytesV(v, e) + } + case map[int32]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint8V(v, e) + } + case map[int32]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint64V(v, e) + } + case map[int32]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntV(v, e) + } + case map[int32]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Int32V(v, e) + } + case map[int32]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Float64V(v, e) + } + case map[int32]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BoolV(v, e) + } + default: + _ = v + return false + } + return true +} + +func (e *encoderBincIO) fastpathEncSliceIntfR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincIO + var v []interface{} + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]interface{}) + } + if f.ti.mbs { + ft.EncAsMapSliceIntfV(v, e) + return + } + ft.EncSliceIntfV(v, e) +} +func (fastpathETBincIO) EncSliceIntfV(v []interface{}, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincIO) EncAsMapSliceIntfV(v []interface{}, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincIO) fastpathEncSliceStringR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincIO + var v []string + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]string) + } + if f.ti.mbs { + ft.EncAsMapSliceStringV(v, e) + return + } + ft.EncSliceStringV(v, e) +} +func (fastpathETBincIO) EncSliceStringV(v []string, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincIO) EncAsMapSliceStringV(v []string, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincIO) fastpathEncSliceBytesR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincIO + var v [][]byte + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([][]byte) + } + if f.ti.mbs { + ft.EncAsMapSliceBytesV(v, e) + 
return + } + ft.EncSliceBytesV(v, e) +} +func (fastpathETBincIO) EncSliceBytesV(v [][]byte, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincIO) EncAsMapSliceBytesV(v [][]byte, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincIO) fastpathEncSliceFloat32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincIO + var v []float32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float32) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat32V(v, e) + return + } + ft.EncSliceFloat32V(v, e) +} +func (fastpathETBincIO) EncSliceFloat32V(v []float32, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincIO) EncAsMapSliceFloat32V(v []float32, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincIO) fastpathEncSliceFloat64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincIO + var v []float64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float64) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat64V(v, e) + return + } + ft.EncSliceFloat64V(v, e) +} +func (fastpathETBincIO) EncSliceFloat64V(v []float64, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincIO) EncAsMapSliceFloat64V(v []float64, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincIO) fastpathEncSliceUint8R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincIO + var v []uint8 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint8) + } + if f.ti.mbs { + ft.EncAsMapSliceUint8V(v, e) + return + } + ft.EncSliceUint8V(v, e) +} +func (fastpathETBincIO) EncSliceUint8V(v []uint8, e *encoderBincIO) { + e.e.EncodeStringBytesRaw(v) +} +func (fastpathETBincIO) EncAsMapSliceUint8V(v []uint8, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(uint64(v[j])) + } + e.c = 0 + 
e.e.WriteMapEnd() +} + +func (e *encoderBincIO) fastpathEncSliceUint64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincIO + var v []uint64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint64) + } + if f.ti.mbs { + ft.EncAsMapSliceUint64V(v, e) + return + } + ft.EncSliceUint64V(v, e) +} +func (fastpathETBincIO) EncSliceUint64V(v []uint64, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincIO) EncAsMapSliceUint64V(v []uint64, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincIO) fastpathEncSliceIntR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincIO + var v []int + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int) + } + if f.ti.mbs { + ft.EncAsMapSliceIntV(v, e) + return + } + ft.EncSliceIntV(v, e) +} +func (fastpathETBincIO) EncSliceIntV(v []int, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincIO) EncAsMapSliceIntV(v []int, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincIO) fastpathEncSliceInt32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincIO + var v []int32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int32) + } + if f.ti.mbs { + ft.EncAsMapSliceInt32V(v, e) + return + } + ft.EncSliceInt32V(v, e) +} +func (fastpathETBincIO) EncSliceInt32V(v []int32, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincIO) EncAsMapSliceInt32V(v []int32, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincIO) fastpathEncSliceInt64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincIO + var v []int64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int64) + } + if f.ti.mbs { + ft.EncAsMapSliceInt64V(v, e) + return + } + ft.EncSliceInt64V(v, e) +} +func (fastpathETBincIO) EncSliceInt64V(v []int64, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + 
e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincIO) EncAsMapSliceInt64V(v []int64, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincIO) fastpathEncSliceBoolR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETBincIO + var v []bool + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]bool) + } + if f.ti.mbs { + ft.EncAsMapSliceBoolV(v, e) + return + } + ft.EncSliceBoolV(v, e) +} +func (fastpathETBincIO) EncSliceBoolV(v []bool, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETBincIO) EncAsMapSliceBoolV(v []bool, e *encoderBincIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincIO) fastpathEncMapStringIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (fastpathETBincIO) EncMapStringIntfV(v map[string]interface{}, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapStringStringR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (fastpathETBincIO) EncMapStringStringV(v map[string]string, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapStringBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e) +} +func (fastpathETBincIO) EncMapStringBytesV(v map[string][]byte, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, 
len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapStringUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (fastpathETBincIO) EncMapStringUint8V(v map[string]uint8, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapStringUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (fastpathETBincIO) EncMapStringUint64V(v map[string]uint64, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapStringIntR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (fastpathETBincIO) EncMapStringIntV(v map[string]int, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapStringInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (fastpathETBincIO) EncMapStringInt32V(v map[string]int32, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + 
e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapStringFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (fastpathETBincIO) EncMapStringFloat64V(v map[string]float64, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapStringBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (fastpathETBincIO) EncMapStringBoolV(v map[string]bool, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint8IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (fastpathETBincIO) EncMapUint8IntfV(v map[uint8]interface{}, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint8StringR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (fastpathETBincIO) EncMapUint8StringV(v map[uint8]string, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint8BytesR(f 
*encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e) +} +func (fastpathETBincIO) EncMapUint8BytesV(v map[uint8][]byte, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint8Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (fastpathETBincIO) EncMapUint8Uint8V(v map[uint8]uint8, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint8Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (fastpathETBincIO) EncMapUint8Uint64V(v map[uint8]uint64, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint8IntR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (fastpathETBincIO) EncMapUint8IntV(v map[uint8]int, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint8Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (fastpathETBincIO) EncMapUint8Int32V(v map[uint8]int32, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint8Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (fastpathETBincIO) EncMapUint8Float64V(v map[uint8]float64, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint8BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (fastpathETBincIO) EncMapUint8BoolV(v map[uint8]bool, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint64IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (fastpathETBincIO) EncMapUint64IntfV(v map[uint64]interface{}, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint64StringR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (fastpathETBincIO) EncMapUint64StringV(v map[uint64]string, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) 
+ e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint64BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e) +} +func (fastpathETBincIO) EncMapUint64BytesV(v map[uint64][]byte, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint64Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (fastpathETBincIO) EncMapUint64Uint8V(v map[uint64]uint8, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint64Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (fastpathETBincIO) EncMapUint64Uint64V(v map[uint64]uint64, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint64IntR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (fastpathETBincIO) EncMapUint64IntV(v map[uint64]int, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) 
fastpathEncMapUint64Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (fastpathETBincIO) EncMapUint64Int32V(v map[uint64]int32, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint64Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (fastpathETBincIO) EncMapUint64Float64V(v map[uint64]float64, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapUint64BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (fastpathETBincIO) EncMapUint64BoolV(v map[uint64]bool, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapIntIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (fastpathETBincIO) EncMapIntIntfV(v map[int]interface{}, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapIntStringR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (fastpathETBincIO) EncMapIntStringV(v map[int]string, e *encoderBincIO) { + if len(v) == 0 { + 
e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapIntBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e) +} +func (fastpathETBincIO) EncMapIntBytesV(v map[int][]byte, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapIntUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (fastpathETBincIO) EncMapIntUint8V(v map[int]uint8, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapIntUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (fastpathETBincIO) EncMapIntUint64V(v map[int]uint64, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapIntIntR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (fastpathETBincIO) EncMapIntIntV(v map[int]int, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 
+ for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapIntInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (fastpathETBincIO) EncMapIntInt32V(v map[int]int32, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapIntFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (fastpathETBincIO) EncMapIntFloat64V(v map[int]float64, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapIntBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (fastpathETBincIO) EncMapIntBoolV(v map[int]bool, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapInt32IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (fastpathETBincIO) EncMapInt32IntfV(v map[int32]interface{}, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) 
fastpathEncMapInt32StringR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (fastpathETBincIO) EncMapInt32StringV(v map[int32]string, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapInt32BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e) +} +func (fastpathETBincIO) EncMapInt32BytesV(v map[int32][]byte, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapInt32Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (fastpathETBincIO) EncMapInt32Uint8V(v map[int32]uint8, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapInt32Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (fastpathETBincIO) EncMapInt32Uint64V(v map[int32]uint64, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapInt32IntR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (fastpathETBincIO) EncMapInt32IntV(v map[int32]int, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) 
+ if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapInt32Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (fastpathETBincIO) EncMapInt32Int32V(v map[int32]int32, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapInt32Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (fastpathETBincIO) EncMapInt32Float64V(v map[int32]float64, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderBincIO) fastpathEncMapInt32BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETBincIO{}.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (fastpathETBincIO) EncMapInt32BoolV(v map[int32]bool, e *encoderBincIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (helperDecDriverBincIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderBincIO) bool { + var ft fastpathDTBincIO + var changed bool + var containerLen int + switch v := iv.(type) { + case []interface{}: + ft.DecSliceIntfN(v, d) + case *[]interface{}: + var v2 []interface{} + if v2, changed = ft.DecSliceIntfY(*v, d); changed { + *v = v2 + } + case []string: + ft.DecSliceStringN(v, d) + case *[]string: + var v2 []string + if v2, changed = ft.DecSliceStringY(*v, d); changed { + *v = v2 + } + case [][]byte: + ft.DecSliceBytesN(v, d) + case *[][]byte: + var v2 [][]byte + if v2, changed = 
ft.DecSliceBytesY(*v, d); changed { + *v = v2 + } + case []float32: + ft.DecSliceFloat32N(v, d) + case *[]float32: + var v2 []float32 + if v2, changed = ft.DecSliceFloat32Y(*v, d); changed { + *v = v2 + } + case []float64: + ft.DecSliceFloat64N(v, d) + case *[]float64: + var v2 []float64 + if v2, changed = ft.DecSliceFloat64Y(*v, d); changed { + *v = v2 + } + case []uint8: + ft.DecSliceUint8N(v, d) + case *[]uint8: + var v2 []uint8 + if v2, changed = ft.DecSliceUint8Y(*v, d); changed { + *v = v2 + } + case []uint64: + ft.DecSliceUint64N(v, d) + case *[]uint64: + var v2 []uint64 + if v2, changed = ft.DecSliceUint64Y(*v, d); changed { + *v = v2 + } + case []int: + ft.DecSliceIntN(v, d) + case *[]int: + var v2 []int + if v2, changed = ft.DecSliceIntY(*v, d); changed { + *v = v2 + } + case []int32: + ft.DecSliceInt32N(v, d) + case *[]int32: + var v2 []int32 + if v2, changed = ft.DecSliceInt32Y(*v, d); changed { + *v = v2 + } + case []int64: + ft.DecSliceInt64N(v, d) + case *[]int64: + var v2 []int64 + if v2, changed = ft.DecSliceInt64Y(*v, d); changed { + *v = v2 + } + case []bool: + ft.DecSliceBoolN(v, d) + case *[]bool: + var v2 []bool + if v2, changed = ft.DecSliceBoolY(*v, d); changed { + *v = v2 + } + case map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint64L(v, containerLen, d) + } + d.mapEnd() + } + case 
*map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); 
containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*v, containerLen, d) + } + d.mapEnd() + } + 
case map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int32, 
decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == 
containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32][]byte: + if 
containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*v, containerLen, d) + } + d.mapEnd() + } + default: + _ = v + return false + } + return true +} + +func (d *decoderBincIO) fastpathDecSliceIntfR(f *decFnInfo, rv reflect.Value) { + var ft 
fastpathDTBincIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]interface{}) + if vv, changed := ft.DecSliceIntfY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []interface{} + rvGetSlice4Array(rv, &v) + ft.DecSliceIntfN(v, d) + default: + ft.DecSliceIntfN(rv2i(rv).([]interface{}), d) + } +} +func (fastpathDTBincIO) DecSliceIntfY(v []interface{}, d *decoderBincIO) (v2 []interface{}, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []interface{}) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]interface{}, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + d.decode(&v[uint(j)]) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]interface{}{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincIO) DecSliceIntfN(v []interface{}, d *decoderBincIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + d.decode(&v[uint(j)]) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincIO) fastpathDecSliceStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]string) + if vv, changed := ft.DecSliceStringY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []string + rvGetSlice4Array(rv, &v) + ft.DecSliceStringN(v, d) + default: + ft.DecSliceStringN(rv2i(rv).([]string), d) + } +} +func (fastpathDTBincIO) DecSliceStringY(v []string, d *decoderBincIO) (v2 []string, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []string) { v, changed = dst, true } + for ; 
d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]string, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, "")) + } + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]string{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincIO) DecSliceStringN(v []string, d *decoderBincIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincIO) fastpathDecSliceBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[][]byte) + if vv, changed := ft.DecSliceBytesY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v [][]byte + rvGetSlice4Array(rv, &v) + ft.DecSliceBytesN(v, d) + default: + ft.DecSliceBytesN(rv2i(rv).([][]byte), d) + } +} +func (fastpathDTBincIO) DecSliceBytesY(v [][]byte, d *decoderBincIO) (v2 [][]byte, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst [][]byte) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 24)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([][]byte, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([][]byte{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincIO) DecSliceBytesN(v [][]byte, d *decoderBincIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + 
return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincIO) fastpathDecSliceFloat32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float32) + if vv, changed := ft.DecSliceFloat32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float32 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat32N(v, d) + default: + ft.DecSliceFloat32N(rv2i(rv).([]float32), d) + } +} +func (fastpathDTBincIO) DecSliceFloat32Y(v []float32, d *decoderBincIO) (v2 []float32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = float32(d.d.DecodeFloat32()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincIO) DecSliceFloat32N(v []float32, d *decoderBincIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = float32(d.d.DecodeFloat32()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincIO) fastpathDecSliceFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + switch rv.Kind() { + case reflect.Ptr: + v := 
rv2i(rv).(*[]float64) + if vv, changed := ft.DecSliceFloat64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float64 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat64N(v, d) + default: + ft.DecSliceFloat64N(rv2i(rv).([]float64), d) + } +} +func (fastpathDTBincIO) DecSliceFloat64Y(v []float64, d *decoderBincIO) (v2 []float64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeFloat64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincIO) DecSliceFloat64N(v []float64, d *decoderBincIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeFloat64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincIO) fastpathDecSliceUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint8) + if vv, changed := ft.DecSliceUint8Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint8 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint8N(v, d) + default: + ft.DecSliceUint8N(rv2i(rv).([]uint8), d) + } +} +func (fastpathDTBincIO) DecSliceUint8Y(v []uint8, d *decoderBincIO) (v2 []uint8, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + if ctyp != valueTypeMap { + var dbi dBytesIntoState + v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false) + return v2, dbi != dBytesIntoParamOut + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint8) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, 
d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint8, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint8{}) + } + d.mapEnd() + return v, changed +} +func (fastpathDTBincIO) DecSliceUint8N(v []uint8, d *decoderBincIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + if ctyp != valueTypeMap { + d.decodeBytesInto(v[:len(v):len(v)], true) + return + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + d.mapEnd() +} + +func (d *decoderBincIO) fastpathDecSliceUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint64) + if vv, changed := ft.DecSliceUint64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint64 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint64N(v, d) + default: + ft.DecSliceUint64N(rv2i(rv).([]uint64), d) + } +} +func (fastpathDTBincIO) DecSliceUint64Y(v []uint64, d *decoderBincIO) (v2 []uint64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeUint64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincIO) DecSliceUint64N(v []uint64, d *decoderBincIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = 
d.d.DecodeUint64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincIO) fastpathDecSliceIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int) + if vv, changed := ft.DecSliceIntY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int + rvGetSlice4Array(rv, &v) + ft.DecSliceIntN(v, d) + default: + ft.DecSliceIntN(rv2i(rv).([]int), d) + } +} +func (fastpathDTBincIO) DecSliceIntY(v []int, d *decoderBincIO) (v2 []int, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincIO) DecSliceIntN(v []int, d *decoderBincIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincIO) fastpathDecSliceInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int32) + if vv, changed := ft.DecSliceInt32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int32 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt32N(v, d) + default: + ft.DecSliceInt32N(rv2i(rv).([]int32), d) + } +} +func (fastpathDTBincIO) DecSliceInt32Y(v []int32, d *decoderBincIO) (v2 []int32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = 
d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincIO) DecSliceInt32N(v []int32, d *decoderBincIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincIO) fastpathDecSliceInt64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int64) + if vv, changed := ft.DecSliceInt64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int64 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt64N(v, d) + default: + ft.DecSliceInt64N(rv2i(rv).([]int64), d) + } +} +func (fastpathDTBincIO) DecSliceInt64Y(v []int64, d *decoderBincIO) (v2 []int64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeInt64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int64{}) + } + if isArray { + 
d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincIO) DecSliceInt64N(v []int64, d *decoderBincIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeInt64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincIO) fastpathDecSliceBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]bool) + if vv, changed := ft.DecSliceBoolY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []bool + rvGetSlice4Array(rv, &v) + ft.DecSliceBoolN(v, d) + default: + ft.DecSliceBoolN(rv2i(rv).([]bool), d) + } +} +func (fastpathDTBincIO) DecSliceBoolY(v []bool, d *decoderBincIO) (v2 []bool, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []bool) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]bool, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, false)) + } + v[uint(j)] = d.d.DecodeBool() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]bool{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTBincIO) DecSliceBoolN(v []bool, d *decoderBincIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeBool() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} +func (d *decoderBincIO) fastpathDecMapStringIntfR(f *decFnInfo, rv 
reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]interface{}) + if *vp == nil { + *vp = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntfL(rv2i(rv).(map[string]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapStringIntfL(v map[string]interface{}, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderBincIO) fastpathDecMapStringStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]string) + if *vp == nil { + *vp = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringStringL(rv2i(rv).(map[string]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapStringStringL(v map[string]string, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderBincIO) fastpathDecMapStringBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string][]byte) + if *vp == nil { + *vp = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBytesL(rv2i(rv).(map[string][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapStringBytesL(v map[string][]byte, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderBincIO) fastpathDecMapStringUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint8) + if *vp == nil { + *vp = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*vp, containerLen, d) 
+ } + } else if containerLen != 0 { + ft.DecMapStringUint8L(rv2i(rv).(map[string]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapStringUint8L(v map[string]uint8, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderBincIO) fastpathDecMapStringUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint64) + if *vp == nil { + *vp = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint64L(rv2i(rv).(map[string]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapStringUint64L(v map[string]uint64, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderBincIO) fastpathDecMapStringIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int) + if *vp == nil { + *vp = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntL(rv2i(rv).(map[string]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapStringIntL(v map[string]int, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderBincIO) fastpathDecMapStringInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int32) + if *vp == nil { + *vp = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringInt32L(rv2i(rv).(map[string]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapStringInt32L(v map[string]int32, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} 
+func (d *decoderBincIO) fastpathDecMapStringFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]float64) + if *vp == nil { + *vp = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringFloat64L(rv2i(rv).(map[string]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapStringFloat64L(v map[string]float64, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderBincIO) fastpathDecMapStringBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]bool) + if *vp == nil { + *vp = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBoolL(rv2i(rv).(map[string]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapStringBoolL(v map[string]bool, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderBincIO) fastpathDecMapUint8IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]interface{}) + if *vp == nil { + *vp = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntfL(rv2i(rv).(map[uint8]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint8IntfL(v map[uint8]interface{}, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderBincIO) fastpathDecMapUint8StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]string) + if *vp == nil { + *vp = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + 
ft.DecMapUint8StringL(rv2i(rv).(map[uint8]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint8StringL(v map[uint8]string, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderBincIO) fastpathDecMapUint8BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8][]byte) + if *vp == nil { + *vp = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BytesL(rv2i(rv).(map[uint8][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint8BytesL(v map[uint8][]byte, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderBincIO) fastpathDecMapUint8Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint8) + if *vp == nil { + *vp = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint8L(rv2i(rv).(map[uint8]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint8Uint8L(v map[uint8]uint8, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderBincIO) fastpathDecMapUint8Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint64) + if *vp == nil { + *vp = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint64L(rv2i(rv).(map[uint8]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint8Uint64L(v map[uint8]uint64, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + 
d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderBincIO) fastpathDecMapUint8IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int) + if *vp == nil { + *vp = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntL(rv2i(rv).(map[uint8]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint8IntL(v map[uint8]int, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderBincIO) fastpathDecMapUint8Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int32) + if *vp == nil { + *vp = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Int32L(rv2i(rv).(map[uint8]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint8Int32L(v map[uint8]int32, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderBincIO) fastpathDecMapUint8Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]float64) + if *vp == nil { + *vp = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Float64L(rv2i(rv).(map[uint8]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint8Float64L(v map[uint8]float64, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderBincIO) fastpathDecMapUint8BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]bool) + if *vp == nil { + *vp = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BoolL(rv2i(rv).(map[uint8]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) 
DecMapUint8BoolL(v map[uint8]bool, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderBincIO) fastpathDecMapUint64IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]interface{}) + if *vp == nil { + *vp = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntfL(rv2i(rv).(map[uint64]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint64IntfL(v map[uint64]interface{}, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderBincIO) fastpathDecMapUint64StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]string) + if *vp == nil { + *vp = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64StringL(rv2i(rv).(map[uint64]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint64StringL(v map[uint64]string, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderBincIO) fastpathDecMapUint64BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64][]byte) + if *vp == nil { + *vp = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BytesL(rv2i(rv).(map[uint64][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint64BytesL(v map[uint64][]byte, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, 
false) + } +} +func (d *decoderBincIO) fastpathDecMapUint64Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint8) + if *vp == nil { + *vp = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint8L(rv2i(rv).(map[uint64]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint64Uint8L(v map[uint64]uint8, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderBincIO) fastpathDecMapUint64Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint64) + if *vp == nil { + *vp = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint64L(rv2i(rv).(map[uint64]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint64Uint64L(v map[uint64]uint64, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderBincIO) fastpathDecMapUint64IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int) + if *vp == nil { + *vp = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint64IntL(v map[uint64]int, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderBincIO) fastpathDecMapUint64Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int32) + if *vp == nil { + *vp = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint64Int32L(v map[uint64]int32, containerLen int, d *decoderBincIO) { + if v == 
nil { + halt.errorInt("cannot decode into nil map[uint64]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderBincIO) fastpathDecMapUint64Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]float64) + if *vp == nil { + *vp = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderBincIO) fastpathDecMapUint64BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]bool) + if *vp == nil { + *vp = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderBincIO) fastpathDecMapIntIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]interface{}) + if *vp == nil { + *vp = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderBincIO) fastpathDecMapIntStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := 
rv2i(rv).(*map[int]string) + if *vp == nil { + *vp = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapIntStringL(v map[int]string, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderBincIO) fastpathDecMapIntBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int][]byte) + if *vp == nil { + *vp = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapIntBytesL(v map[int][]byte, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderBincIO) fastpathDecMapIntUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint8) + if *vp == nil { + *vp = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapIntUint8L(v map[int]uint8, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderBincIO) fastpathDecMapIntUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint64) + if *vp == nil { + *vp = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapIntUint64L(v map[int]uint64, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint64 given stream length: ", 
int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderBincIO) fastpathDecMapIntIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int) + if *vp == nil { + *vp = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapIntIntL(v map[int]int, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderBincIO) fastpathDecMapIntInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int32) + if *vp == nil { + *vp = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapIntInt32L(v map[int]int32, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderBincIO) fastpathDecMapIntFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]float64) + if *vp == nil { + *vp = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapIntFloat64L(v map[int]float64, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderBincIO) fastpathDecMapIntBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]bool) + if *vp == nil { + *vp = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*vp, containerLen, d) + } 
+ } else if containerLen != 0 { + ft.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapIntBoolL(v map[int]bool, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderBincIO) fastpathDecMapInt32IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]interface{}) + if *vp == nil { + *vp = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderBincIO) fastpathDecMapInt32StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]string) + if *vp == nil { + *vp = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapInt32StringL(v map[int32]string, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderBincIO) fastpathDecMapInt32BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32][]byte) + if *vp == nil { + *vp = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + 
d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderBincIO) fastpathDecMapInt32Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint8) + if *vp == nil { + *vp = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderBincIO) fastpathDecMapInt32Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint64) + if *vp == nil { + *vp = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderBincIO) fastpathDecMapInt32IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int) + if *vp == nil { + *vp = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapInt32IntL(v map[int32]int, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderBincIO) fastpathDecMapInt32Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int32) + if *vp == nil { + *vp = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*vp, containerLen, d) + } + } else if 
containerLen != 0 { + ft.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapInt32Int32L(v map[int32]int32, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderBincIO) fastpathDecMapInt32Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]float64) + if *vp == nil { + *vp = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderBincIO) fastpathDecMapInt32BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTBincIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]bool) + if *vp == nil { + *vp = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTBincIO) DecMapInt32BoolL(v map[int32]bool, containerLen int, d *decoderBincIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go index 9ed15a0bf..34513550d 100644 --- a/vendor/github.com/ugorji/go/codec/binc.go +++ b/vendor/github.com/ugorji/go/codec/binc.go @@ -1,177 +1,75 @@ +//go:build notmono || codec.notmono + // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. package codec import ( + "io" "math" "reflect" "time" "unicode/utf8" ) -// Symbol management: -// - symbols are stored in a symbol map during encoding and decoding. -// - the symbols persist until the (En|De)coder ResetXXX method is called. 
- -const bincDoPrune = true - -// vd as low 4 bits (there are 16 slots) -const ( - bincVdSpecial byte = iota - bincVdPosInt - bincVdNegInt - bincVdFloat - - bincVdString - bincVdByteArray - bincVdArray - bincVdMap - - bincVdTimestamp - bincVdSmallInt - _ // bincVdUnicodeOther - bincVdSymbol - - _ // bincVdDecimal - _ // open slot - _ // open slot - bincVdCustomExt = 0x0f -) - -const ( - bincSpNil byte = iota - bincSpFalse - bincSpTrue - bincSpNan - bincSpPosInf - bincSpNegInf - bincSpZeroFloat - bincSpZero - bincSpNegOne -) - -const ( - _ byte = iota // bincFlBin16 - bincFlBin32 - _ // bincFlBin32e - bincFlBin64 - _ // bincFlBin64e - // others not currently supported -) - -const bincBdNil = 0 // bincVdSpecial<<4 | bincSpNil // staticcheck barfs on this (SA4016) - -var ( - bincdescSpecialVsNames = map[byte]string{ - bincSpNil: "nil", - bincSpFalse: "false", - bincSpTrue: "true", - bincSpNan: "float", - bincSpPosInf: "float", - bincSpNegInf: "float", - bincSpZeroFloat: "float", - bincSpZero: "uint", - bincSpNegOne: "int", - } - bincdescVdNames = map[byte]string{ - bincVdSpecial: "special", - bincVdSmallInt: "uint", - bincVdPosInt: "uint", - bincVdFloat: "float", - bincVdSymbol: "string", - bincVdString: "string", - bincVdByteArray: "bytes", - bincVdTimestamp: "time", - bincVdCustomExt: "ext", - bincVdArray: "array", - bincVdMap: "map", - } -) - -func bincdescbd(bd byte) (s string) { - return bincdesc(bd>>4, bd&0x0f) -} - -func bincdesc(vd, vs byte) (s string) { - if vd == bincVdSpecial { - s = bincdescSpecialVsNames[vs] - } else { - s = bincdescVdNames[vd] - } - if s == "" { - s = "unknown" - } - return -} - -type bincEncState struct { - m map[string]uint16 // symbols -} - -func (e bincEncState) captureState() interface{} { return e.m } -func (e *bincEncState) resetState() { e.m = nil } -func (e *bincEncState) reset() { e.resetState() } -func (e *bincEncState) restoreState(v interface{}) { e.m = v.(map[string]uint16) } - -type bincEncDriver struct { +type bincEncDriver[T encWriter] struct { noBuiltInTypes encDriverNoopContainerWriter + encDriverContainerNoTrackerT + encInit2er + h *BincHandle + e *encoderBase + w T bincEncState - - e Encoder } -func (e *bincEncDriver) encoder() *Encoder { - return &e.e +func (e *bincEncDriver[T]) EncodeNil() { + e.w.writen1(bincBdNil) } -func (e *bincEncDriver) EncodeNil() { - e.e.encWr.writen1(bincBdNil) -} - -func (e *bincEncDriver) EncodeTime(t time.Time) { +func (e *bincEncDriver[T]) EncodeTime(t time.Time) { if t.IsZero() { e.EncodeNil() } else { bs := bincEncodeTime(t) - e.e.encWr.writen1(bincVdTimestamp<<4 | uint8(len(bs))) - e.e.encWr.writeb(bs) + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) } } -func (e *bincEncDriver) EncodeBool(b bool) { +func (e *bincEncDriver[T]) EncodeBool(b bool) { if b { - e.e.encWr.writen1(bincVdSpecial<<4 | bincSpTrue) + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) } else { - e.e.encWr.writen1(bincVdSpecial<<4 | bincSpFalse) + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) } } -func (e *bincEncDriver) encSpFloat(f float64) (done bool) { +func (e *bincEncDriver[T]) encSpFloat(f float64) (done bool) { if f == 0 { - e.e.encWr.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) } else if math.IsNaN(float64(f)) { - e.e.encWr.writen1(bincVdSpecial<<4 | bincSpNan) + e.w.writen1(bincVdSpecial<<4 | bincSpNan) } else if math.IsInf(float64(f), +1) { - e.e.encWr.writen1(bincVdSpecial<<4 | bincSpPosInf) + e.w.writen1(bincVdSpecial<<4 | bincSpPosInf) } else if 
math.IsInf(float64(f), -1) { - e.e.encWr.writen1(bincVdSpecial<<4 | bincSpNegInf) + e.w.writen1(bincVdSpecial<<4 | bincSpNegInf) } else { return } return true } -func (e *bincEncDriver) EncodeFloat32(f float32) { +func (e *bincEncDriver[T]) EncodeFloat32(f float32) { if !e.encSpFloat(float64(f)) { - e.e.encWr.writen1(bincVdFloat<<4 | bincFlBin32) - bigen.writeUint32(e.e.w(), math.Float32bits(f)) + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + e.w.writen4(bigen.PutUint32(math.Float32bits(f))) } } -func (e *bincEncDriver) EncodeFloat64(f float64) { +func (e *bincEncDriver[T]) EncodeFloat64(f float64) { if e.encSpFloat(f) { return } @@ -182,64 +80,64 @@ func (e *bincEncDriver) EncodeFloat64(f float64) { } i++ if i <= 6 { - e.e.encWr.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) - e.e.encWr.writen1(byte(i)) - e.e.encWr.writeb(b[:i]) + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(b[:i]) return } } - e.e.encWr.writen1(bincVdFloat<<4 | bincFlBin64) - e.e.encWr.writen8(b) + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writen8(b) } -func (e *bincEncDriver) encIntegerPrune32(bd byte, pos bool, v uint64) { +func (e *bincEncDriver[T]) encIntegerPrune32(bd byte, pos bool, v uint64) { b := bigen.PutUint32(uint32(v)) if bincDoPrune { i := byte(pruneSignExt(b[:], pos)) - e.e.encWr.writen1(bd | 3 - i) - e.e.encWr.writeb(b[i:]) + e.w.writen1(bd | 3 - i) + e.w.writeb(b[i:]) } else { - e.e.encWr.writen1(bd | 3) - e.e.encWr.writen4(b) + e.w.writen1(bd | 3) + e.w.writen4(b) } } -func (e *bincEncDriver) encIntegerPrune64(bd byte, pos bool, v uint64) { +func (e *bincEncDriver[T]) encIntegerPrune64(bd byte, pos bool, v uint64) { b := bigen.PutUint64(v) if bincDoPrune { i := byte(pruneSignExt(b[:], pos)) - e.e.encWr.writen1(bd | 7 - i) - e.e.encWr.writeb(b[i:]) + e.w.writen1(bd | 7 - i) + e.w.writeb(b[i:]) } else { - e.e.encWr.writen1(bd | 7) - e.e.encWr.writen8(b) + e.w.writen1(bd | 7) + e.w.writen8(b) } } -func (e *bincEncDriver) EncodeInt(v int64) { +func (e *bincEncDriver[T]) EncodeInt(v int64) { if v >= 0 { e.encUint(bincVdPosInt<<4, true, uint64(v)) } else if v == -1 { - e.e.encWr.writen1(bincVdSpecial<<4 | bincSpNegOne) + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) } else { e.encUint(bincVdNegInt<<4, false, uint64(-v)) } } -func (e *bincEncDriver) EncodeUint(v uint64) { +func (e *bincEncDriver[T]) EncodeUint(v uint64) { e.encUint(bincVdPosInt<<4, true, v) } -func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { +func (e *bincEncDriver[T]) encUint(bd byte, pos bool, v uint64) { if v == 0 { - e.e.encWr.writen1(bincVdSpecial<<4 | bincSpZero) + e.w.writen1(bincVdSpecial<<4 | bincSpZero) } else if pos && v >= 1 && v <= 16 { - e.e.encWr.writen1(bincVdSmallInt<<4 | byte(v-1)) + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) } else if v <= math.MaxUint8 { - e.e.encWr.writen2(bd|0x0, byte(v)) + e.w.writen2(bd, byte(v)) // bd|0x0 } else if v <= math.MaxUint16 { - e.e.encWr.writen1(bd | 0x01) - bigen.writeUint16(e.e.w(), uint16(v)) + e.w.writen1(bd | 0x01) + e.w.writen2(bigen.PutUint16(uint16(v))) } else if v <= math.MaxUint32 { e.encIntegerPrune32(bd, pos, v) } else { @@ -247,21 +145,21 @@ func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { } } -func (e *bincEncDriver) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) { +func (e *bincEncDriver[T]) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) { var bs0, bs []byte if ext == SelfExt { bs0 = e.e.blist.get(1024) bs = bs0 - e.e.sideEncode(v, basetype, &bs) + sideEncode(e.h, 
&e.h.sideEncPool, func(se encoderI) { oneOffEncode(se, v, &bs, basetype, true) }) } else { bs = ext.WriteExt(v) } if bs == nil { - e.EncodeNil() + e.writeNilBytes() goto END } e.encodeExtPreamble(uint8(xtag), len(bs)) - e.e.encWr.writeb(bs) + e.w.writeb(bs) END: if ext == SelfExt { e.e.blist.put(bs) @@ -271,25 +169,35 @@ END: } } -func (e *bincEncDriver) EncodeRawExt(re *RawExt) { +func (e *bincEncDriver[T]) EncodeRawExt(re *RawExt) { e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.e.encWr.writeb(re.Data) + e.w.writeb(re.Data) } -func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { +func (e *bincEncDriver[T]) encodeExtPreamble(xtag byte, length int) { e.encLen(bincVdCustomExt<<4, uint64(length)) - e.e.encWr.writen1(xtag) + e.w.writen1(xtag) } -func (e *bincEncDriver) WriteArrayStart(length int) { +func (e *bincEncDriver[T]) WriteArrayStart(length int) { e.encLen(bincVdArray<<4, uint64(length)) } -func (e *bincEncDriver) WriteMapStart(length int) { +func (e *bincEncDriver[T]) WriteMapStart(length int) { e.encLen(bincVdMap<<4, uint64(length)) } -func (e *bincEncDriver) EncodeSymbol(v string) { +func (e *bincEncDriver[T]) WriteArrayEmpty() { + // e.WriteArrayStart(0) = e.encLen(bincVdArray<<4, 0) + e.w.writen1(bincVdArray<<4 | uint8(0+4)) +} + +func (e *bincEncDriver[T]) WriteMapEmpty() { + // e.WriteMapStart(0) = e.encLen(bincVdMap<<4, 0) + e.w.writen1(bincVdMap<<4 | uint8(0+4)) +} + +func (e *bincEncDriver[T]) EncodeSymbol(v string) { //symbols only offer benefit when string length > 1. //This is because strings with length 1 take only 2 bytes to store //(bd with embedded length, and single byte for string val). @@ -300,7 +208,7 @@ func (e *bincEncDriver) EncodeSymbol(v string) { return } else if l == 1 { e.encBytesLen(cUTF8, 1) - e.e.encWr.writen1(v[0]) + e.w.writen1(v[0]) return } if e.m == nil { @@ -309,10 +217,10 @@ func (e *bincEncDriver) EncodeSymbol(v string) { ui, ok := e.m[v] if ok { if ui <= math.MaxUint8 { - e.e.encWr.writen2(bincVdSymbol<<4, byte(ui)) + e.w.writen2(bincVdSymbol<<4, byte(ui)) } else { - e.e.encWr.writen1(bincVdSymbol<<4 | 0x8) - bigen.writeUint16(e.e.w(), ui) + e.w.writen1(bincVdSymbol<<4 | 0x8) + e.w.writen2(bigen.PutUint16(ui)) } } else { e.e.seq++ @@ -329,58 +237,83 @@ func (e *bincEncDriver) EncodeSymbol(v string) { lenprec = 3 } if ui <= math.MaxUint8 { - e.e.encWr.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) + e.w.writen2(bincVdSymbol<<4|0x4|lenprec, byte(ui)) // bincVdSymbol<<4|0x0|0x4|lenprec } else { - e.e.encWr.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) - bigen.writeUint16(e.e.w(), ui) + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + e.w.writen2(bigen.PutUint16(ui)) } if lenprec == 0 { - e.e.encWr.writen1(byte(l)) + e.w.writen1(byte(l)) } else if lenprec == 1 { - bigen.writeUint16(e.e.w(), uint16(l)) + e.w.writen2(bigen.PutUint16(uint16(l))) } else if lenprec == 2 { - bigen.writeUint32(e.e.w(), uint32(l)) + e.w.writen4(bigen.PutUint32(uint32(l))) } else { - bigen.writeUint64(e.e.w(), uint64(l)) + e.w.writen8(bigen.PutUint64(uint64(l))) } - e.e.encWr.writestr(v) + e.w.writestr(v) } } -func (e *bincEncDriver) EncodeString(v string) { +func (e *bincEncDriver[T]) EncodeString(v string) { if e.h.StringToRaw { e.encLen(bincVdByteArray<<4, uint64(len(v))) if len(v) > 0 { - e.e.encWr.writestr(v) + e.w.writestr(v) } return } e.EncodeStringEnc(cUTF8, v) } -func (e *bincEncDriver) EncodeStringEnc(c charEncoding, v string) { +func (e *bincEncDriver[T]) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) } + +func (e 
*bincEncDriver[T]) EncodeStringEnc(c charEncoding, v string) { if e.e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 1) { e.EncodeSymbol(v) return } e.encLen(bincVdString<<4, uint64(len(v))) if len(v) > 0 { - e.e.encWr.writestr(v) + e.w.writestr(v) } } -func (e *bincEncDriver) EncodeStringBytesRaw(v []byte) { - if v == nil { - e.EncodeNil() - return - } +func (e *bincEncDriver[T]) EncodeStringBytesRaw(v []byte) { e.encLen(bincVdByteArray<<4, uint64(len(v))) if len(v) > 0 { - e.e.encWr.writeb(v) + e.w.writeb(v) } } -func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { +func (e *bincEncDriver[T]) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *bincEncDriver[T]) writeNilOr(v byte) { + if !e.h.NilCollectionToZeroLength { + v = bincBdNil + } + e.w.writen1(v) +} + +func (e *bincEncDriver[T]) writeNilArray() { + e.writeNilOr(bincVdArray<<4 | uint8(0+4)) +} + +func (e *bincEncDriver[T]) writeNilMap() { + e.writeNilOr(bincVdMap<<4 | uint8(0+4)) +} + +func (e *bincEncDriver[T]) writeNilBytes() { + e.writeNilOr(bincVdArray<<4 | uint8(0+4)) +} + +func (e *bincEncDriver[T]) encBytesLen(c charEncoding, length uint64) { // MARKER: we currently only support UTF-8 (string) and RAW (bytearray). // We should consider supporting bincUnicodeOther. @@ -391,74 +324,54 @@ func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { } } -func (e *bincEncDriver) encLen(bd byte, l uint64) { +func (e *bincEncDriver[T]) encLen(bd byte, l uint64) { if l < 12 { - e.e.encWr.writen1(bd | uint8(l+4)) + e.w.writen1(bd | uint8(l+4)) } else { e.encLenNumber(bd, l) } } -func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { +func (e *bincEncDriver[T]) encLenNumber(bd byte, v uint64) { if v <= math.MaxUint8 { - e.e.encWr.writen2(bd, byte(v)) + e.w.writen2(bd, byte(v)) } else if v <= math.MaxUint16 { - e.e.encWr.writen1(bd | 0x01) - bigen.writeUint16(e.e.w(), uint16(v)) + e.w.writen1(bd | 0x01) + e.w.writen2(bigen.PutUint16(uint16(v))) } else if v <= math.MaxUint32 { - e.e.encWr.writen1(bd | 0x02) - bigen.writeUint32(e.e.w(), uint32(v)) + e.w.writen1(bd | 0x02) + e.w.writen4(bigen.PutUint32(uint32(v))) } else { - e.e.encWr.writen1(bd | 0x03) - bigen.writeUint64(e.e.w(), uint64(v)) + e.w.writen1(bd | 0x03) + e.w.writen8(bigen.PutUint64(uint64(v))) } } //------------------------------------ -type bincDecState struct { - bdRead bool - bd byte - vd byte - vs byte - - _ bool - // MARKER: consider using binary search here instead of a map (ie bincDecSymbol) - s map[uint16][]byte -} - -func (x bincDecState) captureState() interface{} { return x } -func (x *bincDecState) resetState() { *x = bincDecState{} } -func (x *bincDecState) reset() { x.resetState() } -func (x *bincDecState) restoreState(v interface{}) { *x = v.(bincDecState) } - -type bincDecDriver struct { +type bincDecDriver[T decReader] struct { decDriverNoopContainerReader - decDriverNoopNumberHelper + // decDriverNoopNumberHelper + decInit2er noBuiltInTypes h *BincHandle + d *decoderBase + r T bincDecState - d Decoder + + // bytes bool } -func (d *bincDecDriver) decoder() *Decoder { - return &d.d -} - -func (d *bincDecDriver) descBd() string { - return sprintf("%v (%s)", d.bd, bincdescbd(d.bd)) -} - -func (d *bincDecDriver) readNextBd() { - d.bd = d.d.decRd.readn1() +func (d *bincDecDriver[T]) readNextBd() { + d.bd = d.r.readn1() d.vd = d.bd >> 4 d.vs = d.bd & 0x0f d.bdRead = true } -func (d *bincDecDriver) advanceNil() (null bool) { +func (d *bincDecDriver[T]) 
advanceNil() (null bool) { if !d.bdRead { d.readNextBd() } @@ -469,11 +382,11 @@ func (d *bincDecDriver) advanceNil() (null bool) { return } -func (d *bincDecDriver) TryNil() bool { +func (d *bincDecDriver[T]) TryNil() bool { return d.advanceNil() } -func (d *bincDecDriver) ContainerType() (vt valueType) { +func (d *bincDecDriver[T]) ContainerType() (vt valueType) { if !d.bdRead { d.readNextBd() } @@ -492,33 +405,33 @@ func (d *bincDecDriver) ContainerType() (vt valueType) { return valueTypeUnset } -func (d *bincDecDriver) DecodeTime() (t time.Time) { +func (d *bincDecDriver[T]) DecodeTime() (t time.Time) { if d.advanceNil() { return } if d.vd != bincVdTimestamp { - d.d.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + halt.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) } - t, err := bincDecodeTime(d.d.decRd.readx(uint(d.vs))) + t, err := bincDecodeTime(d.r.readx(uint(d.vs))) halt.onerror(err) d.bdRead = false return } -func (d *bincDecDriver) decFloatPruned(maxlen uint8) { - l := d.d.decRd.readn1() +func (d *bincDecDriver[T]) decFloatPruned(maxlen uint8) { + l := d.r.readn1() if l > maxlen { - d.d.errorf("cannot read float - at most %v bytes used to represent float - received %v bytes", maxlen, l) + halt.errorf("cannot read float - at most %v bytes used to represent float - received %v bytes", maxlen, l) } for i := l; i < maxlen; i++ { d.d.b[i] = 0 } - d.d.decRd.readb(d.d.b[0:l]) + d.r.readb(d.d.b[0:l]) } -func (d *bincDecDriver) decFloatPre32() (b [4]byte) { +func (d *bincDecDriver[T]) decFloatPre32() (b [4]byte) { if d.vs&0x8 == 0 { - b = d.d.decRd.readn4() + b = d.r.readn4() } else { d.decFloatPruned(4) copy(b[:], d.d.b[:]) @@ -526,9 +439,9 @@ func (d *bincDecDriver) decFloatPre32() (b [4]byte) { return } -func (d *bincDecDriver) decFloatPre64() (b [8]byte) { +func (d *bincDecDriver[T]) decFloatPre64() (b [8]byte) { if d.vs&0x8 == 0 { - b = d.d.decRd.readn8() + b = d.r.readn8() } else { d.decFloatPruned(8) copy(b[:], d.d.b[:]) @@ -536,7 +449,7 @@ func (d *bincDecDriver) decFloatPre64() (b [8]byte) { return } -func (d *bincDecDriver) decFloatVal() (f float64) { +func (d *bincDecDriver[T]) decFloatVal() (f float64) { switch d.vs & 0x7 { case bincFlBin32: f = float64(math.Float32frombits(bigen.Uint32(d.decFloatPre32()))) @@ -544,67 +457,68 @@ func (d *bincDecDriver) decFloatVal() (f float64) { f = math.Float64frombits(bigen.Uint64(d.decFloatPre64())) default: // ok = false - d.d.errorf("read float supports only float32/64 - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + halt.errorf("read float supports only float32/64 - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) } return } -func (d *bincDecDriver) decUint() (v uint64) { +func (d *bincDecDriver[T]) decUint() (v uint64) { switch d.vs { case 0: - v = uint64(d.d.decRd.readn1()) + v = uint64(d.r.readn1()) case 1: - v = uint64(bigen.Uint16(d.d.decRd.readn2())) + v = uint64(bigen.Uint16(d.r.readn2())) case 2: - b3 := d.d.decRd.readn3() + b3 := d.r.readn3() var b [4]byte copy(b[1:], b3[:]) v = uint64(bigen.Uint32(b)) case 3: - v = uint64(bigen.Uint32(d.d.decRd.readn4())) + v = uint64(bigen.Uint32(d.r.readn4())) case 4, 5, 6: - var b [8]byte - lim := 7 - d.vs - bs := d.d.b[lim:8] - d.d.decRd.readb(bs) - copy(b[lim:], bs) - v = bigen.Uint64(b) + // lim := 7 - d.vs + // bs := d.d.b[lim:8] + // d.r.readb(bs) + // var b [8]byte + // copy(b[lim:], bs) + // v = bigen.Uint64(b) + bs := d.d.b[:8] + clear(bs) + d.r.readb(bs[(7 - d.vs):]) + v = 
bigen.Uint64(*(*[8]byte)(bs)) case 7: - v = bigen.Uint64(d.d.decRd.readn8()) + v = bigen.Uint64(d.r.readn8()) default: - d.d.errorf("unsigned integers with greater than 64 bits of precision not supported: d.vs: %v %x", d.vs, d.vs) + halt.errorf("unsigned integers with greater than 64 bits of precision not supported: d.vs: %v %x", d.vs, d.vs) } return } -func (d *bincDecDriver) uintBytes() (bs []byte) { +func (d *bincDecDriver[T]) uintBytes() (bs []byte) { switch d.vs { case 0: bs = d.d.b[:1] - bs[0] = d.d.decRd.readn1() + bs[0] = d.r.readn1() + return case 1: bs = d.d.b[:2] - d.d.decRd.readb(bs) case 2: bs = d.d.b[:3] - d.d.decRd.readb(bs) case 3: bs = d.d.b[:4] - d.d.decRd.readb(bs) case 4, 5, 6: lim := 7 - d.vs bs = d.d.b[lim:8] - d.d.decRd.readb(bs) case 7: bs = d.d.b[:8] - d.d.decRd.readb(bs) default: - d.d.errorf("unsigned integers with greater than 64 bits of precision not supported: d.vs: %v %x", d.vs, d.vs) + halt.errorf("unsigned integers with greater than 64 bits of precision not supported: d.vs: %v %x", d.vs, d.vs) } + d.r.readb(bs) return } -func (d *bincDecDriver) decInteger() (ui uint64, neg, ok bool) { +func (d *bincDecDriver[T]) decInteger() (ui uint64, neg, ok bool) { ok = true vd, vs := d.vd, d.vs if vd == bincVdPosInt { @@ -622,16 +536,16 @@ func (d *bincDecDriver) decInteger() (ui uint64, neg, ok bool) { ui = 1 } else { ok = false - // d.d.errorf("integer decode has invalid special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) + // halt.errorf("integer decode has invalid special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) } } else { ok = false - // d.d.errorf("integer can only be decoded from int/uint. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + // halt.errorf("integer can only be decoded from int/uint. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) } return } -func (d *bincDecDriver) decFloat() (f float64, ok bool) { +func (d *bincDecDriver[T]) decFloat() (f float64, ok bool) { ok = true vd, vs := d.vd, d.vs if vd == bincVdSpecial { @@ -645,7 +559,7 @@ func (d *bincDecDriver) decFloat() (f float64, ok bool) { f = math.Inf(-1) } else { ok = false - // d.d.errorf("float - invalid special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) + // halt.errorf("float - invalid special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) } } else if vd == bincVdFloat { f = d.decFloatVal() @@ -655,34 +569,36 @@ func (d *bincDecDriver) decFloat() (f float64, ok bool) { return } -func (d *bincDecDriver) DecodeInt64() (i int64) { +func (d *bincDecDriver[T]) DecodeInt64() (i int64) { if d.advanceNil() { return } - i = decNegintPosintFloatNumberHelper{&d.d}.int64(d.decInteger()) + v1, v2, v3 := d.decInteger() + i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, false) d.bdRead = false return } -func (d *bincDecDriver) DecodeUint64() (ui uint64) { +func (d *bincDecDriver[T]) DecodeUint64() (ui uint64) { if d.advanceNil() { return } - ui = decNegintPosintFloatNumberHelper{&d.d}.uint64(d.decInteger()) + ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger()) d.bdRead = false return } -func (d *bincDecDriver) DecodeFloat64() (f float64) { +func (d *bincDecDriver[T]) DecodeFloat64() (f float64) { if d.advanceNil() { return } - f = decNegintPosintFloatNumberHelper{&d.d}.float64(d.decFloat()) + v1, v2 := d.decFloat() + f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, false) d.bdRead = false return } -func (d *bincDecDriver) DecodeBool() (b bool) { +func (d *bincDecDriver[T]) DecodeBool() (b bool) { if d.advanceNil() { return } @@ -691,201 +607,193 @@ func (d *bincDecDriver) 
DecodeBool() (b bool) { } else if d.bd == (bincVdSpecial | bincSpTrue) { b = true } else { - d.d.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + halt.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) } d.bdRead = false return } -func (d *bincDecDriver) ReadMapStart() (length int) { +func (d *bincDecDriver[T]) ReadMapStart() (length int) { if d.advanceNil() { return containerLenNil } if d.vd != bincVdMap { - d.d.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + halt.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) } length = d.decLen() d.bdRead = false return } -func (d *bincDecDriver) ReadArrayStart() (length int) { +func (d *bincDecDriver[T]) ReadArrayStart() (length int) { if d.advanceNil() { return containerLenNil } if d.vd != bincVdArray { - d.d.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + halt.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) } length = d.decLen() d.bdRead = false return } -func (d *bincDecDriver) decLen() int { +func (d *bincDecDriver[T]) decLen() int { if d.vs > 3 { return int(d.vs - 4) } return int(d.decLenNumber()) } -func (d *bincDecDriver) decLenNumber() (v uint64) { +func (d *bincDecDriver[T]) decLenNumber() (v uint64) { if x := d.vs; x == 0 { - v = uint64(d.d.decRd.readn1()) + v = uint64(d.r.readn1()) } else if x == 1 { - v = uint64(bigen.Uint16(d.d.decRd.readn2())) + v = uint64(bigen.Uint16(d.r.readn2())) } else if x == 2 { - v = uint64(bigen.Uint32(d.d.decRd.readn4())) + v = uint64(bigen.Uint32(d.r.readn4())) } else { - v = bigen.Uint64(d.d.decRd.readn8()) + v = bigen.Uint64(d.r.readn8()) } return } -// func (d *bincDecDriver) decStringBytes(bs []byte, zerocopy bool) (bs2 []byte) { -func (d *bincDecDriver) DecodeStringAsBytes() (bs2 []byte) { - d.d.decByteState = decByteStateNone +// func (d *bincDecDriver[T]) decStringBytes(bs []byte, zerocopy bool) (bs2 []byte) { +func (d *bincDecDriver[T]) DecodeStringAsBytes() (bs []byte, state dBytesAttachState) { if d.advanceNil() { return } + var cond bool var slen = -1 switch d.vd { case bincVdString, bincVdByteArray: slen = d.decLen() - if d.d.bytes { - d.d.decByteState = decByteStateZerocopy - bs2 = d.d.decRd.rb.readx(uint(slen)) - } else { - d.d.decByteState = decByteStateReuseBuf - bs2 = decByteSlice(d.d.r(), slen, d.d.h.MaxInitLen, d.d.b[:]) - } + bs, cond = d.r.readxb(uint(slen)) + state = d.d.attachState(cond) case bincVdSymbol: // zerocopy doesn't apply for symbols, // as the values must be stored in a table for later use. var symbol uint16 vs := d.vs if vs&0x8 == 0 { - symbol = uint16(d.d.decRd.readn1()) + symbol = uint16(d.r.readn1()) } else { - symbol = uint16(bigen.Uint16(d.d.decRd.readn2())) + symbol = uint16(bigen.Uint16(d.r.readn2())) } if d.s == nil { d.s = make(map[uint16][]byte, 16) } if vs&0x4 == 0 { - bs2 = d.s[symbol] + bs = d.s[symbol] } else { switch vs & 0x3 { case 0: - slen = int(d.d.decRd.readn1()) + slen = int(d.r.readn1()) case 1: - slen = int(bigen.Uint16(d.d.decRd.readn2())) + slen = int(bigen.Uint16(d.r.readn2())) case 2: - slen = int(bigen.Uint32(d.d.decRd.readn4())) + slen = int(bigen.Uint32(d.r.readn4())) case 3: - slen = int(bigen.Uint64(d.d.decRd.readn8())) + slen = int(bigen.Uint64(d.r.readn8())) } // As we are using symbols, do not store any part of // the parameter bs in the map, as it might be a shared buffer. 
- bs2 = decByteSlice(d.d.r(), slen, d.d.h.MaxInitLen, nil) - d.s[symbol] = bs2 + bs, cond = d.r.readxb(uint(slen)) + bs = d.d.detach2Bytes(bs, d.d.attachState(cond)) + d.s[symbol] = bs } + state = dBytesDetach default: - d.d.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + halt.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) } - if d.h.ValidateUnicode && !utf8.Valid(bs2) { - d.d.errorf("DecodeStringAsBytes: invalid UTF-8: %s", bs2) + if d.h.ValidateUnicode && !utf8.Valid(bs) { + halt.errorf("DecodeStringAsBytes: invalid UTF-8: %s", bs) } d.bdRead = false return } -func (d *bincDecDriver) DecodeBytes(bs []byte) (bsOut []byte) { - d.d.decByteState = decByteStateNone +func (d *bincDecDriver[T]) DecodeBytes() (bs []byte, state dBytesAttachState) { if d.advanceNil() { return } + var cond bool if d.vd == bincVdArray { - if bs == nil { - bs = d.d.b[:] - d.d.decByteState = decByteStateReuseBuf - } slen := d.ReadArrayStart() - var changed bool - if bs, changed = usableByteSlice(bs, slen); changed { - d.d.decByteState = decByteStateNone - } + bs, cond = usableByteSlice(d.d.buf, slen) for i := 0; i < slen; i++ { bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8)) } for i := len(bs); i < slen; i++ { bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) } - return bs - } - var clen int - if d.vd == bincVdString || d.vd == bincVdByteArray { - clen = d.decLen() - } else { - d.d.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) - } - d.bdRead = false - if d.d.zerocopy() { - d.d.decByteState = decByteStateZerocopy - return d.d.decRd.rb.readx(uint(clen)) - } - if bs == nil { - bs = d.d.b[:] - d.d.decByteState = decByteStateReuseBuf - } - return decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, bs) -} - -func (d *bincDecDriver) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { - if xtag > 0xff { - d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) - } - if d.advanceNil() { + if cond { + d.d.buf = bs + } + state = dBytesAttachBuffer return } - xbs, realxtag1, zerocopy := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag := uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.setData(xbs, zerocopy) - } else if ext == SelfExt { - d.d.sideDecode(rv, basetype, xbs) + if !(d.vd == bincVdString || d.vd == bincVdByteArray) { + halt.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + clen := d.decLen() + d.bdRead = false + bs, cond = d.r.readxb(uint(clen)) + state = d.d.attachState(cond) + return +} + +func (d *bincDecDriver[T]) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + xbs, _, _, ok := d.decodeExtV(ext != nil, xtag) + if !ok { + return + } + if ext == SelfExt { + sideDecode(d.h, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv, xbs, basetype, true) }) } else { ext.ReadExt(rv, xbs) } } -func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xbs []byte, xtag byte, zerocopy bool) { +func (d *bincDecDriver[T]) DecodeRawExt(re *RawExt) { + xbs, realxtag, state, ok := d.decodeExtV(false, 0) + if !ok { + return + } + re.Tag = uint64(realxtag) + re.setData(xbs, state >= dBytesAttachViewZerocopy) +} + +func (d *bincDecDriver[T]) decodeExtV(verifyTag bool, xtagIn uint64) (xbs []byte, xtag byte, bstate dBytesAttachState, ok bool) { + if xtagIn > 0xff { + halt.errorf("ext: tag must be <= 0xff; got: %v", xtagIn) + } + if d.advanceNil() { + return + } + tag := uint8(xtagIn) if d.vd == bincVdCustomExt { l := 
d.decLen() - xtag = d.d.decRd.readn1() + xtag = d.r.readn1() if verifyTag && xtag != tag { - d.d.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag) - } - if d.d.bytes { - xbs = d.d.decRd.rb.readx(uint(l)) - zerocopy = true - } else { - xbs = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:]) + halt.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag) } + xbs, ok = d.r.readxb(uint(l)) + bstate = d.d.attachState(ok) + // zerocopy = d.d.bytes } else if d.vd == bincVdByteArray { - xbs = d.DecodeBytes(nil) + xbs, bstate = d.DecodeBytes() } else { - d.d.errorf("ext expects extensions or byte array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + halt.errorf("ext expects extensions or byte array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) } d.bdRead = false + ok = true return } -func (d *bincDecDriver) DecodeNaked() { +func (d *bincDecDriver[T]) DecodeNaked() { if !d.bdRead { d.readNextBd() } @@ -923,7 +831,7 @@ func (d *bincDecDriver) DecodeNaked() { n.v = valueTypeInt n.i = int64(-1) // int8(-1) default: - d.d.errorf("cannot infer value - unrecognized special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) + halt.errorf("cannot infer value - unrecognized special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) } case bincVdSmallInt: n.v = valueTypeUint @@ -939,26 +847,22 @@ func (d *bincDecDriver) DecodeNaked() { n.f = d.decFloatVal() case bincVdString: n.v = valueTypeString - n.s = d.d.stringZC(d.DecodeStringAsBytes()) + n.s = d.d.detach2Str(d.DecodeStringAsBytes()) case bincVdByteArray: - d.d.fauxUnionReadRawBytes(false) + d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) //, d.h.ZeroCopy) case bincVdSymbol: n.v = valueTypeSymbol - n.s = d.d.stringZC(d.DecodeStringAsBytes()) + n.s = d.d.detach2Str(d.DecodeStringAsBytes()) case bincVdTimestamp: n.v = valueTypeTime - tt, err := bincDecodeTime(d.d.decRd.readx(uint(d.vs))) + tt, err := bincDecodeTime(d.r.readx(uint(d.vs))) halt.onerror(err) n.t = tt case bincVdCustomExt: n.v = valueTypeExt l := d.decLen() - n.u = uint64(d.d.decRd.readn1()) - if d.d.bytes { - n.l = d.d.decRd.rb.readx(uint(l)) - } else { - n.l = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:]) - } + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(uint(l)) case bincVdArray: n.v = valueTypeArray decodeFurther = true @@ -966,7 +870,7 @@ func (d *bincDecDriver) DecodeNaked() { n.v = valueTypeMap decodeFurther = true default: - d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + halt.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) } if !decodeFurther { @@ -978,49 +882,39 @@ func (d *bincDecDriver) DecodeNaked() { } } -func (d *bincDecDriver) nextValueBytes(v0 []byte) (v []byte) { +func (d *bincDecDriver[T]) nextValueBytes() (v []byte) { if !d.bdRead { d.readNextBd() } - v = v0 - var h = decNextValueBytesHelper{d: &d.d} - var cursor = d.d.rb.c - 1 - h.append1(&v, d.bd) - v = d.nextValueBytesBdReadR(v) + d.r.startRecording() + d.nextValueBytesBdReadR() + v = d.r.stopRecording() d.bdRead = false - h.bytesRdV(&v, cursor) return } -func (d *bincDecDriver) nextValueBytesR(v0 []byte) (v []byte) { - d.readNextBd() - v = v0 - var h = decNextValueBytesHelper{d: &d.d} - h.append1(&v, d.bd) - return d.nextValueBytesBdReadR(v) -} - -func (d *bincDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) { - v = v0 - var h = decNextValueBytesHelper{d: &d.d} +// func (d *bincDecDriver[T]) nextValueBytesR(v0 []byte) (v []byte) { +// d.readNextBd() +// v = 
v0 +// var h decNextValueBytesHelper +// h.append1(&v, d.bytes, d.bd) +// return d.nextValueBytesBdReadR(v) +// } +func (d *bincDecDriver[T]) nextValueBytesBdReadR() { fnLen := func(vs byte) uint { switch vs { case 0: - x := d.d.decRd.readn1() - h.append1(&v, x) + x := d.r.readn1() return uint(x) case 1: - x := d.d.decRd.readn2() - h.appendN(&v, x[:]...) + x := d.r.readn2() return uint(bigen.Uint16(x)) case 2: - x := d.d.decRd.readn4() - h.appendN(&v, x[:]...) + x := d.r.readn4() return uint(bigen.Uint32(x)) case 3: - x := d.d.decRd.readn8() - h.appendN(&v, x[:]...) + x := d.r.readn8() return uint(bigen.Uint64(x)) default: return uint(vs - 4) @@ -1035,23 +929,20 @@ func (d *bincDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) { case bincSpNil, bincSpFalse, bincSpTrue, bincSpNan, bincSpPosInf: // pass case bincSpNegInf, bincSpZeroFloat, bincSpZero, bincSpNegOne: // pass default: - d.d.errorf("cannot infer value - unrecognized special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) + halt.errorf("cannot infer value - unrecognized special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) } case bincVdSmallInt: // pass case bincVdPosInt, bincVdNegInt: - bs := d.uintBytes() - h.appendN(&v, bs...) + d.uintBytes() case bincVdFloat: fn := func(xlen byte) { if d.vs&0x8 != 0 { - xlen = d.d.decRd.readn1() - h.append1(&v, xlen) + xlen = d.r.readn1() if xlen > 8 { - d.d.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", xlen) + halt.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", xlen) } } - d.d.decRd.readb(d.d.b[:xlen]) - h.appendN(&v, d.d.b[:xlen]...) + d.r.readb(d.d.b[:xlen]) } switch d.vs & 0x7 { case bincFlBin32: @@ -1059,261 +950,117 @@ func (d *bincDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) { case bincFlBin64: fn(8) default: - d.d.errorf("read float supports only float32/64 - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + halt.errorf("read float supports only float32/64 - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) } case bincVdString, bincVdByteArray: clen = fnLen(d.vs) - h.appendN(&v, d.d.decRd.readx(clen)...) + d.r.skip(clen) case bincVdSymbol: if d.vs&0x8 == 0 { - h.append1(&v, d.d.decRd.readn1()) + d.r.readn1() } else { - h.appendN(&v, d.d.decRd.rb.readx(2)...) + d.r.skip(2) } if d.vs&0x4 != 0 { clen = fnLen(d.vs & 0x3) - h.appendN(&v, d.d.decRd.readx(clen)...) + d.r.skip(clen) } case bincVdTimestamp: - h.appendN(&v, d.d.decRd.readx(uint(d.vs))...) + d.r.skip(uint(d.vs)) case bincVdCustomExt: clen = fnLen(d.vs) - h.append1(&v, d.d.decRd.readn1()) // tag - h.appendN(&v, d.d.decRd.readx(clen)...) + d.r.readn1() // tag + d.r.skip(clen) case bincVdArray: clen = fnLen(d.vs) for i := uint(0); i < clen; i++ { - v = d.nextValueBytesR(v) + d.readNextBd() + d.nextValueBytesBdReadR() } case bincVdMap: clen = fnLen(d.vs) for i := uint(0); i < clen; i++ { - v = d.nextValueBytesR(v) - v = d.nextValueBytesR(v) + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() } default: - d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + halt.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) } return } -//------------------------------------ - -// BincHandle is a Handle for the Binc Schema-Free Encoding Format -// defined at https://github.com/ugorji/binc . 
+// ---- // -// BincHandle currently supports all Binc features with the following EXCEPTIONS: -// - only integers up to 64 bits of precision are supported. -// big integers are unsupported. -// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). -// extended precision and decimal IEEE 754 floats are unsupported. -// - Only UTF-8 strings supported. -// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +// The following below are similar across all format files (except for the format name). // -// Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. -type BincHandle struct { - BasicHandle - binaryEncodingType - // noElemSeparators +// We keep them together here, so that we can easily copy and compare. - // AsSymbols defines what should be encoded as symbols. - // - // Encoding as symbols can reduce the encoded size significantly. - // - // However, during decoding, each string to be encoded as a symbol must - // be checked to see if it has been seen before. Consequently, encoding time - // will increase if using symbols, because string comparisons has a clear cost. - // - // Values: - // - 0: default: library uses best judgement - // - 1: use symbols - // - 2: do not use symbols - AsSymbols uint8 +// ---- - // AsSymbols: may later on introduce more options ... - // - m: map keys - // - s: struct fields - // - n: none - // - a: all: same as m, s, ... - - // _ [7]uint64 // padding (cache-aligned) -} - -// Name returns the name of the handle: binc -func (h *BincHandle) Name() string { return "binc" } - -func (h *BincHandle) desc(bd byte) string { return bincdesc(bd>>4, bd&0x0f) } - -func (h *BincHandle) newEncDriver() encDriver { - var e = &bincEncDriver{h: h} - e.e.e = e - e.e.init(h) - e.reset() - return e -} - -func (h *BincHandle) newDecDriver() decDriver { - d := &bincDecDriver{h: h} - d.d.d = d - d.d.init(h) - d.reset() - return d -} - -// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} - -// EncodeTime encodes a time.Time as a []byte, including -// information on the instant in time and UTC offset. -// -// Format Description -// -// A timestamp is composed of 3 components: -// -// - secs: signed integer representing seconds since unix epoch -// - nsces: unsigned integer representing fractional seconds as a -// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 -// - tz: signed integer representing timezone offset in minutes east of UTC, -// and a dst (daylight savings time) flag -// -// When encoding a timestamp, the first byte is the descriptor, which -// defines which components are encoded and how many bytes are used to -// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it -// is not encoded in the byte array explicitly*. -// -// Descriptor 8 bits are of the form `A B C DDD EE`: -// A: Is secs component encoded? 1 = true -// B: Is nsecs component encoded? 1 = true -// C: Is tz component encoded? 1 = true -// DDD: Number of extra bytes for secs (range 0-7). -// If A = 1, secs encoded in DDD+1 bytes. -// If A = 0, secs is not encoded, and is assumed to be 0. -// If A = 1, then we need at least 1 byte to encode secs. -// DDD says the number of extra bytes beyond that 1. -// E.g. if DDD=0, then secs is represented in 1 byte. -// if DDD=2, then secs is represented in 3 bytes. -// EE: Number of extra bytes for nsecs (range 0-3). 
-// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) -// -// Following the descriptor bytes, subsequent bytes are: -// -// secs component encoded in `DDD + 1` bytes (if A == 1) -// nsecs component encoded in `EE + 1` bytes (if B == 1) -// tz component encoded in 2 bytes (if C == 1) -// -// secs and nsecs components are integers encoded in a BigEndian -// 2-complement encoding format. -// -// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to -// Least significant bit 0 are described below: -// -// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). -// Bit 15 = have\_dst: set to 1 if we set the dst flag. -// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. -// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. -func bincEncodeTime(t time.Time) []byte { - // t := rv2i(rv).(time.Time) - tsecs, tnsecs := t.Unix(), t.Nanosecond() - var ( - bd byte - bs [16]byte - i int = 1 - ) - l := t.Location() - if l == time.UTC { - l = nil - } - if tsecs != 0 { - bd = bd | 0x80 - btmp := bigen.PutUint64(uint64(tsecs)) - f := pruneSignExt(btmp[:], tsecs >= 0) - bd = bd | (byte(7-f) << 2) - copy(bs[i:], btmp[f:]) - i = i + (8 - f) - } - if tnsecs != 0 { - bd = bd | 0x40 - btmp := bigen.PutUint32(uint32(tnsecs)) - f := pruneSignExt(btmp[:4], true) - bd = bd | byte(3-f) - copy(bs[i:], btmp[f:4]) - i = i + (4 - f) - } - if l != nil { - bd = bd | 0x20 - // Note that Go Libs do not give access to dst flag. - _, zoneOffset := t.Zone() - // zoneName, zoneOffset := t.Zone() - zoneOffset /= 60 - z := uint16(zoneOffset) - btmp := bigen.PutUint16(z) - // clear dst flags - bs[i] = btmp[0] & 0x3f - bs[i+1] = btmp[1] - i = i + 2 - } - bs[0] = bd - return bs[0:i] -} - -// bincDecodeTime decodes a []byte into a time.Time. -func bincDecodeTime(bs []byte) (tt time.Time, err error) { - bd := bs[0] - var ( - tsec int64 - tnsec uint32 - tz uint16 - i byte = 1 - i2 byte - n byte - ) - if bd&(1<<7) != 0 { - var btmp [8]byte - n = ((bd >> 2) & 0x7) + 1 - i2 = i + n - copy(btmp[8-n:], bs[i:i2]) - // if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) - if bs[i]&(1<<7) != 0 { - copy(btmp[0:8-n], bsAll0xff) - } - i = i2 - tsec = int64(bigen.Uint64(btmp)) - } - if bd&(1<<6) != 0 { - var btmp [4]byte - n = (bd & 0x3) + 1 - i2 = i + n - copy(btmp[4-n:], bs[i:i2]) - i = i2 - tnsec = bigen.Uint32(btmp) - } - if bd&(1<<5) == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - return - } - // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. - // However, we need name here, so it can be shown when time is printf.d. - // Zone name is in form: UTC-08:00. - // Note that Go Libs do not give access to dst flag, so we ignore dst bits - - tz = bigen.Uint16([2]byte{bs[i], bs[i+1]}) - // sign extend sign bit into top 2 MSB (which were dst bits): - if tz&(1<<13) == 0 { // positive - tz = tz & 0x3fff //clear 2 MSBs: dst bits - } else { // negative - tz = tz | 0xc000 //set 2 MSBs: dst bits - } - tzint := int16(tz) - if tzint == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() +func (d *bincEncDriver[T]) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*BincHandle) + d.e = shared + if shared.bytes { + fp = bincFpEncBytes } else { - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. 
- // The Offset already tells what the offset should be, if not on UTC and unknown zone name. - // var zoneName = timeLocUTCName(tzint) - tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + fp = bincFpEncIO } + // d.w.init() + d.init2(enc) return } -var _ decDriver = (*bincDecDriver)(nil) -var _ encDriver = (*bincEncDriver)(nil) +func (e *bincEncDriver[T]) writeBytesAsis(b []byte) { e.w.writeb(b) } + +// func (e *bincEncDriver[T]) writeStringAsisDblQuoted(v string) { e.w.writeqstr(v) } + +func (e *bincEncDriver[T]) writerEnd() { e.w.end() } + +func (e *bincEncDriver[T]) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *bincEncDriver[T]) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +// ---- + +func (d *bincDecDriver[T]) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*BincHandle) + d.d = shared + if shared.bytes { + fp = bincFpDecBytes + } else { + fp = bincFpDecIO + } + // d.r.init() + d.init2(dec) + return +} + +func (d *bincDecDriver[T]) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *bincDecDriver[T]) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *bincDecDriver[T]) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +// ---- (custom stanza) + +func (d *bincDecDriver[T]) descBd() string { + return sprintf("%v (%s)", d.bd, bincdescbd(d.bd)) +} + +func (d *bincDecDriver[T]) DecodeFloat32() (f float32) { + return float32(chkOvf.Float32V(d.DecodeFloat64())) +} diff --git a/vendor/github.com/ugorji/go/codec/binc.mono.generated.go b/vendor/github.com/ugorji/go/codec/binc.mono.generated.go new file mode 100644 index 000000000..78a76027c --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/binc.mono.generated.go @@ -0,0 +1,8158 @@ +//go:build !notmono && !codec.notmono + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "encoding" + + "io" + "math" + "reflect" + "slices" + "sort" + "strconv" + "sync" + "time" + "unicode/utf8" +) + +type helperEncDriverBincBytes struct{} +type encFnBincBytes struct { + i encFnInfo + fe func(*encoderBincBytes, *encFnInfo, reflect.Value) +} +type encRtidFnBincBytes struct { + rtid uintptr + fn *encFnBincBytes +} +type encoderBincBytes struct { + dh helperEncDriverBincBytes + fp *fastpathEsBincBytes + e bincEncDriverBytes + encoderBase +} +type helperDecDriverBincBytes struct{} +type decFnBincBytes struct { + i decFnInfo + fd func(*decoderBincBytes, *decFnInfo, reflect.Value) +} +type decRtidFnBincBytes struct { + rtid uintptr + fn *decFnBincBytes +} +type decoderBincBytes struct { + dh helperDecDriverBincBytes + fp *fastpathDsBincBytes + d bincDecDriverBytes + decoderBase +} +type bincEncDriverBytes struct { + noBuiltInTypes + encDriverNoopContainerWriter + encDriverContainerNoTrackerT + encInit2er + + h *BincHandle + e *encoderBase + w bytesEncAppender + bincEncState +} +type bincDecDriverBytes struct { + decDriverNoopContainerReader + + decInit2er + noBuiltInTypes + + h *BincHandle + d *decoderBase + r bytesDecReader + + bincDecState +} + +func (e *encoderBincBytes) rawExt(_ *encFnInfo, rv reflect.Value) { + if re := rv2i(rv).(*RawExt); re == nil { + e.e.EncodeNil() + } else { + e.e.EncodeRawExt(re) + } +} + +func (e *encoderBincBytes) ext(f *encFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (e *encoderBincBytes) selferMarshal(_ *encFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(&Encoder{e}) +} + +func (e *encoderBincBytes) binaryMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) +} + +func (e *encoderBincBytes) textMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) +} + +func (e *encoderBincBytes) jsonMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) +} + +func (e *encoderBincBytes) raw(_ *encFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *encoderBincBytes) encodeComplex64(v complex64) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat32(real(v)) +} + +func (e *encoderBincBytes) encodeComplex128(v complex128) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat64(real(v)) +} + +func (e *encoderBincBytes) kBool(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeBool(rvGetBool(rv)) +} + +func (e *encoderBincBytes) kTime(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeTime(rvGetTime(rv)) +} + +func (e *encoderBincBytes) kString(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeString(rvGetString(rv)) +} + +func (e *encoderBincBytes) kFloat32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(rvGetFloat32(rv)) +} + +func (e *encoderBincBytes) kFloat64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rvGetFloat64(rv)) +} + +func (e *encoderBincBytes) kComplex64(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex64(rvGetComplex64(rv)) +} + +func (e *encoderBincBytes) kComplex128(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex128(rvGetComplex128(rv)) +} + +func (e *encoderBincBytes) kInt(_ *encFnInfo, rv reflect.Value) { + 
e.e.EncodeInt(int64(rvGetInt(rv))) +} + +func (e *encoderBincBytes) kInt8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt8(rv))) +} + +func (e *encoderBincBytes) kInt16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt16(rv))) +} + +func (e *encoderBincBytes) kInt32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt32(rv))) +} + +func (e *encoderBincBytes) kInt64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt64(rv))) +} + +func (e *encoderBincBytes) kUint(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint(rv))) +} + +func (e *encoderBincBytes) kUint8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint8(rv))) +} + +func (e *encoderBincBytes) kUint16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint16(rv))) +} + +func (e *encoderBincBytes) kUint32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint32(rv))) +} + +func (e *encoderBincBytes) kUint64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint64(rv))) +} + +func (e *encoderBincBytes) kUintptr(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUintptr(rv))) +} + +func (e *encoderBincBytes) kSeqFn(rt reflect.Type) (fn *encFnBincBytes) { + + if rt = baseRT(rt); rt.Kind() != reflect.Interface { + fn = e.fn(rt) + } + return +} + +func (e *encoderBincBytes) kArrayWMbs(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(l) + e.mapStart(l >> 1) + + var fn *encFnBincBytes + builtin := ti.tielem.flagEncBuiltin + if !builtin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerMapKey + e.e.WriteMapElemKey(true) + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(false) + } else { + e.mapElemValue() + } + } + e.c = 0 + e.e.WriteMapEnd() + +} + +func (e *encoderBincBytes) kArrayW(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l <= 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(l) + + var fn *encFnBincBytes + if !ti.tielem.flagEncBuiltin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerArrayElem + e.e.WriteArrayElem(true) + builtin := ti.tielem.flagEncBuiltin + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + e.c = containerArrayElem + e.e.WriteArrayElem(false) + } + + e.c = 0 + e.e.WriteArrayEnd() +} + +func (e *encoderBincBytes) kChan(f *encFnInfo, rv reflect.Value) { + if f.ti.chandir&uint8(reflect.RecvDir) == 0 { + halt.errorStr("send-only channel cannot be encoded") + } + if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) { + e.kSliceBytesChan(rv) + return + } + rtslice := reflect.SliceOf(f.ti.elem) + rv = chanToSlice(rv, rtslice, e.h.ChanRecvTimeout) + ti := e.h.getTypeInfo(rt2id(rtslice), rtslice) + if f.ti.mbs { + e.kArrayWMbs(rv, ti, true) + } else { + e.kArrayW(rv, ti, true) + } +} + +func (e *encoderBincBytes) kSlice(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, true) + } else if f.ti.rtid == uint8SliceTypId || uint8TypId == rt2id(f.ti.elem) { + + e.e.EncodeBytes(rvGetBytes(rv)) + } else { + e.kArrayW(rv, f.ti, 
true) + } +} + +func (e *encoderBincBytes) kArray(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, false) + } else if handleBytesWithinKArray && uint8TypId == rt2id(f.ti.elem) { + e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, nil)) + } else { + e.kArrayW(rv, f.ti, false) + } +} + +func (e *encoderBincBytes) kSliceBytesChan(rv reflect.Value) { + + bs0 := e.blist.peek(32, true) + bs := bs0 + + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + +L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + + break L1 + } + } + default: + for b := range ch { + bs = append(bs, b) + } + } + + e.e.EncodeBytes(bs) + e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.blist.put(bs0) + } +} + +func (e *encoderBincBytes) kStructFieldKey(keyType valueType, encName string) { + + if keyType == valueTypeString { + e.e.EncodeString(encName) + } else if keyType == valueTypeInt { + e.e.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + e.e.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + e.e.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64))) + } else { + halt.errorStr2("invalid struct key type: ", keyType.String()) + } + +} + +func (e *encoderBincBytes) kStructSimple(f *encFnInfo, rv reflect.Value) { + _ = e.e + tisfi := f.ti.sfi.source() + + chkCirRef := e.h.CheckCircularRef + var si *structFieldInfo + var j int + + if f.ti.toArray || e.h.StructToArray { + if len(tisfi) == 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } else { + if len(tisfi) == 0 { + e.e.WriteMapEmpty() + return + } + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + e.mapStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.EncodeStringNoEscape4Json(si.encName) + e.mapElemValue() + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteMapEnd() + } +} + +func (e *encoderBincBytes) kStruct(f *encFnInfo, rv reflect.Value) { + _ = e.e + ti := f.ti + toMap := !(ti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if ti.flagMissingFielder { + toMap = true + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + } else if ti.flagMissingFielderPtr { + toMap = true + if rv.CanAddr() { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder).CodecMissingFields() + } else { + mf = rv2i(e.addrRV(rv, ti.rt, ti.ptr)).(MissingFielder).CodecMissingFields() + } + } + newlen := len(mf) + tisfi := ti.sfi.source() + newlen += len(tisfi) + + var fkvs = e.slist.get(newlen)[:newlen] + + recur := e.h.RecursiveEmptyCheck + chkCirRef := e.h.CheckCircularRef + + var xlen int + + var kv sfiRv + var j int + var sf encStructFieldObj + if toMap { + newlen = 0 + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + for _, si := range tisfi { + + if si.omitEmpty { + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyValue(kv.r, e.h.TypeInfos, recur) { + 
continue + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[newlen] = kv + newlen++ + } + + var mf2s []stringIntf + if len(mf) != 0 { + mf2s = make([]stringIntf, 0, len(mf)) + for k, v := range mf { + if k == "" { + continue + } + if ti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur) { + continue + } + mf2s = append(mf2s, stringIntf{k, v}) + } + } + + xlen = newlen + len(mf2s) + if xlen == 0 { + e.e.WriteMapEmpty() + goto END + } + + e.mapStart(xlen) + + if len(mf2s) != 0 && e.h.Canonical { + mf2w := make([]encStructFieldObj, newlen+len(mf2s)) + for j = 0; j < newlen; j++ { + kv = fkvs[j] + mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, true, + !kv.v.encNameEscape4Json, kv.v.encBuiltin} + } + for _, v := range mf2s { + mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false, false} + j++ + } + sort.Sort((encStructFieldObjSlice)(mf2w)) + for j, sf = range mf2w { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && sf.noEsc4json { + e.e.EncodeStringNoEscape4Json(sf.key) + } else { + e.kStructFieldKey(ti.keyType, sf.key) + } + e.mapElemValue() + if sf.isRv { + if sf.builtin { + e.encodeIB(rv2i(baseRVRV(sf.rv))) + } else { + e.encodeValue(sf.rv, nil) + } + } else { + if !e.encodeBuiltin(sf.intf) { + e.encodeR(reflect.ValueOf(sf.intf)) + } + + } + } + } else { + keytyp := ti.keyType + for j = 0; j < newlen; j++ { + kv = fkvs[j] + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && !kv.v.encNameEscape4Json { + e.e.EncodeStringNoEscape4Json(kv.v.encName) + } else { + e.kStructFieldKey(keytyp, kv.v.encName) + } + e.mapElemValue() + if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + for _, v := range mf2s { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.kStructFieldKey(keytyp, v.v) + e.mapElemValue() + if !e.encodeBuiltin(v.i) { + e.encodeR(reflect.ValueOf(v.i)) + } + + j++ + } + } + + e.c = 0 + e.e.WriteMapEnd() + } else { + newlen = len(tisfi) + for i, si := range tisfi { + + if si.omitEmpty { + + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyContainerValue(kv.r, e.h.TypeInfos, recur) { + kv.r = reflect.Value{} + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[i] = kv + } + + if newlen == 0 { + e.e.WriteArrayEmpty() + goto END + } + + e.arrayStart(newlen) + for j = 0; j < newlen; j++ { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + kv = fkvs[j] + if !kv.r.IsValid() { + e.e.EncodeNil() + } else if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } + +END: + + e.slist.put(fkvs) +} + +func (e *encoderBincBytes) kMap(f *encFnInfo, rv reflect.Value) { + _ = e.e + l := rvLenMap(rv) + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.mapStart(l) + + var keyFn, valFn *encFnBincBytes + + ktypeKind := reflect.Kind(f.ti.keykind) + vtypeKind := reflect.Kind(f.ti.elemkind) + + rtval := f.ti.elem + rtvalkind := vtypeKind + for rtvalkind == reflect.Ptr { + rtval = rtval.Elem() + rtvalkind = rtval.Kind() + } + if rtvalkind != reflect.Interface { + valFn = e.fn(rtval) + } + + var rvv = mapAddrLoopvarRV(f.ti.elem, vtypeKind) + + rtkey := f.ti.key + var keyTypeIsString = stringTypId == rt2id(rtkey) + if keyTypeIsString { + keyFn = e.fn(rtkey) + } else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != 
reflect.Interface { + keyFn = e.fn(rtkey) + } + } + + if e.h.Canonical { + e.kMapCanonical(f.ti, rv, rvv, keyFn, valFn) + e.c = 0 + e.e.WriteMapEnd() + return + } + + var rvk = mapAddrLoopvarRV(f.ti.key, ktypeKind) + + var it mapIter + mapRange(&it, rv, rvk, rvv, true) + + kbuiltin := f.ti.tikey.flagEncBuiltin + vbuiltin := f.ti.tielem.flagEncBuiltin + for j := 0; it.Next(); j++ { + rv = it.Key() + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if keyTypeIsString { + e.e.EncodeString(rvGetString(rv)) + } else if kbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(rv, keyFn) + } + e.mapElemValue() + rv = it.Value() + if vbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(it.Value(), valFn) + } + } + it.Done() + + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincBytes) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valFn *encFnBincBytes) { + _ = e.e + + rtkey := ti.key + rtkeydecl := rtkey.PkgPath() == "" && rtkey.Name() != "" + + mks := rv.MapKeys() + rtkeyKind := rtkey.Kind() + mparams := getMapReqParams(ti) + + switch rtkeyKind { + case reflect.Bool: + + if len(mks) == 2 && mks[0].Bool() { + mks[0], mks[1] = mks[1], mks[0] + } + for i := range mks { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeBool(mks[i].Bool()) + } else { + e.encodeValueNonNil(mks[i], keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mks[i], rvv, mparams), valFn) + } + case reflect.String: + mksv := make([]orderedRv[string], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rvGetString(k) + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeString(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]orderedRv[uint64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeUint(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]orderedRv[int64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeInt(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float32: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat32(float32(mksv[i].v)) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float64: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, 
cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat64(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + default: + if rtkey == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + slices.SortFunc(mksv, cmpTimeRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeTime(mksv[i].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + break + } + + bs0 := e.blist.get(len(mks) * 16) + mksv := bs0 + mksbv := make([]bytesRv, len(mks)) + + sideEncode(e.hh, &e.h.sideEncPool, func(se encoderI) { + se.ResetBytes(&mksv) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + se.setContainerState(containerMapKey) + se.encodeR(baseRVRV(k)) + se.atEndOfEncode() + se.writerEnd() + v.r = k + v.v = mksv[l:] + } + }) + + slices.SortFunc(mksbv, cmpBytesRv) + for j := range mksbv { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.writeBytesAsis(mksbv[j].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksbv[j].r, rvv, mparams), valFn) + } + e.blist.put(mksv) + if !byteSliceSameData(bs0, mksv) { + e.blist.put(bs0) + } + } +} + +func (e *encoderBincBytes) init(h Handle) { + initHandle(h) + callMake(&e.e) + e.hh = h + e.h = h.getBasicHandle() + + e.err = errEncoderNotInitialized + + e.fp = e.e.init(h, &e.encoderBase, e).(*fastpathEsBincBytes) + + if e.bytes { + e.rtidFn = &e.h.rtidFnsEncBytes + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtBytes + } else { + e.rtidFn = &e.h.rtidFnsEncIO + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtIO + } + + e.reset() +} + +func (e *encoderBincBytes) reset() { + e.e.reset() + if e.ci != nil { + e.ci = e.ci[:0] + } + e.c = 0 + e.calls = 0 + e.seq = 0 + e.err = nil +} + +func (e *encoderBincBytes) Encode(v interface{}) (err error) { + + defer panicValToErr(e, callRecoverSentinel, &e.err, &err, debugging) + e.mustEncode(v) + return +} + +func (e *encoderBincBytes) MustEncode(v interface{}) { + defer panicValToErr(e, callRecoverSentinel, &e.err, nil, true) + e.mustEncode(v) + return +} + +func (e *encoderBincBytes) mustEncode(v interface{}) { + halt.onerror(e.err) + if e.hh == nil { + halt.onerror(errNoFormatHandle) + } + + e.calls++ + if !e.encodeBuiltin(v) { + e.encodeR(reflect.ValueOf(v)) + } + + e.calls-- + if e.calls == 0 { + e.e.atEndOfEncode() + e.e.writerEnd() + } +} + +func (e *encoderBincBytes) encodeI(iv interface{}) { + if !e.encodeBuiltin(iv) { + e.encodeR(reflect.ValueOf(iv)) + } +} + +func (e *encoderBincBytes) encodeIB(iv interface{}) { + if !e.encodeBuiltin(iv) { + + halt.errorStr("[should not happen] invalid type passed to encodeBuiltin") + } +} + +func (e *encoderBincBytes) encodeR(base reflect.Value) { + e.encodeValue(base, nil) +} + +func (e *encoderBincBytes) encodeBuiltin(iv interface{}) (ok bool) { + ok = true + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + + case Raw: + e.rawBytes(v) + case string: + e.e.EncodeString(v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + 
e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case complex64: + e.encodeComplex64(v) + case complex128: + e.encodeComplex128(v) + case time.Time: + e.e.EncodeTime(v) + case []byte: + e.e.EncodeBytes(v) + default: + + ok = !skipFastpathTypeSwitchInDirectCall && e.dh.fastpathEncodeTypeSwitch(iv, e) + } + return +} + +func (e *encoderBincBytes) encodeValue(rv reflect.Value, fn *encFnBincBytes) { + + var ciPushes int + + var rvp reflect.Value + var rvpValid bool + +RV: + switch rv.Kind() { + case reflect.Ptr: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = true + rvp = rv + rv = rv.Elem() + + if e.h.CheckCircularRef && e.ci.canPushElemKind(rv.Kind()) { + e.ci.push(rv2i(rvp)) + ciPushes++ + } + goto RV + case reflect.Interface: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = false + rvp = reflect.Value{} + rv = rv.Elem() + fn = nil + goto RV + case reflect.Map: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Slice, reflect.Chan: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + goto END + } + + if fn == nil { + fn = e.fn(rv.Type()) + } + + if !fn.i.addrE { + + } else if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + fn.fe(e, &fn.i, rv) + +END: + if ciPushes > 0 { + e.ci.pop(ciPushes) + } +} + +func (e *encoderBincBytes) encodeValueNonNil(rv reflect.Value, fn *encFnBincBytes) { + + if fn.i.addrE { + if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + } + fn.fe(e, &fn.i, rv) +} + +func (e *encoderBincBytes) encodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + e.encodeValue(baseRV(v), e.fn(t)) + } else { + e.encodeValue(baseRV(v), e.fnNoExt(t)) + } +} + +func (e *encoderBincBytes) marshalUtf8(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeString(stringView(bs)) + } +} + +func (e *encoderBincBytes) marshalAsis(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.writeBytesAsis(bs) + } +} + +func (e *encoderBincBytes) marshalRaw(bs []byte, fnerr error) { + halt.onerror(fnerr) + e.e.EncodeBytes(bs) +} + +func (e *encoderBincBytes) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + halt.errorBytes("Raw values cannot be encoded: ", v) + } + e.e.writeBytesAsis(v) +} + +func (e *encoderBincBytes) fn(t reflect.Type) *encFnBincBytes { + return e.dh.encFnViaBH(t, e.rtidFn, e.h, e.fp, false) +} + +func (e *encoderBincBytes) fnNoExt(t reflect.Type) *encFnBincBytes { + return e.dh.encFnViaBH(t, e.rtidFnNoExt, e.h, e.fp, true) +} + +func (e *encoderBincBytes) mapStart(length int) { + e.e.WriteMapStart(length) + e.c = containerMapStart +} + +func (e *encoderBincBytes) mapElemValue() { + e.e.WriteMapElemValue() + e.c = containerMapValue +} + +func (e *encoderBincBytes) arrayStart(length int) { + e.e.WriteArrayStart(length) + e.c = containerArrayStart +} + +func (e *encoderBincBytes) writerEnd() { + e.e.writerEnd() +} + +func (e *encoderBincBytes) atEndOfEncode() { + e.e.atEndOfEncode() +} + +func (e *encoderBincBytes) Reset(w io.Writer) { + if e.bytes { + 
halt.onerror(errEncNoResetBytesWithWriter) + } + e.reset() + if w == nil { + w = io.Discard + } + e.e.resetOutIO(w) +} + +func (e *encoderBincBytes) ResetBytes(out *[]byte) { + if !e.bytes { + halt.onerror(errEncNoResetWriterWithBytes) + } + e.resetBytes(out) +} + +func (e *encoderBincBytes) resetBytes(out *[]byte) { + e.reset() + if out == nil { + out = &bytesEncAppenderDefOut + } + e.e.resetOutBytes(out) +} + +func (helperEncDriverBincBytes) newEncoderBytes(out *[]byte, h Handle) *encoderBincBytes { + var c1 encoderBincBytes + c1.bytes = true + c1.init(h) + c1.ResetBytes(out) + return &c1 +} + +func (helperEncDriverBincBytes) newEncoderIO(out io.Writer, h Handle) *encoderBincBytes { + var c1 encoderBincBytes + c1.bytes = false + c1.init(h) + c1.Reset(out) + return &c1 +} + +func (helperEncDriverBincBytes) encFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathEsBincBytes) (f *fastpathEBincBytes, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperEncDriverBincBytes) encFindRtidFn(s []encRtidFnBincBytes, rtid uintptr) (i uint, fn *encFnBincBytes) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperEncDriverBincBytes) encFromRtidFnSlice(fns *atomicRtidFnSlice) (s []encRtidFnBincBytes) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]encRtidFnBincBytes](v)) + } + return +} + +func (dh helperEncDriverBincBytes) encFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, + x *BasicHandle, fp *fastpathEsBincBytes, checkExt bool) (fn *encFnBincBytes) { + return dh.encFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperEncDriverBincBytes) encFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsBincBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnBincBytes) { + rtid := rt2id(rt) + var sp []encRtidFnBincBytes = dh.encFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.encFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.encFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperEncDriverBincBytes) encFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsBincBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnBincBytes) { + + fn = dh.encFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []encRtidFnBincBytes + mu.Lock() + sp = dh.encFromRtidFnSlice(fns) + + if sp == nil { + sp = []encRtidFnBincBytes{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.encFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]encRtidFnBincBytes, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = encRtidFnBincBytes{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperEncDriverBincBytes) encFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp 
*fastpathEsBincBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnBincBytes) { + fn = new(encFnBincBytes) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + if rtid == timeTypId && timeBuiltin { + fn.fe = (*encoderBincBytes).kTime + } else if rtid == rawTypId { + fn.fe = (*encoderBincBytes).raw + } else if rtid == rawExtTypId { + fn.fe = (*encoderBincBytes).rawExt + fi.addrE = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*encoderBincBytes).ext + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fe = (*encoderBincBytes).selferMarshal + fi.addrE = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fe = (*encoderBincBytes).binaryMarshal + fi.addrE = ti.flagBinaryMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fe = (*encoderBincBytes).jsonMarshal + fi.addrE = ti.flagJsonMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fe = (*encoderBincBytes).textMarshal + fi.addrE = ti.flagTextMarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fe = fp[idx].encfn + } + } else { + + xfe, xrt := dh.encFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf := xfe.encfn + fn.fe = func(e *encoderBincBytes, xf *encFnInfo, xrv reflect.Value) { + xfnf(e, xf, rvConvert(xrv, xrt)) + } + } + } + } + if fn.fe == nil { + switch rk { + case reflect.Bool: + fn.fe = (*encoderBincBytes).kBool + case reflect.String: + + fn.fe = (*encoderBincBytes).kString + case reflect.Int: + fn.fe = (*encoderBincBytes).kInt + case reflect.Int8: + fn.fe = (*encoderBincBytes).kInt8 + case reflect.Int16: + fn.fe = (*encoderBincBytes).kInt16 + case reflect.Int32: + fn.fe = (*encoderBincBytes).kInt32 + case reflect.Int64: + fn.fe = (*encoderBincBytes).kInt64 + case reflect.Uint: + fn.fe = (*encoderBincBytes).kUint + case reflect.Uint8: + fn.fe = (*encoderBincBytes).kUint8 + case reflect.Uint16: + fn.fe = (*encoderBincBytes).kUint16 + case reflect.Uint32: + fn.fe = (*encoderBincBytes).kUint32 + case reflect.Uint64: + fn.fe = (*encoderBincBytes).kUint64 + case reflect.Uintptr: + fn.fe = (*encoderBincBytes).kUintptr + case reflect.Float32: + fn.fe = (*encoderBincBytes).kFloat32 + case reflect.Float64: + fn.fe = (*encoderBincBytes).kFloat64 + case reflect.Complex64: + fn.fe = (*encoderBincBytes).kComplex64 + case reflect.Complex128: + fn.fe = (*encoderBincBytes).kComplex128 + case reflect.Chan: + fn.fe = (*encoderBincBytes).kChan + case reflect.Slice: + fn.fe = (*encoderBincBytes).kSlice + case reflect.Array: + fn.fe = (*encoderBincBytes).kArray + case reflect.Struct: + if ti.simple { + fn.fe = (*encoderBincBytes).kStructSimple + } else { + fn.fe = (*encoderBincBytes).kStruct + } + case reflect.Map: + fn.fe = (*encoderBincBytes).kMap + case reflect.Interface: + + 
fn.fe = (*encoderBincBytes).kErr + default: + + fn.fe = (*encoderBincBytes).kErr + } + } + } + return +} +func (d *decoderBincBytes) rawExt(f *decFnInfo, rv reflect.Value) { + d.d.DecodeRawExt(rv2i(rv).(*RawExt)) +} + +func (d *decoderBincBytes) ext(f *decFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (d *decoderBincBytes) selferUnmarshal(_ *decFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(&Decoder{d}) +} + +func (d *decoderBincBytes) binaryUnmarshal(_ *decFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs, _ := d.d.DecodeBytes() + fnerr := bm.UnmarshalBinary(xbs) + halt.onerror(fnerr) +} + +func (d *decoderBincBytes) textUnmarshal(_ *decFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(bytesOKs(d.d.DecodeStringAsBytes())) + halt.onerror(fnerr) +} + +func (d *decoderBincBytes) jsonUnmarshal(_ *decFnInfo, rv reflect.Value) { + d.jsonUnmarshalV(rv2i(rv).(jsonUnmarshaler)) +} + +func (d *decoderBincBytes) jsonUnmarshalV(tm jsonUnmarshaler) { + + halt.onerror(tm.UnmarshalJSON(d.d.nextValueBytes())) +} + +func (d *decoderBincBytes) kErr(_ *decFnInfo, rv reflect.Value) { + halt.errorf("unsupported decoding kind: %s, for %#v", rv.Kind(), rv) + +} + +func (d *decoderBincBytes) raw(_ *decFnInfo, rv reflect.Value) { + rvSetBytes(rv, d.rawBytes()) +} + +func (d *decoderBincBytes) kString(_ *decFnInfo, rv reflect.Value) { + rvSetString(rv, d.detach2Str(d.d.DecodeStringAsBytes())) +} + +func (d *decoderBincBytes) kBool(_ *decFnInfo, rv reflect.Value) { + rvSetBool(rv, d.d.DecodeBool()) +} + +func (d *decoderBincBytes) kTime(_ *decFnInfo, rv reflect.Value) { + rvSetTime(rv, d.d.DecodeTime()) +} + +func (d *decoderBincBytes) kFloat32(_ *decFnInfo, rv reflect.Value) { + rvSetFloat32(rv, d.d.DecodeFloat32()) +} + +func (d *decoderBincBytes) kFloat64(_ *decFnInfo, rv reflect.Value) { + rvSetFloat64(rv, d.d.DecodeFloat64()) +} + +func (d *decoderBincBytes) kComplex64(_ *decFnInfo, rv reflect.Value) { + rvSetComplex64(rv, complex(d.d.DecodeFloat32(), 0)) +} + +func (d *decoderBincBytes) kComplex128(_ *decFnInfo, rv reflect.Value) { + rvSetComplex128(rv, complex(d.d.DecodeFloat64(), 0)) +} + +func (d *decoderBincBytes) kInt(_ *decFnInfo, rv reflect.Value) { + rvSetInt(rv, int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))) +} + +func (d *decoderBincBytes) kInt8(_ *decFnInfo, rv reflect.Value) { + rvSetInt8(rv, int8(chkOvf.IntV(d.d.DecodeInt64(), 8))) +} + +func (d *decoderBincBytes) kInt16(_ *decFnInfo, rv reflect.Value) { + rvSetInt16(rv, int16(chkOvf.IntV(d.d.DecodeInt64(), 16))) +} + +func (d *decoderBincBytes) kInt32(_ *decFnInfo, rv reflect.Value) { + rvSetInt32(rv, int32(chkOvf.IntV(d.d.DecodeInt64(), 32))) +} + +func (d *decoderBincBytes) kInt64(_ *decFnInfo, rv reflect.Value) { + rvSetInt64(rv, d.d.DecodeInt64()) +} + +func (d *decoderBincBytes) kUint(_ *decFnInfo, rv reflect.Value) { + rvSetUint(rv, uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderBincBytes) kUintptr(_ *decFnInfo, rv reflect.Value) { + rvSetUintptr(rv, uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderBincBytes) kUint8(_ *decFnInfo, rv reflect.Value) { + rvSetUint8(rv, uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))) +} + +func (d *decoderBincBytes) kUint16(_ *decFnInfo, rv reflect.Value) { + rvSetUint16(rv, uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))) +} + +func (d *decoderBincBytes) kUint32(_ *decFnInfo, rv reflect.Value) { + rvSetUint32(rv, 
uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))) +} + +func (d *decoderBincBytes) kUint64(_ *decFnInfo, rv reflect.Value) { + rvSetUint64(rv, d.d.DecodeUint64()) +} + +func (d *decoderBincBytes) kInterfaceNaked(f *decFnInfo) (rvn reflect.Value) { + + n := d.naked() + d.d.DecodeNaked() + + if decFailNonEmptyIntf && f.ti.numMeth > 0 { + halt.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + } + + switch n.v { + case valueTypeMap: + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapStrIntfTypId { + var v2 map[string]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + + rvn = rvZeroAddrK(d.h.MapType, reflect.Map) + d.decodeValue(rvn, nil) + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvZeroAddrK(d.h.SliceType, reflect.Slice) + d.decodeValue(rvn, nil) + } + if d.h.PreferArrayOverSlice { + rvn = rvGetArray4Slice(rvn) + } + case valueTypeExt: + tag, bytes := n.u, n.l + bfn := d.h.getExtForTag(tag) + var re = RawExt{Tag: tag} + if bytes == nil { + + if bfn == nil { + d.decode(&re.Value) + rvn = rv4iptr(&re).Elem() + } else if bfn.ext == SelfExt { + rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind()) + d.decodeValue(rvn, d.fnNoExt(bfn.rt)) + } else { + rvn = reflect.New(bfn.rt) + d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext) + rvn = rvn.Elem() + } + } else { + + if bfn == nil { + re.setData(bytes, false) + rvn = rv4iptr(&re).Elem() + } else { + rvn = reflect.New(bfn.rt) + if bfn.ext == SelfExt { + sideDecode(d.hh, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv2i(rvn), bytes, bfn.rt, true) }) + } else { + bfn.ext.ReadExt(rv2i(rvn), bytes) + } + rvn = rvn.Elem() + } + } + + if d.h.PreferPointerForStructOrArray && rvn.CanAddr() { + if rk := rvn.Kind(); rk == reflect.Array || rk == reflect.Struct { + rvn = rvn.Addr() + } + } + case valueTypeNil: + + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + halt.errorStr2("kInterfaceNaked: unexpected valueType: ", n.v.String()) + } + return +} + +func (d *decoderBincBytes) kInterface(f *decFnInfo, rv reflect.Value) { + + isnilrv := rvIsNil(rv) + + var rvn reflect.Value + + if d.h.InterfaceReset { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } else if !isnilrv { + decSetNonNilRV2Zero4Intf(rv) + } + return + } + } else if isnilrv { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } + return + } + } else { + + rvn = rv.Elem() + } + + canDecode, _ := isDecodeable(rvn) + + if !canDecode { + rvn2 := d.oneShotAddrRV(rvn.Type(), rvn.Kind()) + rvSetDirect(rvn2, rvn) + rvn = rvn2 + } + + d.decodeValue(rvn, nil) + rvSetIntf(rv, rvn) +} + +func (d *decoderBincBytes) kStructField(si *structFieldInfo, rv reflect.Value) 
{ + if d.d.TryNil() { + rv = si.fieldNoAlloc(rv, true) + if rv.IsValid() { + decSetNonNilRV2Zero(rv) + } + } else if si.decBuiltin { + rv = rvAddr(si.fieldAlloc(rv), si.ptrTyp) + d.decode(rv2i(rv)) + } else { + fn := d.fn(si.baseTyp) + rv = si.fieldAlloc(rv) + if fn.i.addrD { + rv = rvAddr(rv, si.ptrTyp) + } + fn.fd(d, &fn.i, rv) + } +} + +func (d *decoderBincBytes) kStructSimple(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var rvkencname []byte + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderBincBytes) kStruct(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + var mf MissingFielder + if ti.flagMissingFielder { + mf = rv2i(rv).(MissingFielder) + } else if ti.flagMissingFielderPtr { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var name2 []byte + var rvkencname []byte + tkt := ti.keyType + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + + if tkt == valueTypeString { + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + } else if tkt == valueTypeInt { + rvkencname = strconv.AppendInt(d.b[:0], d.d.DecodeInt64(), 10) + } else if tkt == valueTypeUint { + rvkencname = strconv.AppendUint(d.b[:0], d.d.DecodeUint64(), 10) + } else if tkt == valueTypeFloat { + rvkencname = strconv.AppendFloat(d.b[:0], d.d.DecodeFloat64(), 'f', -1, 64) + } else { + halt.errorStr2("invalid struct key type: ", ti.keyType.String()) + } + + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else if mf != nil { + + name2 = append(name2[:0], rvkencname...) 
+ var f interface{} + d.decode(&f) + if !mf.CodecMissingField(name2, f) && d.h.ErrorIfNoField { + halt.errorStr2("no matching struct field when decoding stream map with key: ", stringView(name2)) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderBincBytes) kSlice(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + rvCanset := rv.CanSet() + + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + rvbs := rvGetBytes(rv) + if rvCanset { + bs2, bst := d.decodeBytesInto(rvbs, false) + if bst != dBytesIntoParamOut { + rvSetBytes(rv, bs2) + } + } else { + + d.decodeBytesInto(rvbs[:len(rvbs):len(rvbs)], true) + } + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset { + if rvIsNil(rv) { + rvSetDirect(rv, rvSliceZeroCap(ti.rt)) + } else { + rvSetSliceLen(rv, 0) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem0Mut := !scalarBitset.isset(ti.elemkind) + rtelem := ti.elem + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnBincBytes + + var rvChanged bool + + var rv0 = rv + var rv9 reflect.Value + + rvlen := rvLenSlice(rv) + rvcap := rvCapSlice(rv) + maxInitLen := d.maxInitLen() + hasLen := containerLenS >= 0 + if hasLen { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen1 := int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + if rvlen1 == rvlen { + } else if rvlen1 <= rvcap { + if rvCanset { + rvlen = rvlen1 + rvSetSliceLen(rv, rvlen) + } + } else if rvCanset { + rvlen = rvlen1 + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && rtelem0Mut { + rvCopySlice(rv, rv0, rtelem) + } + } else if containerLenS != rvlen { + if rvCanset { + rvlen = containerLenS + rvSetSliceLen(rv, rvlen) + } + } + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } + + var j int + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if rvCanset { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into 
non-settable slice") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + + if rvlen < rvcap { + rvlen = rvcap + if rvCanset { + rvSetSliceLen(rv, rvlen) + } else if rvChanged { + rv = rvSlice(rv, rvlen) + } else { + halt.onerror(errExpandSliceCannotChange) + } + } else { + if !(rvCanset || rvChanged) { + halt.onerror(errExpandSliceCannotChange) + } + rv, rvcap, rvCanset = rvGrowSlice(rv, f.ti, rvcap, 1) + + rvlen = rvcap + rvChanged = !rvCanset + } + } + + rv9 = rvArrayIndex(rv, j, f.ti, true) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if j < rvlen { + if rvCanset { + rvSetSliceLen(rv, j) + } else if rvChanged { + rv = rvSlice(rv, j) + } + + } else if j == 0 && rvIsNil(rv) { + if rvCanset { + rv = rvSliceZeroCap(ti.rt) + rvCanset = false + rvChanged = true + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } +} + +func (d *decoderBincBytes) kArray(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + ctyp := d.d.ContainerType() + if handleBytesWithinKArray && (ctyp == valueTypeBytes || ctyp == valueTypeString) { + + if ti.elemkind != uint8(reflect.Uint8) { + halt.errorf("bytes/string in stream can decode into array of bytes, but not %v", ti.rt) + } + rvbs := rvGetArrayBytes(rv, nil) + d.decodeBytesInto(rvbs, true) + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var rv9 reflect.Value + + rvlen := rv.Len() + hasLen := containerLenS >= 0 + if hasLen && containerLenS > rvlen { + halt.errorf("cannot decode into array with length: %v, less than container length: %v", any(rvlen), any(containerLenS)) + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + var fn *decFnBincBytes + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } else { + fn = d.fn(rtelem) + } + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + d.arrayCannotExpand(rvlen, j+1) + d.swallow() + continue + } + + rv9 = rvArrayIndex(rv, j, f.ti, false) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + 
+func (d *decoderBincBytes) kChan(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + if ti.chandir&uint8(reflect.SendDir) == 0 { + halt.errorStr("receive-only channel cannot be decoded") + } + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + bs2, _ := d.d.DecodeBytes() + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + return + } + + var rvCanset = rv.CanSet() + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset && rvIsNil(rv) { + rvSetDirect(rv, reflect.MakeChan(ti.rt, 0)) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + useTransient := decUseTransient && ti.elemkind != byte(reflect.Ptr) && ti.tielem.flagCanTransient + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnBincBytes + + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + var rvlen int + hasLen := containerLenS >= 0 + maxInitLen := d.maxInitLen() + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if hasLen { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + } else { + rvlen = decDefChanCap + } + if rvCanset { + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } else { + halt.errorStr("cannot decode into non-settable chan") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if rv9.IsValid() { + rvSetZero(rv9) + } else if useTransient { + rv9 = d.perType.TransientAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } else { + rv9 = rvZeroAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } + if !d.d.TryNil() { + d.decodeValueNoCheckNil(rv9, fn) + } + rv.Send(rv9) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } + +} + +func (d *decoderBincBytes) kMap(f *decFnInfo, rv reflect.Value) { + _ = d.d + containerLen := d.mapStart(d.d.ReadMapStart()) + ti := f.ti + if rvIsNil(rv) { + rvlen := int(decInferLen(containerLen, d.maxInitLen(), uint(ti.keysize+ti.elemsize))) + rvSetDirect(rv, makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + d.mapEnd() + return + } + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := reflect.Kind(ti.elemkind) + ktypeKind := reflect.Kind(ti.keykind) + mparams := getMapReqParams(ti) + + vtypePtr := vtypeKind == reflect.Ptr + ktypePtr := ktypeKind == reflect.Ptr + + vTransient := decUseTransient && !vtypePtr && ti.tielem.flagCanTransient + + kTransient := vTransient && !ktypePtr && ti.tikey.flagCanTransient + + var vtypeElem reflect.Type + + var keyFn, valFn *decFnBincBytes + var ktypeLo, vtypeLo = ktype, vtype + + if ktypeKind == reflect.Ptr { + for ktypeLo = ktype.Elem(); ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + } + + if vtypePtr { + vtypeElem = vtype.Elem() + for vtypeLo 
= vtypeElem; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + } + + rvkMut := !scalarBitset.isset(ti.keykind) + rvvMut := !scalarBitset.isset(ti.elemkind) + rvvCanNil := isnilBitset.isset(ti.elemkind) + + var rvk, rvkn, rvv, rvvn, rvva, rvvz reflect.Value + + var doMapGet, doMapSet bool + + if !d.h.MapValueReset { + if rvvMut && (vtypeKind != reflect.Interface || !d.h.InterfaceReset) { + doMapGet = true + rvva = mapAddrLoopvarRV(vtype, vtypeKind) + } + } + + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen >= 0 + + var kstr2bs []byte + var kstr string + + var mapKeyStringSharesBytesBuf bool + var att dBytesAttachState + + var vElem, kElem reflect.Type + kbuiltin := ti.tikey.flagDecBuiltin && ti.keykind != uint8(reflect.Slice) + vbuiltin := ti.tielem.flagDecBuiltin + if kbuiltin && ktypePtr { + kElem = ti.key.Elem() + } + if vbuiltin && vtypePtr { + vElem = ti.elem.Elem() + } + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + mapKeyStringSharesBytesBuf = false + kstr = "" + if j == 0 { + + if kTransient { + rvk = d.perType.TransientAddr2K(ktype, ktypeKind) + } else { + rvk = rvZeroAddrK(ktype, ktypeKind) + } + if !rvkMut { + rvkn = rvk + } + if !rvvMut { + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + } + if !ktypeIsString && keyFn == nil { + keyFn = d.fn(ktypeLo) + } + if valFn == nil { + valFn = d.fn(vtypeLo) + } + } else if rvkMut { + rvSetZero(rvk) + } else { + rvk = rvkn + } + + d.mapElemKey(j == 0) + + if d.d.TryNil() { + rvSetZero(rvk) + } else if ktypeIsString { + kstr2bs, att = d.d.DecodeStringAsBytes() + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, att) + rvSetString(rvk, kstr) + } else { + if kbuiltin { + if ktypePtr { + if rvIsNil(rvk) { + rvSetDirect(rvk, reflect.New(kElem)) + } + d.decode(rv2i(rvk)) + } else { + d.decode(rv2i(rvAddr(rvk, ti.tikey.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvk, keyFn) + } + + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() && rvk2.Type() == uint8SliceTyp { + kstr2bs = rvGetBytes(rvk2) + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, dBytesAttachView) + rvSetIntf(rvk, rv4istr(kstr)) + } + + } + } + + if mapKeyStringSharesBytesBuf && d.bufio { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + mapKeyStringSharesBytesBuf = false + } + + d.mapElemValue() + + if d.d.TryNil() { + if mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + + if !rvvz.IsValid() { + rvvz = rvZeroK(vtype, vtypeKind) + } + mapSet(rv, rvk, rvvz, mparams) + continue + } + + doMapSet = true + + if !rvvMut { + rvv = rvvn + } else if !doMapGet { + goto NEW_RVV + } else { + rvv = mapGet(rv, rvk, rvva, mparams) + if !rvv.IsValid() || (rvvCanNil && rvIsNil(rvv)) { + goto NEW_RVV + } + switch vtypeKind { + case reflect.Ptr, reflect.Map: + doMapSet = false + case reflect.Interface: + + rvvn = rvv.Elem() + if k := rvvn.Kind(); (k == reflect.Ptr || k == reflect.Map) && !rvIsNil(rvvn) { + d.decodeValueNoCheckNil(rvvn, nil) + continue + } + + rvvn = rvZeroAddrK(vtype, vtypeKind) + rvSetIntf(rvvn, rvv) + rvv = rvvn + default: + + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + rvSetDirect(rvvn, rvv) + rvv = rvvn + } + } 
+ goto DECODE_VALUE_NO_CHECK_NIL + + NEW_RVV: + if vtypePtr { + rvv = reflect.New(vtypeElem) + } else if vTransient { + rvv = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvv = rvZeroAddrK(vtype, vtypeKind) + } + + DECODE_VALUE_NO_CHECK_NIL: + if doMapSet && mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + if vbuiltin { + if vtypePtr { + if rvIsNil(rvv) { + rvSetDirect(rvv, reflect.New(vElem)) + } + d.decode(rv2i(rvv)) + } else { + d.decode(rv2i(rvAddr(rvv, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvv, valFn) + } + if doMapSet { + mapSet(rv, rvk, rvv, mparams) + } + } + + d.mapEnd() +} + +func (d *decoderBincBytes) init(h Handle) { + initHandle(h) + callMake(&d.d) + d.hh = h + d.h = h.getBasicHandle() + + d.err = errDecoderNotInitialized + + if d.h.InternString && d.is == nil { + d.is.init() + } + + d.fp = d.d.init(h, &d.decoderBase, d).(*fastpathDsBincBytes) + + if d.bytes { + d.rtidFn = &d.h.rtidFnsDecBytes + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtBytes + } else { + d.bufio = d.h.ReaderBufferSize > 0 + d.rtidFn = &d.h.rtidFnsDecIO + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtIO + } + + d.reset() + +} + +func (d *decoderBincBytes) reset() { + d.d.reset() + d.err = nil + d.c = 0 + d.depth = 0 + d.calls = 0 + + d.maxdepth = decDefMaxDepth + if d.h.MaxDepth > 0 { + d.maxdepth = d.h.MaxDepth + } + d.mtid = 0 + d.stid = 0 + d.mtr = false + d.str = false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + _, d.mtr = fastpathAvIndex(d.mtid) + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + _, d.str = fastpathAvIndex(d.stid) + } +} + +func (d *decoderBincBytes) Reset(r io.Reader) { + if d.bytes { + halt.onerror(errDecNoResetBytesWithReader) + } + d.reset() + if r == nil { + r = &eofReader + } + d.d.resetInIO(r) +} + +func (d *decoderBincBytes) ResetBytes(in []byte) { + if !d.bytes { + halt.onerror(errDecNoResetReaderWithBytes) + } + d.resetBytes(in) +} + +func (d *decoderBincBytes) resetBytes(in []byte) { + d.reset() + if in == nil { + in = zeroByteSlice + } + d.d.resetInBytes(in) +} + +func (d *decoderBincBytes) ResetString(s string) { + d.ResetBytes(bytesView(s)) +} + +func (d *decoderBincBytes) Decode(v interface{}) (err error) { + + defer panicValToErr(d, callRecoverSentinel, &d.err, &err, debugging) + d.mustDecode(v) + return +} + +func (d *decoderBincBytes) MustDecode(v interface{}) { + defer panicValToErr(d, callRecoverSentinel, &d.err, nil, true) + d.mustDecode(v) + return +} + +func (d *decoderBincBytes) mustDecode(v interface{}) { + halt.onerror(d.err) + if d.hh == nil { + halt.onerror(errNoFormatHandle) + } + + d.calls++ + d.decode(v) + d.calls-- +} + +func (d *decoderBincBytes) Release() {} + +func (d *decoderBincBytes) swallow() { + d.d.nextValueBytes() +} + +func (d *decoderBincBytes) nextValueBytes() []byte { + return d.d.nextValueBytes() +} + +func (d *decoderBincBytes) decode(iv interface{}) { + _ = d.d + + rv, ok := isNil(iv, true) + if ok { + halt.onerror(errCannotDecodeIntoNil) + } + + switch v := iv.(type) { + + case *string: + *v = d.detach2Str(d.d.DecodeStringAsBytes()) + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + 
*v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *uintptr: + *v = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *float32: + *v = d.d.DecodeFloat32() + case *float64: + *v = d.d.DecodeFloat64() + case *complex64: + *v = complex(d.d.DecodeFloat32(), 0) + case *complex128: + *v = complex(d.d.DecodeFloat64(), 0) + case *[]byte: + *v, _ = d.decodeBytesInto(*v, false) + case []byte: + + d.decodeBytesInto(v[:len(v):len(v)], true) + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(rv4iptr(v), nil) + + case reflect.Value: + if ok, _ = isDecodeable(v); !ok { + d.haltAsNotDecodeable(v) + } + d.decodeValue(v, nil) + + default: + + if skipFastpathTypeSwitchInDirectCall || !d.dh.fastpathDecodeTypeSwitch(iv, d) { + if !rv.IsValid() { + rv = reflect.ValueOf(iv) + } + if ok, _ = isDecodeable(rv); !ok { + d.haltAsNotDecodeable(rv) + } + d.decodeValue(rv, nil) + } + } +} + +func (d *decoderBincBytes) decodeValue(rv reflect.Value, fn *decFnBincBytes) { + if d.d.TryNil() { + decSetNonNilRV2Zero(rv) + } else { + d.decodeValueNoCheckNil(rv, fn) + } +} + +func (d *decoderBincBytes) decodeValueNoCheckNil(rv reflect.Value, fn *decFnBincBytes) { + + var rvp reflect.Value + var rvpValid bool +PTR: + if rv.Kind() == reflect.Ptr { + rvpValid = true + if rvIsNil(rv) { + rvSetDirect(rv, reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + goto PTR + } + + if fn == nil { + fn = d.fn(rv.Type()) + } + if fn.i.addrD { + if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else if fn.i.addrDf { + halt.errorStr("cannot decode into a non-pointer value") + } + } + fn.fd(d, &fn.i, rv) +} + +func (d *decoderBincBytes) decodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + d.decodeValue(baseRV(v), d.fn(t)) + } else { + d.decodeValue(baseRV(v), d.fnNoExt(t)) + } +} + +func (d *decoderBincBytes) structFieldNotFound(index int, rvkencname string) { + + if d.h.ErrorIfNoField { + if index >= 0 { + halt.errorInt("no matching struct field found when decoding stream array at index ", int64(index)) + } else if rvkencname != "" { + halt.errorStr2("no matching struct field found when decoding stream map with key ", rvkencname) + } + } + d.swallow() +} + +func (d *decoderBincBytes) decodeBytesInto(out []byte, mustFit bool) (v []byte, state dBytesIntoState) { + v, att := d.d.DecodeBytes() + if cap(v) == 0 || (att >= dBytesAttachViewZerocopy && !mustFit) { + + return + } + if len(v) == 0 { + v = zeroByteSlice + return + } + if len(out) == len(v) { + state = dBytesIntoParamOut + } else if cap(out) >= len(v) { + out = out[:len(v)] + state = dBytesIntoParamOutSlice + } else if mustFit { + halt.errorf("bytes capacity insufficient for decoded bytes: got/expected: %d/%d", len(v), len(out)) + } else { + out = make([]byte, len(v)) + state = dBytesIntoNew + } + copy(out, v) + v = out + return +} + +func (d *decoderBincBytes) rawBytes() (v []byte) { + + v = d.d.nextValueBytes() + if d.bytes && !d.h.ZeroCopy { + vv := make([]byte, len(v)) + copy(vv, v) + v = vv + } + return +} + +func (d *decoderBincBytes) wrapErr(v error, err *error) { + *err = wrapCodecErr(v, d.hh.Name(), d.d.NumBytesRead(), false) +} + +func (d *decoderBincBytes) NumBytesRead() int { + return 
d.d.NumBytesRead() +} + +func (d *decoderBincBytes) containerNext(j, containerLen int, hasLen bool) bool { + + if hasLen { + return j < containerLen + } + return !d.d.CheckBreak() +} + +func (d *decoderBincBytes) mapElemKey(firstTime bool) { + d.d.ReadMapElemKey(firstTime) + d.c = containerMapKey +} + +func (d *decoderBincBytes) mapElemValue() { + d.d.ReadMapElemValue() + d.c = containerMapValue +} + +func (d *decoderBincBytes) mapEnd() { + d.d.ReadMapEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderBincBytes) arrayElem(firstTime bool) { + d.d.ReadArrayElem(firstTime) + d.c = containerArrayElem +} + +func (d *decoderBincBytes) arrayEnd() { + d.d.ReadArrayEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderBincBytes) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) { + + var vv interface{} + d.decode(&vv) + ext.UpdateExt(v, vv) + +} + +func (d *decoderBincBytes) fn(t reflect.Type) *decFnBincBytes { + return d.dh.decFnViaBH(t, d.rtidFn, d.h, d.fp, false) +} + +func (d *decoderBincBytes) fnNoExt(t reflect.Type) *decFnBincBytes { + return d.dh.decFnViaBH(t, d.rtidFnNoExt, d.h, d.fp, true) +} + +func (helperDecDriverBincBytes) newDecoderBytes(in []byte, h Handle) *decoderBincBytes { + var c1 decoderBincBytes + c1.bytes = true + c1.init(h) + c1.ResetBytes(in) + return &c1 +} + +func (helperDecDriverBincBytes) newDecoderIO(in io.Reader, h Handle) *decoderBincBytes { + var c1 decoderBincBytes + c1.init(h) + c1.Reset(in) + return &c1 +} + +func (helperDecDriverBincBytes) decFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathDsBincBytes) (f *fastpathDBincBytes, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperDecDriverBincBytes) decFindRtidFn(s []decRtidFnBincBytes, rtid uintptr) (i uint, fn *decFnBincBytes) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperDecDriverBincBytes) decFromRtidFnSlice(fns *atomicRtidFnSlice) (s []decRtidFnBincBytes) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]decRtidFnBincBytes](v)) + } + return +} + +func (dh helperDecDriverBincBytes) decFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, x *BasicHandle, fp *fastpathDsBincBytes, + checkExt bool) (fn *decFnBincBytes) { + return dh.decFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperDecDriverBincBytes) decFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsBincBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnBincBytes) { + rtid := rt2id(rt) + var sp []decRtidFnBincBytes = dh.decFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.decFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.decFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperDecDriverBincBytes) decFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsBincBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnBincBytes) { + + 
fn = dh.decFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []decRtidFnBincBytes + mu.Lock() + sp = dh.decFromRtidFnSlice(fns) + + if sp == nil { + sp = []decRtidFnBincBytes{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.decFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]decRtidFnBincBytes, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = decRtidFnBincBytes{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperDecDriverBincBytes) decFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathDsBincBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnBincBytes) { + fn = new(decFnBincBytes) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + fi.addrDf = true + + if rtid == timeTypId && timeBuiltin { + fn.fd = (*decoderBincBytes).kTime + } else if rtid == rawTypId { + fn.fd = (*decoderBincBytes).raw + } else if rtid == rawExtTypId { + fn.fd = (*decoderBincBytes).rawExt + fi.addrD = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fd = (*decoderBincBytes).ext + fi.addrD = true + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fd = (*decoderBincBytes).selferUnmarshal + fi.addrD = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fd = (*decoderBincBytes).binaryUnmarshal + fi.addrD = ti.flagBinaryUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fd = (*decoderBincBytes).jsonUnmarshal + fi.addrD = ti.flagJsonUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fd = (*decoderBincBytes).textUnmarshal + fi.addrD = ti.flagTextUnmarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fd = fp[idx].decfn + fi.addrD = true + fi.addrDf = false + if rk == reflect.Array { + fi.addrD = false + } + } + } else { + + xfe, xrt := dh.decFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf2 := xfe.decfn + if rk == reflect.Array { + fi.addrD = false + fn.fd = func(d *decoderBincBytes, xf *decFnInfo, xrv reflect.Value) { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } else { + fi.addrD = true + fi.addrDf = false + xptr2rt := reflect.PointerTo(xrt) + fn.fd = func(d *decoderBincBytes, xf *decFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, rvConvert(xrv, xptr2rt)) + } else { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } + } + } + } + } + if fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fd = (*decoderBincBytes).kBool + case reflect.String: + fn.fd = (*decoderBincBytes).kString + case reflect.Int: + fn.fd = (*decoderBincBytes).kInt + case reflect.Int8: + fn.fd = (*decoderBincBytes).kInt8 + case reflect.Int16: + fn.fd = (*decoderBincBytes).kInt16 + case reflect.Int32: + fn.fd = (*decoderBincBytes).kInt32 + case 
reflect.Int64: + fn.fd = (*decoderBincBytes).kInt64 + case reflect.Uint: + fn.fd = (*decoderBincBytes).kUint + case reflect.Uint8: + fn.fd = (*decoderBincBytes).kUint8 + case reflect.Uint16: + fn.fd = (*decoderBincBytes).kUint16 + case reflect.Uint32: + fn.fd = (*decoderBincBytes).kUint32 + case reflect.Uint64: + fn.fd = (*decoderBincBytes).kUint64 + case reflect.Uintptr: + fn.fd = (*decoderBincBytes).kUintptr + case reflect.Float32: + fn.fd = (*decoderBincBytes).kFloat32 + case reflect.Float64: + fn.fd = (*decoderBincBytes).kFloat64 + case reflect.Complex64: + fn.fd = (*decoderBincBytes).kComplex64 + case reflect.Complex128: + fn.fd = (*decoderBincBytes).kComplex128 + case reflect.Chan: + fn.fd = (*decoderBincBytes).kChan + case reflect.Slice: + fn.fd = (*decoderBincBytes).kSlice + case reflect.Array: + fi.addrD = false + fn.fd = (*decoderBincBytes).kArray + case reflect.Struct: + if ti.simple { + fn.fd = (*decoderBincBytes).kStructSimple + } else { + fn.fd = (*decoderBincBytes).kStruct + } + case reflect.Map: + fn.fd = (*decoderBincBytes).kMap + case reflect.Interface: + + fn.fd = (*decoderBincBytes).kInterface + default: + + fn.fd = (*decoderBincBytes).kErr + } + } + } + return +} +func (e *bincEncDriverBytes) EncodeNil() { + e.w.writen1(bincBdNil) +} + +func (e *bincEncDriverBytes) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + } else { + bs := bincEncodeTime(t) + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriverBytes) EncodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriverBytes) encSpFloat(f float64) (done bool) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + } else if math.IsNaN(float64(f)) { + e.w.writen1(bincVdSpecial<<4 | bincSpNan) + } else if math.IsInf(float64(f), +1) { + e.w.writen1(bincVdSpecial<<4 | bincSpPosInf) + } else if math.IsInf(float64(f), -1) { + e.w.writen1(bincVdSpecial<<4 | bincSpNegInf) + } else { + return + } + return true +} + +func (e *bincEncDriverBytes) EncodeFloat32(f float32) { + if !e.encSpFloat(float64(f)) { + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + e.w.writen4(bigen.PutUint32(math.Float32bits(f))) + } +} + +func (e *bincEncDriverBytes) EncodeFloat64(f float64) { + if e.encSpFloat(f) { + return + } + b := bigen.PutUint64(math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (b[i] == 0); i-- { + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writen8(b) +} + +func (e *bincEncDriverBytes) encIntegerPrune32(bd byte, pos bool, v uint64) { + b := bigen.PutUint32(uint32(v)) + if bincDoPrune { + i := byte(pruneSignExt(b[:], pos)) + e.w.writen1(bd | 3 - i) + e.w.writeb(b[i:]) + } else { + e.w.writen1(bd | 3) + e.w.writen4(b) + } +} + +func (e *bincEncDriverBytes) encIntegerPrune64(bd byte, pos bool, v uint64) { + b := bigen.PutUint64(v) + if bincDoPrune { + i := byte(pruneSignExt(b[:], pos)) + e.w.writen1(bd | 7 - i) + e.w.writeb(b[i:]) + } else { + e.w.writen1(bd | 7) + e.w.writen8(b) + } +} + +func (e *bincEncDriverBytes) EncodeInt(v int64) { + if v >= 0 { + e.encUint(bincVdPosInt<<4, true, uint64(v)) + } else if v == -1 { + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + } else { + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriverBytes) EncodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, 
v) +} + +func (e *bincEncDriverBytes) encUint(bd byte, pos bool, v uint64) { + if v == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + } else if pos && v >= 1 && v <= 16 { + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + } else if v <= math.MaxUint8 { + e.w.writen2(bd, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + e.w.writen2(bigen.PutUint16(uint16(v))) + } else if v <= math.MaxUint32 { + e.encIntegerPrune32(bd, pos, v) + } else { + e.encIntegerPrune64(bd, pos, v) + } +} + +func (e *bincEncDriverBytes) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + var bs0, bs []byte + if ext == SelfExt { + bs0 = e.e.blist.get(1024) + bs = bs0 + sideEncode(e.h, &e.h.sideEncPool, func(se encoderI) { oneOffEncode(se, v, &bs, basetype, true) }) + } else { + bs = ext.WriteExt(v) + } + if bs == nil { + e.writeNilBytes() + goto END + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +END: + if ext == SelfExt { + e.e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.e.blist.put(bs0) + } + } +} + +func (e *bincEncDriverBytes) EncodeRawExt(re *RawExt) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *bincEncDriverBytes) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriverBytes) WriteArrayStart(length int) { + e.encLen(bincVdArray<<4, uint64(length)) +} + +func (e *bincEncDriverBytes) WriteMapStart(length int) { + e.encLen(bincVdMap<<4, uint64(length)) +} + +func (e *bincEncDriverBytes) WriteArrayEmpty() { + + e.w.writen1(bincVdArray<<4 | uint8(0+4)) +} + +func (e *bincEncDriverBytes) WriteMapEmpty() { + + e.w.writen1(bincVdMap<<4 | uint8(0+4)) +} + +func (e *bincEncDriverBytes) EncodeSymbol(v string) { + + l := len(v) + if l == 0 { + e.encBytesLen(cUTF8, 0) + return + } else if l == 1 { + e.encBytesLen(cUTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + e.w.writen2(bigen.PutUint16(ui)) + } + } else { + e.e.seq++ + ui = e.e.seq + e.m[v] = ui + var lenprec uint8 + if l <= math.MaxUint8 { + + } else if l <= math.MaxUint16 { + lenprec = 1 + } else if int64(l) <= math.MaxUint32 { + lenprec = 2 + } else { + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + e.w.writen2(bigen.PutUint16(ui)) + } + if lenprec == 0 { + e.w.writen1(byte(l)) + } else if lenprec == 1 { + e.w.writen2(bigen.PutUint16(uint16(l))) + } else if lenprec == 2 { + e.w.writen4(bigen.PutUint32(uint32(l))) + } else { + e.w.writen8(bigen.PutUint64(uint64(l))) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriverBytes) EncodeString(v string) { + if e.h.StringToRaw { + e.encLen(bincVdByteArray<<4, uint64(len(v))) + if len(v) > 0 { + e.w.writestr(v) + } + return + } + e.EncodeStringEnc(cUTF8, v) +} + +func (e *bincEncDriverBytes) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) } + +func (e *bincEncDriverBytes) EncodeStringEnc(c charEncoding, v string) { + if e.e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 1) { + e.EncodeSymbol(v) + return + } + e.encLen(bincVdString<<4, uint64(len(v))) + if len(v) > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriverBytes) EncodeStringBytesRaw(v []byte) { + e.encLen(bincVdByteArray<<4, uint64(len(v))) + if 
len(v) > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriverBytes) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *bincEncDriverBytes) writeNilOr(v byte) { + if !e.h.NilCollectionToZeroLength { + v = bincBdNil + } + e.w.writen1(v) +} + +func (e *bincEncDriverBytes) writeNilArray() { + e.writeNilOr(bincVdArray<<4 | uint8(0+4)) +} + +func (e *bincEncDriverBytes) writeNilMap() { + e.writeNilOr(bincVdMap<<4 | uint8(0+4)) +} + +func (e *bincEncDriverBytes) writeNilBytes() { + e.writeNilOr(bincVdArray<<4 | uint8(0+4)) +} + +func (e *bincEncDriverBytes) encBytesLen(c charEncoding, length uint64) { + + if c == cRAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + +func (e *bincEncDriverBytes) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriverBytes) encLenNumber(bd byte, v uint64) { + if v <= math.MaxUint8 { + e.w.writen2(bd, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + e.w.writen2(bigen.PutUint16(uint16(v))) + } else if v <= math.MaxUint32 { + e.w.writen1(bd | 0x02) + e.w.writen4(bigen.PutUint32(uint32(v))) + } else { + e.w.writen1(bd | 0x03) + e.w.writen8(bigen.PutUint64(uint64(v))) + } +} + +func (d *bincDecDriverBytes) readNextBd() { + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true +} + +func (d *bincDecDriverBytes) advanceNil() (null bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincBdNil { + d.bdRead = false + return true + } + return +} + +func (d *bincDecDriverBytes) TryNil() bool { + return d.advanceNil() +} + +func (d *bincDecDriverBytes) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincBdNil { + d.bdRead = false + return valueTypeNil + } else if d.vd == bincVdByteArray { + return valueTypeBytes + } else if d.vd == bincVdString { + return valueTypeString + } else if d.vd == bincVdArray { + return valueTypeArray + } else if d.vd == bincVdMap { + return valueTypeMap + } + return valueTypeUnset +} + +func (d *bincDecDriverBytes) DecodeTime() (t time.Time) { + if d.advanceNil() { + return + } + if d.vd != bincVdTimestamp { + halt.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + t, err := bincDecodeTime(d.r.readx(uint(d.vs))) + halt.onerror(err) + d.bdRead = false + return +} + +func (d *bincDecDriverBytes) decFloatPruned(maxlen uint8) { + l := d.r.readn1() + if l > maxlen { + halt.errorf("cannot read float - at most %v bytes used to represent float - received %v bytes", maxlen, l) + } + for i := l; i < maxlen; i++ { + d.d.b[i] = 0 + } + d.r.readb(d.d.b[0:l]) +} + +func (d *bincDecDriverBytes) decFloatPre32() (b [4]byte) { + if d.vs&0x8 == 0 { + b = d.r.readn4() + } else { + d.decFloatPruned(4) + copy(b[:], d.d.b[:]) + } + return +} + +func (d *bincDecDriverBytes) decFloatPre64() (b [8]byte) { + if d.vs&0x8 == 0 { + b = d.r.readn8() + } else { + d.decFloatPruned(8) + copy(b[:], d.d.b[:]) + } + return +} + +func (d *bincDecDriverBytes) decFloatVal() (f float64) { + switch d.vs & 0x7 { + case bincFlBin32: + f = float64(math.Float32frombits(bigen.Uint32(d.decFloatPre32()))) + case bincFlBin64: + f = math.Float64frombits(bigen.Uint64(d.decFloatPre64())) + default: + + halt.errorf("read float supports only float32/64 - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + return +} + +func (d *bincDecDriverBytes) decUint() (v 
uint64) { + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + v = uint64(bigen.Uint16(d.r.readn2())) + case 2: + b3 := d.r.readn3() + var b [4]byte + copy(b[1:], b3[:]) + v = uint64(bigen.Uint32(b)) + case 3: + v = uint64(bigen.Uint32(d.r.readn4())) + case 4, 5, 6: + + bs := d.d.b[:8] + clear(bs) + d.r.readb(bs[(7 - d.vs):]) + v = bigen.Uint64(*(*[8]byte)(bs)) + case 7: + v = bigen.Uint64(d.r.readn8()) + default: + halt.errorf("unsigned integers with greater than 64 bits of precision not supported: d.vs: %v %x", d.vs, d.vs) + } + return +} + +func (d *bincDecDriverBytes) uintBytes() (bs []byte) { + switch d.vs { + case 0: + bs = d.d.b[:1] + bs[0] = d.r.readn1() + return + case 1: + bs = d.d.b[:2] + case 2: + bs = d.d.b[:3] + case 3: + bs = d.d.b[:4] + case 4, 5, 6: + lim := 7 - d.vs + bs = d.d.b[lim:8] + case 7: + bs = d.d.b[:8] + default: + halt.errorf("unsigned integers with greater than 64 bits of precision not supported: d.vs: %v %x", d.vs, d.vs) + } + d.r.readb(bs) + return +} + +func (d *bincDecDriverBytes) decInteger() (ui uint64, neg, ok bool) { + ok = true + vd, vs := d.vd, d.vs + if vd == bincVdPosInt { + ui = d.decUint() + } else if vd == bincVdNegInt { + ui = d.decUint() + neg = true + } else if vd == bincVdSmallInt { + ui = uint64(d.vs) + 1 + } else if vd == bincVdSpecial { + if vs == bincSpZero { + + } else if vs == bincSpNegOne { + neg = true + ui = 1 + } else { + ok = false + + } + } else { + ok = false + + } + return +} + +func (d *bincDecDriverBytes) decFloat() (f float64, ok bool) { + ok = true + vd, vs := d.vd, d.vs + if vd == bincVdSpecial { + if vs == bincSpNan { + f = math.NaN() + } else if vs == bincSpPosInf { + f = math.Inf(1) + } else if vs == bincSpZeroFloat || vs == bincSpZero { + + } else if vs == bincSpNegInf { + f = math.Inf(-1) + } else { + ok = false + + } + } else if vd == bincVdFloat { + f = d.decFloatVal() + } else { + ok = false + } + return +} + +func (d *bincDecDriverBytes) DecodeInt64() (i int64) { + if d.advanceNil() { + return + } + v1, v2, v3 := d.decInteger() + i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, false) + d.bdRead = false + return +} + +func (d *bincDecDriverBytes) DecodeUint64() (ui uint64) { + if d.advanceNil() { + return + } + ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger()) + d.bdRead = false + return +} + +func (d *bincDecDriverBytes) DecodeFloat64() (f float64) { + if d.advanceNil() { + return + } + v1, v2 := d.decFloat() + f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, false) + d.bdRead = false + return +} + +func (d *bincDecDriverBytes) DecodeBool() (b bool) { + if d.advanceNil() { + return + } + if d.bd == (bincVdSpecial | bincSpFalse) { + + } else if d.bd == (bincVdSpecial | bincSpTrue) { + b = true + } else { + halt.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + d.bdRead = false + return +} + +func (d *bincDecDriverBytes) ReadMapStart() (length int) { + if d.advanceNil() { + return containerLenNil + } + if d.vd != bincVdMap { + halt.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriverBytes) ReadArrayStart() (length int) { + if d.advanceNil() { + return containerLenNil + } + if d.vd != bincVdArray { + halt.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriverBytes) decLen() int { + if d.vs > 3 { + return int(d.vs - 4) + } + return 
int(d.decLenNumber()) +} + +func (d *bincDecDriverBytes) decLenNumber() (v uint64) { + if x := d.vs; x == 0 { + v = uint64(d.r.readn1()) + } else if x == 1 { + v = uint64(bigen.Uint16(d.r.readn2())) + } else if x == 2 { + v = uint64(bigen.Uint32(d.r.readn4())) + } else { + v = bigen.Uint64(d.r.readn8()) + } + return +} + +func (d *bincDecDriverBytes) DecodeStringAsBytes() (bs []byte, state dBytesAttachState) { + if d.advanceNil() { + return + } + var cond bool + var slen = -1 + switch d.vd { + case bincVdString, bincVdByteArray: + slen = d.decLen() + bs, cond = d.r.readxb(uint(slen)) + state = d.d.attachState(cond) + case bincVdSymbol: + + var symbol uint16 + vs := d.vs + if vs&0x8 == 0 { + symbol = uint16(d.r.readn1()) + } else { + symbol = uint16(bigen.Uint16(d.r.readn2())) + } + if d.s == nil { + d.s = make(map[uint16][]byte, 16) + } + + if vs&0x4 == 0 { + bs = d.s[symbol] + } else { + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(bigen.Uint16(d.r.readn2())) + case 2: + slen = int(bigen.Uint32(d.r.readn4())) + case 3: + slen = int(bigen.Uint64(d.r.readn8())) + } + + bs, cond = d.r.readxb(uint(slen)) + bs = d.d.detach2Bytes(bs, d.d.attachState(cond)) + d.s[symbol] = bs + } + state = dBytesDetach + default: + halt.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + + if d.h.ValidateUnicode && !utf8.Valid(bs) { + halt.errorf("DecodeStringAsBytes: invalid UTF-8: %s", bs) + } + + d.bdRead = false + return +} + +func (d *bincDecDriverBytes) DecodeBytes() (bs []byte, state dBytesAttachState) { + if d.advanceNil() { + return + } + var cond bool + if d.vd == bincVdArray { + slen := d.ReadArrayStart() + bs, cond = usableByteSlice(d.d.buf, slen) + for i := 0; i < slen; i++ { + bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8)) + } + for i := len(bs); i < slen; i++ { + bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) + } + if cond { + d.d.buf = bs + } + state = dBytesAttachBuffer + return + } + if !(d.vd == bincVdString || d.vd == bincVdByteArray) { + halt.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + clen := d.decLen() + d.bdRead = false + bs, cond = d.r.readxb(uint(clen)) + state = d.d.attachState(cond) + return +} + +func (d *bincDecDriverBytes) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + xbs, _, _, ok := d.decodeExtV(ext != nil, xtag) + if !ok { + return + } + if ext == SelfExt { + sideDecode(d.h, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv, xbs, basetype, true) }) + } else { + ext.ReadExt(rv, xbs) + } +} + +func (d *bincDecDriverBytes) DecodeRawExt(re *RawExt) { + xbs, realxtag, state, ok := d.decodeExtV(false, 0) + if !ok { + return + } + re.Tag = uint64(realxtag) + re.setData(xbs, state >= dBytesAttachViewZerocopy) +} + +func (d *bincDecDriverBytes) decodeExtV(verifyTag bool, xtagIn uint64) (xbs []byte, xtag byte, bstate dBytesAttachState, ok bool) { + if xtagIn > 0xff { + halt.errorf("ext: tag must be <= 0xff; got: %v", xtagIn) + } + if d.advanceNil() { + return + } + tag := uint8(xtagIn) + if d.vd == bincVdCustomExt { + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + halt.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag) + } + xbs, ok = d.r.readxb(uint(l)) + bstate = d.d.attachState(ok) + + } else if d.vd == bincVdByteArray { + xbs, bstate = d.DecodeBytes() + } else { + halt.errorf("ext expects extensions or byte array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + d.bdRead = 
false + ok = true + return +} + +func (d *bincDecDriverBytes) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.naked() + var decodeFurther bool + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + n.v = valueTypeNil + case bincSpFalse: + n.v = valueTypeBool + n.b = false + case bincSpTrue: + n.v = valueTypeBool + n.b = true + case bincSpNan: + n.v = valueTypeFloat + n.f = math.NaN() + case bincSpPosInf: + n.v = valueTypeFloat + n.f = math.Inf(1) + case bincSpNegInf: + n.v = valueTypeFloat + n.f = math.Inf(-1) + case bincSpZeroFloat: + n.v = valueTypeFloat + n.f = float64(0) + case bincSpZero: + n.v = valueTypeUint + n.u = uint64(0) + case bincSpNegOne: + n.v = valueTypeInt + n.i = int64(-1) + default: + halt.errorf("cannot infer value - unrecognized special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + case bincVdSmallInt: + n.v = valueTypeUint + n.u = uint64(int8(d.vs)) + 1 + case bincVdPosInt: + n.v = valueTypeUint + n.u = d.decUint() + case bincVdNegInt: + n.v = valueTypeInt + n.i = -(int64(d.decUint())) + case bincVdFloat: + n.v = valueTypeFloat + n.f = d.decFloatVal() + case bincVdString: + n.v = valueTypeString + n.s = d.d.detach2Str(d.DecodeStringAsBytes()) + case bincVdByteArray: + d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) + case bincVdSymbol: + n.v = valueTypeSymbol + n.s = d.d.detach2Str(d.DecodeStringAsBytes()) + case bincVdTimestamp: + n.v = valueTypeTime + tt, err := bincDecodeTime(d.r.readx(uint(d.vs))) + halt.onerror(err) + n.t = tt + case bincVdCustomExt: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(uint(l)) + case bincVdArray: + n.v = valueTypeArray + decodeFurther = true + case bincVdMap: + n.v = valueTypeMap + decodeFurther = true + default: + halt.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } +} + +func (d *bincDecDriverBytes) nextValueBytes() (v []byte) { + if !d.bdRead { + d.readNextBd() + } + d.r.startRecording() + d.nextValueBytesBdReadR() + v = d.r.stopRecording() + d.bdRead = false + return +} + +func (d *bincDecDriverBytes) nextValueBytesBdReadR() { + fnLen := func(vs byte) uint { + switch vs { + case 0: + x := d.r.readn1() + return uint(x) + case 1: + x := d.r.readn2() + return uint(bigen.Uint16(x)) + case 2: + x := d.r.readn4() + return uint(bigen.Uint32(x)) + case 3: + x := d.r.readn8() + return uint(bigen.Uint64(x)) + default: + return uint(vs - 4) + } + } + + var clen uint + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil, bincSpFalse, bincSpTrue, bincSpNan, bincSpPosInf: + case bincSpNegInf, bincSpZeroFloat, bincSpZero, bincSpNegOne: + default: + halt.errorf("cannot infer value - unrecognized special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + case bincVdSmallInt: + case bincVdPosInt, bincVdNegInt: + d.uintBytes() + case bincVdFloat: + fn := func(xlen byte) { + if d.vs&0x8 != 0 { + xlen = d.r.readn1() + if xlen > 8 { + halt.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", xlen) + } + } + d.r.readb(d.d.b[:xlen]) + } + switch d.vs & 0x7 { + case bincFlBin32: + fn(4) + case bincFlBin64: + fn(8) + default: + halt.errorf("read float supports only float32/64 - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + case bincVdString, bincVdByteArray: + clen = fnLen(d.vs) + d.r.skip(clen) + 
case bincVdSymbol: + if d.vs&0x8 == 0 { + d.r.readn1() + } else { + d.r.skip(2) + } + if d.vs&0x4 != 0 { + clen = fnLen(d.vs & 0x3) + d.r.skip(clen) + } + case bincVdTimestamp: + d.r.skip(uint(d.vs)) + case bincVdCustomExt: + clen = fnLen(d.vs) + d.r.readn1() + d.r.skip(clen) + case bincVdArray: + clen = fnLen(d.vs) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + } + case bincVdMap: + clen = fnLen(d.vs) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + default: + halt.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + return +} + +func (d *bincEncDriverBytes) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*BincHandle) + d.e = shared + if shared.bytes { + fp = bincFpEncBytes + } else { + fp = bincFpEncIO + } + + d.init2(enc) + return +} + +func (e *bincEncDriverBytes) writeBytesAsis(b []byte) { e.w.writeb(b) } + +func (e *bincEncDriverBytes) writerEnd() { e.w.end() } + +func (e *bincEncDriverBytes) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *bincEncDriverBytes) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +func (d *bincDecDriverBytes) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*BincHandle) + d.d = shared + if shared.bytes { + fp = bincFpDecBytes + } else { + fp = bincFpDecIO + } + + d.init2(dec) + return +} + +func (d *bincDecDriverBytes) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *bincDecDriverBytes) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *bincDecDriverBytes) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +func (d *bincDecDriverBytes) descBd() string { + return sprintf("%v (%s)", d.bd, bincdescbd(d.bd)) +} + +func (d *bincDecDriverBytes) DecodeFloat32() (f float32) { + return float32(chkOvf.Float32V(d.DecodeFloat64())) +} + +type helperEncDriverBincIO struct{} +type encFnBincIO struct { + i encFnInfo + fe func(*encoderBincIO, *encFnInfo, reflect.Value) +} +type encRtidFnBincIO struct { + rtid uintptr + fn *encFnBincIO +} +type encoderBincIO struct { + dh helperEncDriverBincIO + fp *fastpathEsBincIO + e bincEncDriverIO + encoderBase +} +type helperDecDriverBincIO struct{} +type decFnBincIO struct { + i decFnInfo + fd func(*decoderBincIO, *decFnInfo, reflect.Value) +} +type decRtidFnBincIO struct { + rtid uintptr + fn *decFnBincIO +} +type decoderBincIO struct { + dh helperDecDriverBincIO + fp *fastpathDsBincIO + d bincDecDriverIO + decoderBase +} +type bincEncDriverIO struct { + noBuiltInTypes + encDriverNoopContainerWriter + encDriverContainerNoTrackerT + encInit2er + + h *BincHandle + e *encoderBase + w bufioEncWriter + bincEncState +} +type bincDecDriverIO struct { + decDriverNoopContainerReader + + decInit2er + noBuiltInTypes + + h *BincHandle + d *decoderBase + r ioDecReader + + bincDecState +} + +func (e *encoderBincIO) rawExt(_ *encFnInfo, rv reflect.Value) { + if re := rv2i(rv).(*RawExt); re == nil { + e.e.EncodeNil() + } else { + e.e.EncodeRawExt(re) + } +} + +func (e *encoderBincIO) ext(f *encFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (e *encoderBincIO) selferMarshal(_ *encFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(&Encoder{e}) +} + +func (e *encoderBincIO) binaryMarshal(_ *encFnInfo, rv 
reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) +} + +func (e *encoderBincIO) textMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) +} + +func (e *encoderBincIO) jsonMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) +} + +func (e *encoderBincIO) raw(_ *encFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *encoderBincIO) encodeComplex64(v complex64) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat32(real(v)) +} + +func (e *encoderBincIO) encodeComplex128(v complex128) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat64(real(v)) +} + +func (e *encoderBincIO) kBool(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeBool(rvGetBool(rv)) +} + +func (e *encoderBincIO) kTime(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeTime(rvGetTime(rv)) +} + +func (e *encoderBincIO) kString(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeString(rvGetString(rv)) +} + +func (e *encoderBincIO) kFloat32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(rvGetFloat32(rv)) +} + +func (e *encoderBincIO) kFloat64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rvGetFloat64(rv)) +} + +func (e *encoderBincIO) kComplex64(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex64(rvGetComplex64(rv)) +} + +func (e *encoderBincIO) kComplex128(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex128(rvGetComplex128(rv)) +} + +func (e *encoderBincIO) kInt(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt(rv))) +} + +func (e *encoderBincIO) kInt8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt8(rv))) +} + +func (e *encoderBincIO) kInt16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt16(rv))) +} + +func (e *encoderBincIO) kInt32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt32(rv))) +} + +func (e *encoderBincIO) kInt64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt64(rv))) +} + +func (e *encoderBincIO) kUint(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint(rv))) +} + +func (e *encoderBincIO) kUint8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint8(rv))) +} + +func (e *encoderBincIO) kUint16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint16(rv))) +} + +func (e *encoderBincIO) kUint32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint32(rv))) +} + +func (e *encoderBincIO) kUint64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint64(rv))) +} + +func (e *encoderBincIO) kUintptr(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUintptr(rv))) +} + +func (e *encoderBincIO) kSeqFn(rt reflect.Type) (fn *encFnBincIO) { + + if rt = baseRT(rt); rt.Kind() != reflect.Interface { + fn = e.fn(rt) + } + return +} + +func (e *encoderBincIO) kArrayWMbs(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(l) + e.mapStart(l >> 1) + + var fn *encFnBincIO + builtin := ti.tielem.flagEncBuiltin + if !builtin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerMapKey + e.e.WriteMapElemKey(true) + for { + rvv 
:= rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(false) + } else { + e.mapElemValue() + } + } + e.c = 0 + e.e.WriteMapEnd() + +} + +func (e *encoderBincIO) kArrayW(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l <= 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(l) + + var fn *encFnBincIO + if !ti.tielem.flagEncBuiltin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerArrayElem + e.e.WriteArrayElem(true) + builtin := ti.tielem.flagEncBuiltin + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + e.c = containerArrayElem + e.e.WriteArrayElem(false) + } + + e.c = 0 + e.e.WriteArrayEnd() +} + +func (e *encoderBincIO) kChan(f *encFnInfo, rv reflect.Value) { + if f.ti.chandir&uint8(reflect.RecvDir) == 0 { + halt.errorStr("send-only channel cannot be encoded") + } + if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) { + e.kSliceBytesChan(rv) + return + } + rtslice := reflect.SliceOf(f.ti.elem) + rv = chanToSlice(rv, rtslice, e.h.ChanRecvTimeout) + ti := e.h.getTypeInfo(rt2id(rtslice), rtslice) + if f.ti.mbs { + e.kArrayWMbs(rv, ti, true) + } else { + e.kArrayW(rv, ti, true) + } +} + +func (e *encoderBincIO) kSlice(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, true) + } else if f.ti.rtid == uint8SliceTypId || uint8TypId == rt2id(f.ti.elem) { + + e.e.EncodeBytes(rvGetBytes(rv)) + } else { + e.kArrayW(rv, f.ti, true) + } +} + +func (e *encoderBincIO) kArray(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, false) + } else if handleBytesWithinKArray && uint8TypId == rt2id(f.ti.elem) { + e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, nil)) + } else { + e.kArrayW(rv, f.ti, false) + } +} + +func (e *encoderBincIO) kSliceBytesChan(rv reflect.Value) { + + bs0 := e.blist.peek(32, true) + bs := bs0 + + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + +L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + + break L1 + } + } + default: + for b := range ch { + bs = append(bs, b) + } + } + + e.e.EncodeBytes(bs) + e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.blist.put(bs0) + } +} + +func (e *encoderBincIO) kStructFieldKey(keyType valueType, encName string) { + + if keyType == valueTypeString { + e.e.EncodeString(encName) + } else if keyType == valueTypeInt { + e.e.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + e.e.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + e.e.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64))) + } else { + halt.errorStr2("invalid struct key type: ", keyType.String()) + } + +} + +func (e *encoderBincIO) kStructSimple(f *encFnInfo, rv reflect.Value) { + _ = e.e + tisfi := f.ti.sfi.source() + + chkCirRef := e.h.CheckCircularRef + var si *structFieldInfo + var j int + + if f.ti.toArray || e.h.StructToArray { + if len(tisfi) == 0 { + e.e.WriteArrayEmpty() + return + } + 
e.arrayStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } else { + if len(tisfi) == 0 { + e.e.WriteMapEmpty() + return + } + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + e.mapStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.EncodeStringNoEscape4Json(si.encName) + e.mapElemValue() + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteMapEnd() + } +} + +func (e *encoderBincIO) kStruct(f *encFnInfo, rv reflect.Value) { + _ = e.e + ti := f.ti + toMap := !(ti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if ti.flagMissingFielder { + toMap = true + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + } else if ti.flagMissingFielderPtr { + toMap = true + if rv.CanAddr() { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder).CodecMissingFields() + } else { + mf = rv2i(e.addrRV(rv, ti.rt, ti.ptr)).(MissingFielder).CodecMissingFields() + } + } + newlen := len(mf) + tisfi := ti.sfi.source() + newlen += len(tisfi) + + var fkvs = e.slist.get(newlen)[:newlen] + + recur := e.h.RecursiveEmptyCheck + chkCirRef := e.h.CheckCircularRef + + var xlen int + + var kv sfiRv + var j int + var sf encStructFieldObj + if toMap { + newlen = 0 + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + for _, si := range tisfi { + + if si.omitEmpty { + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyValue(kv.r, e.h.TypeInfos, recur) { + continue + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[newlen] = kv + newlen++ + } + + var mf2s []stringIntf + if len(mf) != 0 { + mf2s = make([]stringIntf, 0, len(mf)) + for k, v := range mf { + if k == "" { + continue + } + if ti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur) { + continue + } + mf2s = append(mf2s, stringIntf{k, v}) + } + } + + xlen = newlen + len(mf2s) + if xlen == 0 { + e.e.WriteMapEmpty() + goto END + } + + e.mapStart(xlen) + + if len(mf2s) != 0 && e.h.Canonical { + mf2w := make([]encStructFieldObj, newlen+len(mf2s)) + for j = 0; j < newlen; j++ { + kv = fkvs[j] + mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, true, + !kv.v.encNameEscape4Json, kv.v.encBuiltin} + } + for _, v := range mf2s { + mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false, false} + j++ + } + sort.Sort((encStructFieldObjSlice)(mf2w)) + for j, sf = range mf2w { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && sf.noEsc4json { + e.e.EncodeStringNoEscape4Json(sf.key) + } else { + e.kStructFieldKey(ti.keyType, sf.key) + } + e.mapElemValue() + if sf.isRv { + if sf.builtin { + e.encodeIB(rv2i(baseRVRV(sf.rv))) + } else { + e.encodeValue(sf.rv, nil) + } + } else { + if !e.encodeBuiltin(sf.intf) { + e.encodeR(reflect.ValueOf(sf.intf)) + } + + } + } + } else { + keytyp := ti.keyType + for j = 0; j < newlen; j++ { + kv = fkvs[j] + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && !kv.v.encNameEscape4Json { + e.e.EncodeStringNoEscape4Json(kv.v.encName) + } else { + e.kStructFieldKey(keytyp, kv.v.encName) + } + e.mapElemValue() + if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + 
} + for _, v := range mf2s { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.kStructFieldKey(keytyp, v.v) + e.mapElemValue() + if !e.encodeBuiltin(v.i) { + e.encodeR(reflect.ValueOf(v.i)) + } + + j++ + } + } + + e.c = 0 + e.e.WriteMapEnd() + } else { + newlen = len(tisfi) + for i, si := range tisfi { + + if si.omitEmpty { + + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyContainerValue(kv.r, e.h.TypeInfos, recur) { + kv.r = reflect.Value{} + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[i] = kv + } + + if newlen == 0 { + e.e.WriteArrayEmpty() + goto END + } + + e.arrayStart(newlen) + for j = 0; j < newlen; j++ { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + kv = fkvs[j] + if !kv.r.IsValid() { + e.e.EncodeNil() + } else if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } + +END: + + e.slist.put(fkvs) +} + +func (e *encoderBincIO) kMap(f *encFnInfo, rv reflect.Value) { + _ = e.e + l := rvLenMap(rv) + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.mapStart(l) + + var keyFn, valFn *encFnBincIO + + ktypeKind := reflect.Kind(f.ti.keykind) + vtypeKind := reflect.Kind(f.ti.elemkind) + + rtval := f.ti.elem + rtvalkind := vtypeKind + for rtvalkind == reflect.Ptr { + rtval = rtval.Elem() + rtvalkind = rtval.Kind() + } + if rtvalkind != reflect.Interface { + valFn = e.fn(rtval) + } + + var rvv = mapAddrLoopvarRV(f.ti.elem, vtypeKind) + + rtkey := f.ti.key + var keyTypeIsString = stringTypId == rt2id(rtkey) + if keyTypeIsString { + keyFn = e.fn(rtkey) + } else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + keyFn = e.fn(rtkey) + } + } + + if e.h.Canonical { + e.kMapCanonical(f.ti, rv, rvv, keyFn, valFn) + e.c = 0 + e.e.WriteMapEnd() + return + } + + var rvk = mapAddrLoopvarRV(f.ti.key, ktypeKind) + + var it mapIter + mapRange(&it, rv, rvk, rvv, true) + + kbuiltin := f.ti.tikey.flagEncBuiltin + vbuiltin := f.ti.tielem.flagEncBuiltin + for j := 0; it.Next(); j++ { + rv = it.Key() + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if keyTypeIsString { + e.e.EncodeString(rvGetString(rv)) + } else if kbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(rv, keyFn) + } + e.mapElemValue() + rv = it.Value() + if vbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(it.Value(), valFn) + } + } + it.Done() + + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderBincIO) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valFn *encFnBincIO) { + _ = e.e + + rtkey := ti.key + rtkeydecl := rtkey.PkgPath() == "" && rtkey.Name() != "" + + mks := rv.MapKeys() + rtkeyKind := rtkey.Kind() + mparams := getMapReqParams(ti) + + switch rtkeyKind { + case reflect.Bool: + + if len(mks) == 2 && mks[0].Bool() { + mks[0], mks[1] = mks[1], mks[0] + } + for i := range mks { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeBool(mks[i].Bool()) + } else { + e.encodeValueNonNil(mks[i], keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mks[i], rvv, mparams), valFn) + } + case reflect.String: + mksv := make([]orderedRv[string], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rvGetString(k) + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeString(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + 
e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]orderedRv[uint64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeUint(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]orderedRv[int64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeInt(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float32: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat32(float32(mksv[i].v)) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float64: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat64(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + default: + if rtkey == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + slices.SortFunc(mksv, cmpTimeRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeTime(mksv[i].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + break + } + + bs0 := e.blist.get(len(mks) * 16) + mksv := bs0 + mksbv := make([]bytesRv, len(mks)) + + sideEncode(e.hh, &e.h.sideEncPool, func(se encoderI) { + se.ResetBytes(&mksv) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + se.setContainerState(containerMapKey) + se.encodeR(baseRVRV(k)) + se.atEndOfEncode() + se.writerEnd() + v.r = k + v.v = mksv[l:] + } + }) + + slices.SortFunc(mksbv, cmpBytesRv) + for j := range mksbv { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.writeBytesAsis(mksbv[j].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksbv[j].r, rvv, mparams), valFn) + } + e.blist.put(mksv) + if !byteSliceSameData(bs0, mksv) { + e.blist.put(bs0) + } + } +} + +func (e *encoderBincIO) init(h Handle) { + initHandle(h) + callMake(&e.e) + e.hh = h + e.h = h.getBasicHandle() + + e.err = errEncoderNotInitialized + + e.fp = e.e.init(h, &e.encoderBase, e).(*fastpathEsBincIO) + + if e.bytes { + e.rtidFn = &e.h.rtidFnsEncBytes + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtBytes + } else { + e.rtidFn = &e.h.rtidFnsEncIO + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtIO + } + + e.reset() +} + +func (e *encoderBincIO) reset() { + e.e.reset() + if e.ci 
!= nil { + e.ci = e.ci[:0] + } + e.c = 0 + e.calls = 0 + e.seq = 0 + e.err = nil +} + +func (e *encoderBincIO) Encode(v interface{}) (err error) { + + defer panicValToErr(e, callRecoverSentinel, &e.err, &err, debugging) + e.mustEncode(v) + return +} + +func (e *encoderBincIO) MustEncode(v interface{}) { + defer panicValToErr(e, callRecoverSentinel, &e.err, nil, true) + e.mustEncode(v) + return +} + +func (e *encoderBincIO) mustEncode(v interface{}) { + halt.onerror(e.err) + if e.hh == nil { + halt.onerror(errNoFormatHandle) + } + + e.calls++ + if !e.encodeBuiltin(v) { + e.encodeR(reflect.ValueOf(v)) + } + + e.calls-- + if e.calls == 0 { + e.e.atEndOfEncode() + e.e.writerEnd() + } +} + +func (e *encoderBincIO) encodeI(iv interface{}) { + if !e.encodeBuiltin(iv) { + e.encodeR(reflect.ValueOf(iv)) + } +} + +func (e *encoderBincIO) encodeIB(iv interface{}) { + if !e.encodeBuiltin(iv) { + + halt.errorStr("[should not happen] invalid type passed to encodeBuiltin") + } +} + +func (e *encoderBincIO) encodeR(base reflect.Value) { + e.encodeValue(base, nil) +} + +func (e *encoderBincIO) encodeBuiltin(iv interface{}) (ok bool) { + ok = true + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + + case Raw: + e.rawBytes(v) + case string: + e.e.EncodeString(v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case complex64: + e.encodeComplex64(v) + case complex128: + e.encodeComplex128(v) + case time.Time: + e.e.EncodeTime(v) + case []byte: + e.e.EncodeBytes(v) + default: + + ok = !skipFastpathTypeSwitchInDirectCall && e.dh.fastpathEncodeTypeSwitch(iv, e) + } + return +} + +func (e *encoderBincIO) encodeValue(rv reflect.Value, fn *encFnBincIO) { + + var ciPushes int + + var rvp reflect.Value + var rvpValid bool + +RV: + switch rv.Kind() { + case reflect.Ptr: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = true + rvp = rv + rv = rv.Elem() + + if e.h.CheckCircularRef && e.ci.canPushElemKind(rv.Kind()) { + e.ci.push(rv2i(rvp)) + ciPushes++ + } + goto RV + case reflect.Interface: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = false + rvp = reflect.Value{} + rv = rv.Elem() + fn = nil + goto RV + case reflect.Map: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Slice, reflect.Chan: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + goto END + } + + if fn == nil { + fn = e.fn(rv.Type()) + } + + if !fn.i.addrE { + + } else if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + fn.fe(e, &fn.i, rv) + +END: + if ciPushes > 0 { + e.ci.pop(ciPushes) + } +} + +func (e *encoderBincIO) encodeValueNonNil(rv reflect.Value, fn *encFnBincIO) { + + if fn.i.addrE { + if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + } + 
fn.fe(e, &fn.i, rv) +} + +func (e *encoderBincIO) encodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + e.encodeValue(baseRV(v), e.fn(t)) + } else { + e.encodeValue(baseRV(v), e.fnNoExt(t)) + } +} + +func (e *encoderBincIO) marshalUtf8(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeString(stringView(bs)) + } +} + +func (e *encoderBincIO) marshalAsis(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.writeBytesAsis(bs) + } +} + +func (e *encoderBincIO) marshalRaw(bs []byte, fnerr error) { + halt.onerror(fnerr) + e.e.EncodeBytes(bs) +} + +func (e *encoderBincIO) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + halt.errorBytes("Raw values cannot be encoded: ", v) + } + e.e.writeBytesAsis(v) +} + +func (e *encoderBincIO) fn(t reflect.Type) *encFnBincIO { + return e.dh.encFnViaBH(t, e.rtidFn, e.h, e.fp, false) +} + +func (e *encoderBincIO) fnNoExt(t reflect.Type) *encFnBincIO { + return e.dh.encFnViaBH(t, e.rtidFnNoExt, e.h, e.fp, true) +} + +func (e *encoderBincIO) mapStart(length int) { + e.e.WriteMapStart(length) + e.c = containerMapStart +} + +func (e *encoderBincIO) mapElemValue() { + e.e.WriteMapElemValue() + e.c = containerMapValue +} + +func (e *encoderBincIO) arrayStart(length int) { + e.e.WriteArrayStart(length) + e.c = containerArrayStart +} + +func (e *encoderBincIO) writerEnd() { + e.e.writerEnd() +} + +func (e *encoderBincIO) atEndOfEncode() { + e.e.atEndOfEncode() +} + +func (e *encoderBincIO) Reset(w io.Writer) { + if e.bytes { + halt.onerror(errEncNoResetBytesWithWriter) + } + e.reset() + if w == nil { + w = io.Discard + } + e.e.resetOutIO(w) +} + +func (e *encoderBincIO) ResetBytes(out *[]byte) { + if !e.bytes { + halt.onerror(errEncNoResetWriterWithBytes) + } + e.resetBytes(out) +} + +func (e *encoderBincIO) resetBytes(out *[]byte) { + e.reset() + if out == nil { + out = &bytesEncAppenderDefOut + } + e.e.resetOutBytes(out) +} + +func (helperEncDriverBincIO) newEncoderBytes(out *[]byte, h Handle) *encoderBincIO { + var c1 encoderBincIO + c1.bytes = true + c1.init(h) + c1.ResetBytes(out) + return &c1 +} + +func (helperEncDriverBincIO) newEncoderIO(out io.Writer, h Handle) *encoderBincIO { + var c1 encoderBincIO + c1.bytes = false + c1.init(h) + c1.Reset(out) + return &c1 +} + +func (helperEncDriverBincIO) encFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathEsBincIO) (f *fastpathEBincIO, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperEncDriverBincIO) encFindRtidFn(s []encRtidFnBincIO, rtid uintptr) (i uint, fn *encFnBincIO) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperEncDriverBincIO) encFromRtidFnSlice(fns *atomicRtidFnSlice) (s []encRtidFnBincIO) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]encRtidFnBincIO](v)) + } + return +} + +func (dh helperEncDriverBincIO) encFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, + x *BasicHandle, fp *fastpathEsBincIO, checkExt bool) (fn *encFnBincIO) { + return dh.encFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, 
x.jsonHandle) +} + +func (dh helperEncDriverBincIO) encFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsBincIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnBincIO) { + rtid := rt2id(rt) + var sp []encRtidFnBincIO = dh.encFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.encFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.encFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperEncDriverBincIO) encFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsBincIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnBincIO) { + + fn = dh.encFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []encRtidFnBincIO + mu.Lock() + sp = dh.encFromRtidFnSlice(fns) + + if sp == nil { + sp = []encRtidFnBincIO{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.encFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]encRtidFnBincIO, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = encRtidFnBincIO{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperEncDriverBincIO) encFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathEsBincIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnBincIO) { + fn = new(encFnBincIO) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + if rtid == timeTypId && timeBuiltin { + fn.fe = (*encoderBincIO).kTime + } else if rtid == rawTypId { + fn.fe = (*encoderBincIO).raw + } else if rtid == rawExtTypId { + fn.fe = (*encoderBincIO).rawExt + fi.addrE = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*encoderBincIO).ext + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fe = (*encoderBincIO).selferMarshal + fi.addrE = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fe = (*encoderBincIO).binaryMarshal + fi.addrE = ti.flagBinaryMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fe = (*encoderBincIO).jsonMarshal + fi.addrE = ti.flagJsonMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fe = (*encoderBincIO).textMarshal + fi.addrE = ti.flagTextMarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fe = fp[idx].encfn + } + } else { + + xfe, xrt := dh.encFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf := xfe.encfn + fn.fe = func(e *encoderBincIO, xf *encFnInfo, xrv reflect.Value) { + xfnf(e, xf, rvConvert(xrv, xrt)) + } + } + } + } + if 
fn.fe == nil { + switch rk { + case reflect.Bool: + fn.fe = (*encoderBincIO).kBool + case reflect.String: + + fn.fe = (*encoderBincIO).kString + case reflect.Int: + fn.fe = (*encoderBincIO).kInt + case reflect.Int8: + fn.fe = (*encoderBincIO).kInt8 + case reflect.Int16: + fn.fe = (*encoderBincIO).kInt16 + case reflect.Int32: + fn.fe = (*encoderBincIO).kInt32 + case reflect.Int64: + fn.fe = (*encoderBincIO).kInt64 + case reflect.Uint: + fn.fe = (*encoderBincIO).kUint + case reflect.Uint8: + fn.fe = (*encoderBincIO).kUint8 + case reflect.Uint16: + fn.fe = (*encoderBincIO).kUint16 + case reflect.Uint32: + fn.fe = (*encoderBincIO).kUint32 + case reflect.Uint64: + fn.fe = (*encoderBincIO).kUint64 + case reflect.Uintptr: + fn.fe = (*encoderBincIO).kUintptr + case reflect.Float32: + fn.fe = (*encoderBincIO).kFloat32 + case reflect.Float64: + fn.fe = (*encoderBincIO).kFloat64 + case reflect.Complex64: + fn.fe = (*encoderBincIO).kComplex64 + case reflect.Complex128: + fn.fe = (*encoderBincIO).kComplex128 + case reflect.Chan: + fn.fe = (*encoderBincIO).kChan + case reflect.Slice: + fn.fe = (*encoderBincIO).kSlice + case reflect.Array: + fn.fe = (*encoderBincIO).kArray + case reflect.Struct: + if ti.simple { + fn.fe = (*encoderBincIO).kStructSimple + } else { + fn.fe = (*encoderBincIO).kStruct + } + case reflect.Map: + fn.fe = (*encoderBincIO).kMap + case reflect.Interface: + + fn.fe = (*encoderBincIO).kErr + default: + + fn.fe = (*encoderBincIO).kErr + } + } + } + return +} +func (d *decoderBincIO) rawExt(f *decFnInfo, rv reflect.Value) { + d.d.DecodeRawExt(rv2i(rv).(*RawExt)) +} + +func (d *decoderBincIO) ext(f *decFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (d *decoderBincIO) selferUnmarshal(_ *decFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(&Decoder{d}) +} + +func (d *decoderBincIO) binaryUnmarshal(_ *decFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs, _ := d.d.DecodeBytes() + fnerr := bm.UnmarshalBinary(xbs) + halt.onerror(fnerr) +} + +func (d *decoderBincIO) textUnmarshal(_ *decFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(bytesOKs(d.d.DecodeStringAsBytes())) + halt.onerror(fnerr) +} + +func (d *decoderBincIO) jsonUnmarshal(_ *decFnInfo, rv reflect.Value) { + d.jsonUnmarshalV(rv2i(rv).(jsonUnmarshaler)) +} + +func (d *decoderBincIO) jsonUnmarshalV(tm jsonUnmarshaler) { + + halt.onerror(tm.UnmarshalJSON(d.d.nextValueBytes())) +} + +func (d *decoderBincIO) kErr(_ *decFnInfo, rv reflect.Value) { + halt.errorf("unsupported decoding kind: %s, for %#v", rv.Kind(), rv) + +} + +func (d *decoderBincIO) raw(_ *decFnInfo, rv reflect.Value) { + rvSetBytes(rv, d.rawBytes()) +} + +func (d *decoderBincIO) kString(_ *decFnInfo, rv reflect.Value) { + rvSetString(rv, d.detach2Str(d.d.DecodeStringAsBytes())) +} + +func (d *decoderBincIO) kBool(_ *decFnInfo, rv reflect.Value) { + rvSetBool(rv, d.d.DecodeBool()) +} + +func (d *decoderBincIO) kTime(_ *decFnInfo, rv reflect.Value) { + rvSetTime(rv, d.d.DecodeTime()) +} + +func (d *decoderBincIO) kFloat32(_ *decFnInfo, rv reflect.Value) { + rvSetFloat32(rv, d.d.DecodeFloat32()) +} + +func (d *decoderBincIO) kFloat64(_ *decFnInfo, rv reflect.Value) { + rvSetFloat64(rv, d.d.DecodeFloat64()) +} + +func (d *decoderBincIO) kComplex64(_ *decFnInfo, rv reflect.Value) { + rvSetComplex64(rv, complex(d.d.DecodeFloat32(), 0)) +} + +func (d *decoderBincIO) kComplex128(_ *decFnInfo, rv reflect.Value) { + rvSetComplex128(rv, 
complex(d.d.DecodeFloat64(), 0)) +} + +func (d *decoderBincIO) kInt(_ *decFnInfo, rv reflect.Value) { + rvSetInt(rv, int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))) +} + +func (d *decoderBincIO) kInt8(_ *decFnInfo, rv reflect.Value) { + rvSetInt8(rv, int8(chkOvf.IntV(d.d.DecodeInt64(), 8))) +} + +func (d *decoderBincIO) kInt16(_ *decFnInfo, rv reflect.Value) { + rvSetInt16(rv, int16(chkOvf.IntV(d.d.DecodeInt64(), 16))) +} + +func (d *decoderBincIO) kInt32(_ *decFnInfo, rv reflect.Value) { + rvSetInt32(rv, int32(chkOvf.IntV(d.d.DecodeInt64(), 32))) +} + +func (d *decoderBincIO) kInt64(_ *decFnInfo, rv reflect.Value) { + rvSetInt64(rv, d.d.DecodeInt64()) +} + +func (d *decoderBincIO) kUint(_ *decFnInfo, rv reflect.Value) { + rvSetUint(rv, uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderBincIO) kUintptr(_ *decFnInfo, rv reflect.Value) { + rvSetUintptr(rv, uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderBincIO) kUint8(_ *decFnInfo, rv reflect.Value) { + rvSetUint8(rv, uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))) +} + +func (d *decoderBincIO) kUint16(_ *decFnInfo, rv reflect.Value) { + rvSetUint16(rv, uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))) +} + +func (d *decoderBincIO) kUint32(_ *decFnInfo, rv reflect.Value) { + rvSetUint32(rv, uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))) +} + +func (d *decoderBincIO) kUint64(_ *decFnInfo, rv reflect.Value) { + rvSetUint64(rv, d.d.DecodeUint64()) +} + +func (d *decoderBincIO) kInterfaceNaked(f *decFnInfo) (rvn reflect.Value) { + + n := d.naked() + d.d.DecodeNaked() + + if decFailNonEmptyIntf && f.ti.numMeth > 0 { + halt.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + } + + switch n.v { + case valueTypeMap: + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapStrIntfTypId { + var v2 map[string]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + + rvn = rvZeroAddrK(d.h.MapType, reflect.Map) + d.decodeValue(rvn, nil) + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvZeroAddrK(d.h.SliceType, reflect.Slice) + d.decodeValue(rvn, nil) + } + if d.h.PreferArrayOverSlice { + rvn = rvGetArray4Slice(rvn) + } + case valueTypeExt: + tag, bytes := n.u, n.l + bfn := d.h.getExtForTag(tag) + var re = RawExt{Tag: tag} + if bytes == nil { + + if bfn == nil { + d.decode(&re.Value) + rvn = rv4iptr(&re).Elem() + } else if bfn.ext == SelfExt { + rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind()) + d.decodeValue(rvn, d.fnNoExt(bfn.rt)) + } else { + rvn = reflect.New(bfn.rt) + d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext) + rvn = rvn.Elem() + } + } else { + + if bfn == nil { + re.setData(bytes, false) + rvn = rv4iptr(&re).Elem() + } else { + rvn = reflect.New(bfn.rt) + if bfn.ext == SelfExt { + sideDecode(d.hh, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv2i(rvn), bytes, bfn.rt, true) }) + } else { + bfn.ext.ReadExt(rv2i(rvn), bytes) + } + rvn = rvn.Elem() + } + } + + if d.h.PreferPointerForStructOrArray && rvn.CanAddr() { + if rk := rvn.Kind(); rk == 
reflect.Array || rk == reflect.Struct { + rvn = rvn.Addr() + } + } + case valueTypeNil: + + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + halt.errorStr2("kInterfaceNaked: unexpected valueType: ", n.v.String()) + } + return +} + +func (d *decoderBincIO) kInterface(f *decFnInfo, rv reflect.Value) { + + isnilrv := rvIsNil(rv) + + var rvn reflect.Value + + if d.h.InterfaceReset { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } else if !isnilrv { + decSetNonNilRV2Zero4Intf(rv) + } + return + } + } else if isnilrv { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } + return + } + } else { + + rvn = rv.Elem() + } + + canDecode, _ := isDecodeable(rvn) + + if !canDecode { + rvn2 := d.oneShotAddrRV(rvn.Type(), rvn.Kind()) + rvSetDirect(rvn2, rvn) + rvn = rvn2 + } + + d.decodeValue(rvn, nil) + rvSetIntf(rv, rvn) +} + +func (d *decoderBincIO) kStructField(si *structFieldInfo, rv reflect.Value) { + if d.d.TryNil() { + rv = si.fieldNoAlloc(rv, true) + if rv.IsValid() { + decSetNonNilRV2Zero(rv) + } + } else if si.decBuiltin { + rv = rvAddr(si.fieldAlloc(rv), si.ptrTyp) + d.decode(rv2i(rv)) + } else { + fn := d.fn(si.baseTyp) + rv = si.fieldAlloc(rv) + if fn.i.addrD { + rv = rvAddr(rv, si.ptrTyp) + } + fn.fd(d, &fn.i, rv) + } +} + +func (d *decoderBincIO) kStructSimple(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var rvkencname []byte + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderBincIO) kStruct(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + var mf MissingFielder + if ti.flagMissingFielder { + mf = rv2i(rv).(MissingFielder) + } else if ti.flagMissingFielderPtr { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var name2 []byte + var rvkencname []byte + tkt := ti.keyType + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + + if tkt == valueTypeString { + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + } else if 
tkt == valueTypeInt { + rvkencname = strconv.AppendInt(d.b[:0], d.d.DecodeInt64(), 10) + } else if tkt == valueTypeUint { + rvkencname = strconv.AppendUint(d.b[:0], d.d.DecodeUint64(), 10) + } else if tkt == valueTypeFloat { + rvkencname = strconv.AppendFloat(d.b[:0], d.d.DecodeFloat64(), 'f', -1, 64) + } else { + halt.errorStr2("invalid struct key type: ", ti.keyType.String()) + } + + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else if mf != nil { + + name2 = append(name2[:0], rvkencname...) + var f interface{} + d.decode(&f) + if !mf.CodecMissingField(name2, f) && d.h.ErrorIfNoField { + halt.errorStr2("no matching struct field when decoding stream map with key: ", stringView(name2)) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderBincIO) kSlice(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + rvCanset := rv.CanSet() + + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + rvbs := rvGetBytes(rv) + if rvCanset { + bs2, bst := d.decodeBytesInto(rvbs, false) + if bst != dBytesIntoParamOut { + rvSetBytes(rv, bs2) + } + } else { + + d.decodeBytesInto(rvbs[:len(rvbs):len(rvbs)], true) + } + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset { + if rvIsNil(rv) { + rvSetDirect(rv, rvSliceZeroCap(ti.rt)) + } else { + rvSetSliceLen(rv, 0) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem0Mut := !scalarBitset.isset(ti.elemkind) + rtelem := ti.elem + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnBincIO + + var rvChanged bool + + var rv0 = rv + var rv9 reflect.Value + + rvlen := rvLenSlice(rv) + rvcap := rvCapSlice(rv) + maxInitLen := d.maxInitLen() + hasLen := containerLenS >= 0 + if hasLen { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen1 := int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + if rvlen1 == rvlen { + } else if rvlen1 <= rvcap { + if rvCanset { + rvlen = rvlen1 + rvSetSliceLen(rv, rvlen) + } + } else if rvCanset { + rvlen = rvlen1 + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && rtelem0Mut { + rvCopySlice(rv, rv0, rtelem) + } + } else if containerLenS != rvlen { + if rvCanset { + rvlen = containerLenS + rvSetSliceLen(rv, rvlen) + } + } + } + + var elemReset = d.h.SliceElementReset + + 
var rtelemIsPtr bool + var rtelemElem reflect.Type + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } + + var j int + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if rvCanset { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into non-settable slice") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + + if rvlen < rvcap { + rvlen = rvcap + if rvCanset { + rvSetSliceLen(rv, rvlen) + } else if rvChanged { + rv = rvSlice(rv, rvlen) + } else { + halt.onerror(errExpandSliceCannotChange) + } + } else { + if !(rvCanset || rvChanged) { + halt.onerror(errExpandSliceCannotChange) + } + rv, rvcap, rvCanset = rvGrowSlice(rv, f.ti, rvcap, 1) + + rvlen = rvcap + rvChanged = !rvCanset + } + } + + rv9 = rvArrayIndex(rv, j, f.ti, true) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if j < rvlen { + if rvCanset { + rvSetSliceLen(rv, j) + } else if rvChanged { + rv = rvSlice(rv, j) + } + + } else if j == 0 && rvIsNil(rv) { + if rvCanset { + rv = rvSliceZeroCap(ti.rt) + rvCanset = false + rvChanged = true + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } +} + +func (d *decoderBincIO) kArray(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + ctyp := d.d.ContainerType() + if handleBytesWithinKArray && (ctyp == valueTypeBytes || ctyp == valueTypeString) { + + if ti.elemkind != uint8(reflect.Uint8) { + halt.errorf("bytes/string in stream can decode into array of bytes, but not %v", ti.rt) + } + rvbs := rvGetArrayBytes(rv, nil) + d.decodeBytesInto(rvbs, true) + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var rv9 reflect.Value + + rvlen := rv.Len() + hasLen := containerLenS >= 0 + if hasLen && containerLenS > rvlen { + halt.errorf("cannot decode into array with length: %v, less than container length: %v", any(rvlen), any(containerLenS)) + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + var fn *decFnBincIO + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } else { + fn = d.fn(rtelem) + } + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } 
else { + d.mapElemValue() + } + + if j >= rvlen { + d.arrayCannotExpand(rvlen, j+1) + d.swallow() + continue + } + + rv9 = rvArrayIndex(rv, j, f.ti, false) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderBincIO) kChan(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + if ti.chandir&uint8(reflect.SendDir) == 0 { + halt.errorStr("receive-only channel cannot be decoded") + } + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + bs2, _ := d.d.DecodeBytes() + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + return + } + + var rvCanset = rv.CanSet() + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset && rvIsNil(rv) { + rvSetDirect(rv, reflect.MakeChan(ti.rt, 0)) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + useTransient := decUseTransient && ti.elemkind != byte(reflect.Ptr) && ti.tielem.flagCanTransient + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnBincIO + + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + var rvlen int + hasLen := containerLenS >= 0 + maxInitLen := d.maxInitLen() + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if hasLen { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + } else { + rvlen = decDefChanCap + } + if rvCanset { + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } else { + halt.errorStr("cannot decode into non-settable chan") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if rv9.IsValid() { + rvSetZero(rv9) + } else if useTransient { + rv9 = d.perType.TransientAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } else { + rv9 = rvZeroAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } + if !d.d.TryNil() { + d.decodeValueNoCheckNil(rv9, fn) + } + rv.Send(rv9) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } + +} + +func (d *decoderBincIO) kMap(f *decFnInfo, rv reflect.Value) { + _ = d.d + containerLen := d.mapStart(d.d.ReadMapStart()) + ti := f.ti + if rvIsNil(rv) { + rvlen := int(decInferLen(containerLen, d.maxInitLen(), uint(ti.keysize+ti.elemsize))) + rvSetDirect(rv, makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + d.mapEnd() + return + } + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := reflect.Kind(ti.elemkind) + ktypeKind := reflect.Kind(ti.keykind) + mparams := 
getMapReqParams(ti) + + vtypePtr := vtypeKind == reflect.Ptr + ktypePtr := ktypeKind == reflect.Ptr + + vTransient := decUseTransient && !vtypePtr && ti.tielem.flagCanTransient + + kTransient := vTransient && !ktypePtr && ti.tikey.flagCanTransient + + var vtypeElem reflect.Type + + var keyFn, valFn *decFnBincIO + var ktypeLo, vtypeLo = ktype, vtype + + if ktypeKind == reflect.Ptr { + for ktypeLo = ktype.Elem(); ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + } + + if vtypePtr { + vtypeElem = vtype.Elem() + for vtypeLo = vtypeElem; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + } + + rvkMut := !scalarBitset.isset(ti.keykind) + rvvMut := !scalarBitset.isset(ti.elemkind) + rvvCanNil := isnilBitset.isset(ti.elemkind) + + var rvk, rvkn, rvv, rvvn, rvva, rvvz reflect.Value + + var doMapGet, doMapSet bool + + if !d.h.MapValueReset { + if rvvMut && (vtypeKind != reflect.Interface || !d.h.InterfaceReset) { + doMapGet = true + rvva = mapAddrLoopvarRV(vtype, vtypeKind) + } + } + + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen >= 0 + + var kstr2bs []byte + var kstr string + + var mapKeyStringSharesBytesBuf bool + var att dBytesAttachState + + var vElem, kElem reflect.Type + kbuiltin := ti.tikey.flagDecBuiltin && ti.keykind != uint8(reflect.Slice) + vbuiltin := ti.tielem.flagDecBuiltin + if kbuiltin && ktypePtr { + kElem = ti.key.Elem() + } + if vbuiltin && vtypePtr { + vElem = ti.elem.Elem() + } + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + mapKeyStringSharesBytesBuf = false + kstr = "" + if j == 0 { + + if kTransient { + rvk = d.perType.TransientAddr2K(ktype, ktypeKind) + } else { + rvk = rvZeroAddrK(ktype, ktypeKind) + } + if !rvkMut { + rvkn = rvk + } + if !rvvMut { + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + } + if !ktypeIsString && keyFn == nil { + keyFn = d.fn(ktypeLo) + } + if valFn == nil { + valFn = d.fn(vtypeLo) + } + } else if rvkMut { + rvSetZero(rvk) + } else { + rvk = rvkn + } + + d.mapElemKey(j == 0) + + if d.d.TryNil() { + rvSetZero(rvk) + } else if ktypeIsString { + kstr2bs, att = d.d.DecodeStringAsBytes() + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, att) + rvSetString(rvk, kstr) + } else { + if kbuiltin { + if ktypePtr { + if rvIsNil(rvk) { + rvSetDirect(rvk, reflect.New(kElem)) + } + d.decode(rv2i(rvk)) + } else { + d.decode(rv2i(rvAddr(rvk, ti.tikey.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvk, keyFn) + } + + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() && rvk2.Type() == uint8SliceTyp { + kstr2bs = rvGetBytes(rvk2) + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, dBytesAttachView) + rvSetIntf(rvk, rv4istr(kstr)) + } + + } + } + + if mapKeyStringSharesBytesBuf && d.bufio { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + mapKeyStringSharesBytesBuf = false + } + + d.mapElemValue() + + if d.d.TryNil() { + if mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + + if !rvvz.IsValid() { + rvvz = rvZeroK(vtype, vtypeKind) + } + mapSet(rv, rvk, rvvz, mparams) + continue + } + + doMapSet = true + + if !rvvMut { + rvv = rvvn + } else if !doMapGet { + goto NEW_RVV + } else { + rvv = mapGet(rv, rvk, rvva, mparams) + if !rvv.IsValid() || (rvvCanNil && 
rvIsNil(rvv)) { + goto NEW_RVV + } + switch vtypeKind { + case reflect.Ptr, reflect.Map: + doMapSet = false + case reflect.Interface: + + rvvn = rvv.Elem() + if k := rvvn.Kind(); (k == reflect.Ptr || k == reflect.Map) && !rvIsNil(rvvn) { + d.decodeValueNoCheckNil(rvvn, nil) + continue + } + + rvvn = rvZeroAddrK(vtype, vtypeKind) + rvSetIntf(rvvn, rvv) + rvv = rvvn + default: + + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + rvSetDirect(rvvn, rvv) + rvv = rvvn + } + } + goto DECODE_VALUE_NO_CHECK_NIL + + NEW_RVV: + if vtypePtr { + rvv = reflect.New(vtypeElem) + } else if vTransient { + rvv = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvv = rvZeroAddrK(vtype, vtypeKind) + } + + DECODE_VALUE_NO_CHECK_NIL: + if doMapSet && mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + if vbuiltin { + if vtypePtr { + if rvIsNil(rvv) { + rvSetDirect(rvv, reflect.New(vElem)) + } + d.decode(rv2i(rvv)) + } else { + d.decode(rv2i(rvAddr(rvv, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvv, valFn) + } + if doMapSet { + mapSet(rv, rvk, rvv, mparams) + } + } + + d.mapEnd() +} + +func (d *decoderBincIO) init(h Handle) { + initHandle(h) + callMake(&d.d) + d.hh = h + d.h = h.getBasicHandle() + + d.err = errDecoderNotInitialized + + if d.h.InternString && d.is == nil { + d.is.init() + } + + d.fp = d.d.init(h, &d.decoderBase, d).(*fastpathDsBincIO) + + if d.bytes { + d.rtidFn = &d.h.rtidFnsDecBytes + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtBytes + } else { + d.bufio = d.h.ReaderBufferSize > 0 + d.rtidFn = &d.h.rtidFnsDecIO + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtIO + } + + d.reset() + +} + +func (d *decoderBincIO) reset() { + d.d.reset() + d.err = nil + d.c = 0 + d.depth = 0 + d.calls = 0 + + d.maxdepth = decDefMaxDepth + if d.h.MaxDepth > 0 { + d.maxdepth = d.h.MaxDepth + } + d.mtid = 0 + d.stid = 0 + d.mtr = false + d.str = false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + _, d.mtr = fastpathAvIndex(d.mtid) + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + _, d.str = fastpathAvIndex(d.stid) + } +} + +func (d *decoderBincIO) Reset(r io.Reader) { + if d.bytes { + halt.onerror(errDecNoResetBytesWithReader) + } + d.reset() + if r == nil { + r = &eofReader + } + d.d.resetInIO(r) +} + +func (d *decoderBincIO) ResetBytes(in []byte) { + if !d.bytes { + halt.onerror(errDecNoResetReaderWithBytes) + } + d.resetBytes(in) +} + +func (d *decoderBincIO) resetBytes(in []byte) { + d.reset() + if in == nil { + in = zeroByteSlice + } + d.d.resetInBytes(in) +} + +func (d *decoderBincIO) ResetString(s string) { + d.ResetBytes(bytesView(s)) +} + +func (d *decoderBincIO) Decode(v interface{}) (err error) { + + defer panicValToErr(d, callRecoverSentinel, &d.err, &err, debugging) + d.mustDecode(v) + return +} + +func (d *decoderBincIO) MustDecode(v interface{}) { + defer panicValToErr(d, callRecoverSentinel, &d.err, nil, true) + d.mustDecode(v) + return +} + +func (d *decoderBincIO) mustDecode(v interface{}) { + halt.onerror(d.err) + if d.hh == nil { + halt.onerror(errNoFormatHandle) + } + + d.calls++ + d.decode(v) + d.calls-- +} + +func (d *decoderBincIO) Release() {} + +func (d *decoderBincIO) swallow() { + d.d.nextValueBytes() +} + +func (d *decoderBincIO) nextValueBytes() []byte { + return d.d.nextValueBytes() +} + +func (d *decoderBincIO) decode(iv interface{}) { + _ = d.d + + rv, ok := 
isNil(iv, true) + if ok { + halt.onerror(errCannotDecodeIntoNil) + } + + switch v := iv.(type) { + + case *string: + *v = d.detach2Str(d.d.DecodeStringAsBytes()) + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + *v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *uintptr: + *v = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *float32: + *v = d.d.DecodeFloat32() + case *float64: + *v = d.d.DecodeFloat64() + case *complex64: + *v = complex(d.d.DecodeFloat32(), 0) + case *complex128: + *v = complex(d.d.DecodeFloat64(), 0) + case *[]byte: + *v, _ = d.decodeBytesInto(*v, false) + case []byte: + + d.decodeBytesInto(v[:len(v):len(v)], true) + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(rv4iptr(v), nil) + + case reflect.Value: + if ok, _ = isDecodeable(v); !ok { + d.haltAsNotDecodeable(v) + } + d.decodeValue(v, nil) + + default: + + if skipFastpathTypeSwitchInDirectCall || !d.dh.fastpathDecodeTypeSwitch(iv, d) { + if !rv.IsValid() { + rv = reflect.ValueOf(iv) + } + if ok, _ = isDecodeable(rv); !ok { + d.haltAsNotDecodeable(rv) + } + d.decodeValue(rv, nil) + } + } +} + +func (d *decoderBincIO) decodeValue(rv reflect.Value, fn *decFnBincIO) { + if d.d.TryNil() { + decSetNonNilRV2Zero(rv) + } else { + d.decodeValueNoCheckNil(rv, fn) + } +} + +func (d *decoderBincIO) decodeValueNoCheckNil(rv reflect.Value, fn *decFnBincIO) { + + var rvp reflect.Value + var rvpValid bool +PTR: + if rv.Kind() == reflect.Ptr { + rvpValid = true + if rvIsNil(rv) { + rvSetDirect(rv, reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + goto PTR + } + + if fn == nil { + fn = d.fn(rv.Type()) + } + if fn.i.addrD { + if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else if fn.i.addrDf { + halt.errorStr("cannot decode into a non-pointer value") + } + } + fn.fd(d, &fn.i, rv) +} + +func (d *decoderBincIO) decodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + d.decodeValue(baseRV(v), d.fn(t)) + } else { + d.decodeValue(baseRV(v), d.fnNoExt(t)) + } +} + +func (d *decoderBincIO) structFieldNotFound(index int, rvkencname string) { + + if d.h.ErrorIfNoField { + if index >= 0 { + halt.errorInt("no matching struct field found when decoding stream array at index ", int64(index)) + } else if rvkencname != "" { + halt.errorStr2("no matching struct field found when decoding stream map with key ", rvkencname) + } + } + d.swallow() +} + +func (d *decoderBincIO) decodeBytesInto(out []byte, mustFit bool) (v []byte, state dBytesIntoState) { + v, att := d.d.DecodeBytes() + if cap(v) == 0 || (att >= dBytesAttachViewZerocopy && !mustFit) { + + return + } + if len(v) == 0 { + v = zeroByteSlice + return + } + if len(out) == len(v) { + state = dBytesIntoParamOut + } else if cap(out) >= len(v) { + out = out[:len(v)] + state = dBytesIntoParamOutSlice + } else if mustFit { + halt.errorf("bytes capacity insufficient for decoded bytes: got/expected: %d/%d", len(v), 
len(out)) + } else { + out = make([]byte, len(v)) + state = dBytesIntoNew + } + copy(out, v) + v = out + return +} + +func (d *decoderBincIO) rawBytes() (v []byte) { + + v = d.d.nextValueBytes() + if d.bytes && !d.h.ZeroCopy { + vv := make([]byte, len(v)) + copy(vv, v) + v = vv + } + return +} + +func (d *decoderBincIO) wrapErr(v error, err *error) { + *err = wrapCodecErr(v, d.hh.Name(), d.d.NumBytesRead(), false) +} + +func (d *decoderBincIO) NumBytesRead() int { + return d.d.NumBytesRead() +} + +func (d *decoderBincIO) containerNext(j, containerLen int, hasLen bool) bool { + + if hasLen { + return j < containerLen + } + return !d.d.CheckBreak() +} + +func (d *decoderBincIO) mapElemKey(firstTime bool) { + d.d.ReadMapElemKey(firstTime) + d.c = containerMapKey +} + +func (d *decoderBincIO) mapElemValue() { + d.d.ReadMapElemValue() + d.c = containerMapValue +} + +func (d *decoderBincIO) mapEnd() { + d.d.ReadMapEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderBincIO) arrayElem(firstTime bool) { + d.d.ReadArrayElem(firstTime) + d.c = containerArrayElem +} + +func (d *decoderBincIO) arrayEnd() { + d.d.ReadArrayEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderBincIO) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) { + + var vv interface{} + d.decode(&vv) + ext.UpdateExt(v, vv) + +} + +func (d *decoderBincIO) fn(t reflect.Type) *decFnBincIO { + return d.dh.decFnViaBH(t, d.rtidFn, d.h, d.fp, false) +} + +func (d *decoderBincIO) fnNoExt(t reflect.Type) *decFnBincIO { + return d.dh.decFnViaBH(t, d.rtidFnNoExt, d.h, d.fp, true) +} + +func (helperDecDriverBincIO) newDecoderBytes(in []byte, h Handle) *decoderBincIO { + var c1 decoderBincIO + c1.bytes = true + c1.init(h) + c1.ResetBytes(in) + return &c1 +} + +func (helperDecDriverBincIO) newDecoderIO(in io.Reader, h Handle) *decoderBincIO { + var c1 decoderBincIO + c1.init(h) + c1.Reset(in) + return &c1 +} + +func (helperDecDriverBincIO) decFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathDsBincIO) (f *fastpathDBincIO, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperDecDriverBincIO) decFindRtidFn(s []decRtidFnBincIO, rtid uintptr) (i uint, fn *decFnBincIO) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperDecDriverBincIO) decFromRtidFnSlice(fns *atomicRtidFnSlice) (s []decRtidFnBincIO) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]decRtidFnBincIO](v)) + } + return +} + +func (dh helperDecDriverBincIO) decFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, x *BasicHandle, fp *fastpathDsBincIO, + checkExt bool) (fn *decFnBincIO) { + return dh.decFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperDecDriverBincIO) decFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsBincIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnBincIO) { + rtid := rt2id(rt) + var sp []decRtidFnBincIO = dh.decFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.decFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.decFnViaLoader(rt, rtid, fns, 
tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperDecDriverBincIO) decFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsBincIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnBincIO) { + + fn = dh.decFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []decRtidFnBincIO + mu.Lock() + sp = dh.decFromRtidFnSlice(fns) + + if sp == nil { + sp = []decRtidFnBincIO{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.decFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]decRtidFnBincIO, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = decRtidFnBincIO{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperDecDriverBincIO) decFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathDsBincIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnBincIO) { + fn = new(decFnBincIO) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + fi.addrDf = true + + if rtid == timeTypId && timeBuiltin { + fn.fd = (*decoderBincIO).kTime + } else if rtid == rawTypId { + fn.fd = (*decoderBincIO).raw + } else if rtid == rawExtTypId { + fn.fd = (*decoderBincIO).rawExt + fi.addrD = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fd = (*decoderBincIO).ext + fi.addrD = true + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fd = (*decoderBincIO).selferUnmarshal + fi.addrD = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fd = (*decoderBincIO).binaryUnmarshal + fi.addrD = ti.flagBinaryUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fd = (*decoderBincIO).jsonUnmarshal + fi.addrD = ti.flagJsonUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fd = (*decoderBincIO).textUnmarshal + fi.addrD = ti.flagTextUnmarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fd = fp[idx].decfn + fi.addrD = true + fi.addrDf = false + if rk == reflect.Array { + fi.addrD = false + } + } + } else { + + xfe, xrt := dh.decFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf2 := xfe.decfn + if rk == reflect.Array { + fi.addrD = false + fn.fd = func(d *decoderBincIO, xf *decFnInfo, xrv reflect.Value) { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } else { + fi.addrD = true + fi.addrDf = false + xptr2rt := reflect.PointerTo(xrt) + fn.fd = func(d *decoderBincIO, xf *decFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, rvConvert(xrv, xptr2rt)) + } else { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } + } + } + } + } + if fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fd = 
(*decoderBincIO).kBool + case reflect.String: + fn.fd = (*decoderBincIO).kString + case reflect.Int: + fn.fd = (*decoderBincIO).kInt + case reflect.Int8: + fn.fd = (*decoderBincIO).kInt8 + case reflect.Int16: + fn.fd = (*decoderBincIO).kInt16 + case reflect.Int32: + fn.fd = (*decoderBincIO).kInt32 + case reflect.Int64: + fn.fd = (*decoderBincIO).kInt64 + case reflect.Uint: + fn.fd = (*decoderBincIO).kUint + case reflect.Uint8: + fn.fd = (*decoderBincIO).kUint8 + case reflect.Uint16: + fn.fd = (*decoderBincIO).kUint16 + case reflect.Uint32: + fn.fd = (*decoderBincIO).kUint32 + case reflect.Uint64: + fn.fd = (*decoderBincIO).kUint64 + case reflect.Uintptr: + fn.fd = (*decoderBincIO).kUintptr + case reflect.Float32: + fn.fd = (*decoderBincIO).kFloat32 + case reflect.Float64: + fn.fd = (*decoderBincIO).kFloat64 + case reflect.Complex64: + fn.fd = (*decoderBincIO).kComplex64 + case reflect.Complex128: + fn.fd = (*decoderBincIO).kComplex128 + case reflect.Chan: + fn.fd = (*decoderBincIO).kChan + case reflect.Slice: + fn.fd = (*decoderBincIO).kSlice + case reflect.Array: + fi.addrD = false + fn.fd = (*decoderBincIO).kArray + case reflect.Struct: + if ti.simple { + fn.fd = (*decoderBincIO).kStructSimple + } else { + fn.fd = (*decoderBincIO).kStruct + } + case reflect.Map: + fn.fd = (*decoderBincIO).kMap + case reflect.Interface: + + fn.fd = (*decoderBincIO).kInterface + default: + + fn.fd = (*decoderBincIO).kErr + } + } + } + return +} +func (e *bincEncDriverIO) EncodeNil() { + e.w.writen1(bincBdNil) +} + +func (e *bincEncDriverIO) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + } else { + bs := bincEncodeTime(t) + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriverIO) EncodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriverIO) encSpFloat(f float64) (done bool) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + } else if math.IsNaN(float64(f)) { + e.w.writen1(bincVdSpecial<<4 | bincSpNan) + } else if math.IsInf(float64(f), +1) { + e.w.writen1(bincVdSpecial<<4 | bincSpPosInf) + } else if math.IsInf(float64(f), -1) { + e.w.writen1(bincVdSpecial<<4 | bincSpNegInf) + } else { + return + } + return true +} + +func (e *bincEncDriverIO) EncodeFloat32(f float32) { + if !e.encSpFloat(float64(f)) { + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + e.w.writen4(bigen.PutUint32(math.Float32bits(f))) + } +} + +func (e *bincEncDriverIO) EncodeFloat64(f float64) { + if e.encSpFloat(f) { + return + } + b := bigen.PutUint64(math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (b[i] == 0); i-- { + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writen8(b) +} + +func (e *bincEncDriverIO) encIntegerPrune32(bd byte, pos bool, v uint64) { + b := bigen.PutUint32(uint32(v)) + if bincDoPrune { + i := byte(pruneSignExt(b[:], pos)) + e.w.writen1(bd | 3 - i) + e.w.writeb(b[i:]) + } else { + e.w.writen1(bd | 3) + e.w.writen4(b) + } +} + +func (e *bincEncDriverIO) encIntegerPrune64(bd byte, pos bool, v uint64) { + b := bigen.PutUint64(v) + if bincDoPrune { + i := byte(pruneSignExt(b[:], pos)) + e.w.writen1(bd | 7 - i) + e.w.writeb(b[i:]) + } else { + e.w.writen1(bd | 7) + e.w.writen8(b) + } +} + +func (e *bincEncDriverIO) EncodeInt(v int64) { + if v >= 0 { + e.encUint(bincVdPosInt<<4, true, uint64(v)) + 
} else if v == -1 { + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + } else { + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriverIO) EncodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, v) +} + +func (e *bincEncDriverIO) encUint(bd byte, pos bool, v uint64) { + if v == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + } else if pos && v >= 1 && v <= 16 { + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + } else if v <= math.MaxUint8 { + e.w.writen2(bd, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + e.w.writen2(bigen.PutUint16(uint16(v))) + } else if v <= math.MaxUint32 { + e.encIntegerPrune32(bd, pos, v) + } else { + e.encIntegerPrune64(bd, pos, v) + } +} + +func (e *bincEncDriverIO) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + var bs0, bs []byte + if ext == SelfExt { + bs0 = e.e.blist.get(1024) + bs = bs0 + sideEncode(e.h, &e.h.sideEncPool, func(se encoderI) { oneOffEncode(se, v, &bs, basetype, true) }) + } else { + bs = ext.WriteExt(v) + } + if bs == nil { + e.writeNilBytes() + goto END + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +END: + if ext == SelfExt { + e.e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.e.blist.put(bs0) + } + } +} + +func (e *bincEncDriverIO) EncodeRawExt(re *RawExt) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *bincEncDriverIO) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriverIO) WriteArrayStart(length int) { + e.encLen(bincVdArray<<4, uint64(length)) +} + +func (e *bincEncDriverIO) WriteMapStart(length int) { + e.encLen(bincVdMap<<4, uint64(length)) +} + +func (e *bincEncDriverIO) WriteArrayEmpty() { + + e.w.writen1(bincVdArray<<4 | uint8(0+4)) +} + +func (e *bincEncDriverIO) WriteMapEmpty() { + + e.w.writen1(bincVdMap<<4 | uint8(0+4)) +} + +func (e *bincEncDriverIO) EncodeSymbol(v string) { + + l := len(v) + if l == 0 { + e.encBytesLen(cUTF8, 0) + return + } else if l == 1 { + e.encBytesLen(cUTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + e.w.writen2(bigen.PutUint16(ui)) + } + } else { + e.e.seq++ + ui = e.e.seq + e.m[v] = ui + var lenprec uint8 + if l <= math.MaxUint8 { + + } else if l <= math.MaxUint16 { + lenprec = 1 + } else if int64(l) <= math.MaxUint32 { + lenprec = 2 + } else { + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + e.w.writen2(bigen.PutUint16(ui)) + } + if lenprec == 0 { + e.w.writen1(byte(l)) + } else if lenprec == 1 { + e.w.writen2(bigen.PutUint16(uint16(l))) + } else if lenprec == 2 { + e.w.writen4(bigen.PutUint32(uint32(l))) + } else { + e.w.writen8(bigen.PutUint64(uint64(l))) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriverIO) EncodeString(v string) { + if e.h.StringToRaw { + e.encLen(bincVdByteArray<<4, uint64(len(v))) + if len(v) > 0 { + e.w.writestr(v) + } + return + } + e.EncodeStringEnc(cUTF8, v) +} + +func (e *bincEncDriverIO) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) } + +func (e *bincEncDriverIO) EncodeStringEnc(c charEncoding, v string) { + if e.e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 1) { + e.EncodeSymbol(v) + return + } + 
e.encLen(bincVdString<<4, uint64(len(v))) + if len(v) > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriverIO) EncodeStringBytesRaw(v []byte) { + e.encLen(bincVdByteArray<<4, uint64(len(v))) + if len(v) > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriverIO) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *bincEncDriverIO) writeNilOr(v byte) { + if !e.h.NilCollectionToZeroLength { + v = bincBdNil + } + e.w.writen1(v) +} + +func (e *bincEncDriverIO) writeNilArray() { + e.writeNilOr(bincVdArray<<4 | uint8(0+4)) +} + +func (e *bincEncDriverIO) writeNilMap() { + e.writeNilOr(bincVdMap<<4 | uint8(0+4)) +} + +func (e *bincEncDriverIO) writeNilBytes() { + e.writeNilOr(bincVdArray<<4 | uint8(0+4)) +} + +func (e *bincEncDriverIO) encBytesLen(c charEncoding, length uint64) { + + if c == cRAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + +func (e *bincEncDriverIO) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriverIO) encLenNumber(bd byte, v uint64) { + if v <= math.MaxUint8 { + e.w.writen2(bd, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + e.w.writen2(bigen.PutUint16(uint16(v))) + } else if v <= math.MaxUint32 { + e.w.writen1(bd | 0x02) + e.w.writen4(bigen.PutUint32(uint32(v))) + } else { + e.w.writen1(bd | 0x03) + e.w.writen8(bigen.PutUint64(uint64(v))) + } +} + +func (d *bincDecDriverIO) readNextBd() { + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true +} + +func (d *bincDecDriverIO) advanceNil() (null bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincBdNil { + d.bdRead = false + return true + } + return +} + +func (d *bincDecDriverIO) TryNil() bool { + return d.advanceNil() +} + +func (d *bincDecDriverIO) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincBdNil { + d.bdRead = false + return valueTypeNil + } else if d.vd == bincVdByteArray { + return valueTypeBytes + } else if d.vd == bincVdString { + return valueTypeString + } else if d.vd == bincVdArray { + return valueTypeArray + } else if d.vd == bincVdMap { + return valueTypeMap + } + return valueTypeUnset +} + +func (d *bincDecDriverIO) DecodeTime() (t time.Time) { + if d.advanceNil() { + return + } + if d.vd != bincVdTimestamp { + halt.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + t, err := bincDecodeTime(d.r.readx(uint(d.vs))) + halt.onerror(err) + d.bdRead = false + return +} + +func (d *bincDecDriverIO) decFloatPruned(maxlen uint8) { + l := d.r.readn1() + if l > maxlen { + halt.errorf("cannot read float - at most %v bytes used to represent float - received %v bytes", maxlen, l) + } + for i := l; i < maxlen; i++ { + d.d.b[i] = 0 + } + d.r.readb(d.d.b[0:l]) +} + +func (d *bincDecDriverIO) decFloatPre32() (b [4]byte) { + if d.vs&0x8 == 0 { + b = d.r.readn4() + } else { + d.decFloatPruned(4) + copy(b[:], d.d.b[:]) + } + return +} + +func (d *bincDecDriverIO) decFloatPre64() (b [8]byte) { + if d.vs&0x8 == 0 { + b = d.r.readn8() + } else { + d.decFloatPruned(8) + copy(b[:], d.d.b[:]) + } + return +} + +func (d *bincDecDriverIO) decFloatVal() (f float64) { + switch d.vs & 0x7 { + case bincFlBin32: + f = float64(math.Float32frombits(bigen.Uint32(d.decFloatPre32()))) + case bincFlBin64: + f = math.Float64frombits(bigen.Uint64(d.decFloatPre64())) + default: + + halt.errorf("read float 
supports only float32/64 - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + return +} + +func (d *bincDecDriverIO) decUint() (v uint64) { + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + v = uint64(bigen.Uint16(d.r.readn2())) + case 2: + b3 := d.r.readn3() + var b [4]byte + copy(b[1:], b3[:]) + v = uint64(bigen.Uint32(b)) + case 3: + v = uint64(bigen.Uint32(d.r.readn4())) + case 4, 5, 6: + + bs := d.d.b[:8] + clear(bs) + d.r.readb(bs[(7 - d.vs):]) + v = bigen.Uint64(*(*[8]byte)(bs)) + case 7: + v = bigen.Uint64(d.r.readn8()) + default: + halt.errorf("unsigned integers with greater than 64 bits of precision not supported: d.vs: %v %x", d.vs, d.vs) + } + return +} + +func (d *bincDecDriverIO) uintBytes() (bs []byte) { + switch d.vs { + case 0: + bs = d.d.b[:1] + bs[0] = d.r.readn1() + return + case 1: + bs = d.d.b[:2] + case 2: + bs = d.d.b[:3] + case 3: + bs = d.d.b[:4] + case 4, 5, 6: + lim := 7 - d.vs + bs = d.d.b[lim:8] + case 7: + bs = d.d.b[:8] + default: + halt.errorf("unsigned integers with greater than 64 bits of precision not supported: d.vs: %v %x", d.vs, d.vs) + } + d.r.readb(bs) + return +} + +func (d *bincDecDriverIO) decInteger() (ui uint64, neg, ok bool) { + ok = true + vd, vs := d.vd, d.vs + if vd == bincVdPosInt { + ui = d.decUint() + } else if vd == bincVdNegInt { + ui = d.decUint() + neg = true + } else if vd == bincVdSmallInt { + ui = uint64(d.vs) + 1 + } else if vd == bincVdSpecial { + if vs == bincSpZero { + + } else if vs == bincSpNegOne { + neg = true + ui = 1 + } else { + ok = false + + } + } else { + ok = false + + } + return +} + +func (d *bincDecDriverIO) decFloat() (f float64, ok bool) { + ok = true + vd, vs := d.vd, d.vs + if vd == bincVdSpecial { + if vs == bincSpNan { + f = math.NaN() + } else if vs == bincSpPosInf { + f = math.Inf(1) + } else if vs == bincSpZeroFloat || vs == bincSpZero { + + } else if vs == bincSpNegInf { + f = math.Inf(-1) + } else { + ok = false + + } + } else if vd == bincVdFloat { + f = d.decFloatVal() + } else { + ok = false + } + return +} + +func (d *bincDecDriverIO) DecodeInt64() (i int64) { + if d.advanceNil() { + return + } + v1, v2, v3 := d.decInteger() + i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, false) + d.bdRead = false + return +} + +func (d *bincDecDriverIO) DecodeUint64() (ui uint64) { + if d.advanceNil() { + return + } + ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger()) + d.bdRead = false + return +} + +func (d *bincDecDriverIO) DecodeFloat64() (f float64) { + if d.advanceNil() { + return + } + v1, v2 := d.decFloat() + f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, false) + d.bdRead = false + return +} + +func (d *bincDecDriverIO) DecodeBool() (b bool) { + if d.advanceNil() { + return + } + if d.bd == (bincVdSpecial | bincSpFalse) { + + } else if d.bd == (bincVdSpecial | bincSpTrue) { + b = true + } else { + halt.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + d.bdRead = false + return +} + +func (d *bincDecDriverIO) ReadMapStart() (length int) { + if d.advanceNil() { + return containerLenNil + } + if d.vd != bincVdMap { + halt.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriverIO) ReadArrayStart() (length int) { + if d.advanceNil() { + return containerLenNil + } + if d.vd != bincVdArray { + halt.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + length = d.decLen() + d.bdRead = false + 
return +} + +func (d *bincDecDriverIO) decLen() int { + if d.vs > 3 { + return int(d.vs - 4) + } + return int(d.decLenNumber()) +} + +func (d *bincDecDriverIO) decLenNumber() (v uint64) { + if x := d.vs; x == 0 { + v = uint64(d.r.readn1()) + } else if x == 1 { + v = uint64(bigen.Uint16(d.r.readn2())) + } else if x == 2 { + v = uint64(bigen.Uint32(d.r.readn4())) + } else { + v = bigen.Uint64(d.r.readn8()) + } + return +} + +func (d *bincDecDriverIO) DecodeStringAsBytes() (bs []byte, state dBytesAttachState) { + if d.advanceNil() { + return + } + var cond bool + var slen = -1 + switch d.vd { + case bincVdString, bincVdByteArray: + slen = d.decLen() + bs, cond = d.r.readxb(uint(slen)) + state = d.d.attachState(cond) + case bincVdSymbol: + + var symbol uint16 + vs := d.vs + if vs&0x8 == 0 { + symbol = uint16(d.r.readn1()) + } else { + symbol = uint16(bigen.Uint16(d.r.readn2())) + } + if d.s == nil { + d.s = make(map[uint16][]byte, 16) + } + + if vs&0x4 == 0 { + bs = d.s[symbol] + } else { + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(bigen.Uint16(d.r.readn2())) + case 2: + slen = int(bigen.Uint32(d.r.readn4())) + case 3: + slen = int(bigen.Uint64(d.r.readn8())) + } + + bs, cond = d.r.readxb(uint(slen)) + bs = d.d.detach2Bytes(bs, d.d.attachState(cond)) + d.s[symbol] = bs + } + state = dBytesDetach + default: + halt.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + + if d.h.ValidateUnicode && !utf8.Valid(bs) { + halt.errorf("DecodeStringAsBytes: invalid UTF-8: %s", bs) + } + + d.bdRead = false + return +} + +func (d *bincDecDriverIO) DecodeBytes() (bs []byte, state dBytesAttachState) { + if d.advanceNil() { + return + } + var cond bool + if d.vd == bincVdArray { + slen := d.ReadArrayStart() + bs, cond = usableByteSlice(d.d.buf, slen) + for i := 0; i < slen; i++ { + bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8)) + } + for i := len(bs); i < slen; i++ { + bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) + } + if cond { + d.d.buf = bs + } + state = dBytesAttachBuffer + return + } + if !(d.vd == bincVdString || d.vd == bincVdByteArray) { + halt.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + clen := d.decLen() + d.bdRead = false + bs, cond = d.r.readxb(uint(clen)) + state = d.d.attachState(cond) + return +} + +func (d *bincDecDriverIO) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + xbs, _, _, ok := d.decodeExtV(ext != nil, xtag) + if !ok { + return + } + if ext == SelfExt { + sideDecode(d.h, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv, xbs, basetype, true) }) + } else { + ext.ReadExt(rv, xbs) + } +} + +func (d *bincDecDriverIO) DecodeRawExt(re *RawExt) { + xbs, realxtag, state, ok := d.decodeExtV(false, 0) + if !ok { + return + } + re.Tag = uint64(realxtag) + re.setData(xbs, state >= dBytesAttachViewZerocopy) +} + +func (d *bincDecDriverIO) decodeExtV(verifyTag bool, xtagIn uint64) (xbs []byte, xtag byte, bstate dBytesAttachState, ok bool) { + if xtagIn > 0xff { + halt.errorf("ext: tag must be <= 0xff; got: %v", xtagIn) + } + if d.advanceNil() { + return + } + tag := uint8(xtagIn) + if d.vd == bincVdCustomExt { + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + halt.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag) + } + xbs, ok = d.r.readxb(uint(l)) + bstate = d.d.attachState(ok) + + } else if d.vd == bincVdByteArray { + xbs, bstate = d.DecodeBytes() + } else { + halt.errorf("ext expects extensions or 
byte array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + d.bdRead = false + ok = true + return +} + +func (d *bincDecDriverIO) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.naked() + var decodeFurther bool + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + n.v = valueTypeNil + case bincSpFalse: + n.v = valueTypeBool + n.b = false + case bincSpTrue: + n.v = valueTypeBool + n.b = true + case bincSpNan: + n.v = valueTypeFloat + n.f = math.NaN() + case bincSpPosInf: + n.v = valueTypeFloat + n.f = math.Inf(1) + case bincSpNegInf: + n.v = valueTypeFloat + n.f = math.Inf(-1) + case bincSpZeroFloat: + n.v = valueTypeFloat + n.f = float64(0) + case bincSpZero: + n.v = valueTypeUint + n.u = uint64(0) + case bincSpNegOne: + n.v = valueTypeInt + n.i = int64(-1) + default: + halt.errorf("cannot infer value - unrecognized special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + case bincVdSmallInt: + n.v = valueTypeUint + n.u = uint64(int8(d.vs)) + 1 + case bincVdPosInt: + n.v = valueTypeUint + n.u = d.decUint() + case bincVdNegInt: + n.v = valueTypeInt + n.i = -(int64(d.decUint())) + case bincVdFloat: + n.v = valueTypeFloat + n.f = d.decFloatVal() + case bincVdString: + n.v = valueTypeString + n.s = d.d.detach2Str(d.DecodeStringAsBytes()) + case bincVdByteArray: + d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) + case bincVdSymbol: + n.v = valueTypeSymbol + n.s = d.d.detach2Str(d.DecodeStringAsBytes()) + case bincVdTimestamp: + n.v = valueTypeTime + tt, err := bincDecodeTime(d.r.readx(uint(d.vs))) + halt.onerror(err) + n.t = tt + case bincVdCustomExt: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(uint(l)) + case bincVdArray: + n.v = valueTypeArray + decodeFurther = true + case bincVdMap: + n.v = valueTypeMap + decodeFurther = true + default: + halt.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } +} + +func (d *bincDecDriverIO) nextValueBytes() (v []byte) { + if !d.bdRead { + d.readNextBd() + } + d.r.startRecording() + d.nextValueBytesBdReadR() + v = d.r.stopRecording() + d.bdRead = false + return +} + +func (d *bincDecDriverIO) nextValueBytesBdReadR() { + fnLen := func(vs byte) uint { + switch vs { + case 0: + x := d.r.readn1() + return uint(x) + case 1: + x := d.r.readn2() + return uint(bigen.Uint16(x)) + case 2: + x := d.r.readn4() + return uint(bigen.Uint32(x)) + case 3: + x := d.r.readn8() + return uint(bigen.Uint64(x)) + default: + return uint(vs - 4) + } + } + + var clen uint + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil, bincSpFalse, bincSpTrue, bincSpNan, bincSpPosInf: + case bincSpNegInf, bincSpZeroFloat, bincSpZero, bincSpNegOne: + default: + halt.errorf("cannot infer value - unrecognized special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + case bincVdSmallInt: + case bincVdPosInt, bincVdNegInt: + d.uintBytes() + case bincVdFloat: + fn := func(xlen byte) { + if d.vs&0x8 != 0 { + xlen = d.r.readn1() + if xlen > 8 { + halt.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", xlen) + } + } + d.r.readb(d.d.b[:xlen]) + } + switch d.vs & 0x7 { + case bincFlBin32: + fn(4) + case bincFlBin64: + fn(8) + default: + halt.errorf("read float supports only float32/64 - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } 
+ case bincVdString, bincVdByteArray: + clen = fnLen(d.vs) + d.r.skip(clen) + case bincVdSymbol: + if d.vs&0x8 == 0 { + d.r.readn1() + } else { + d.r.skip(2) + } + if d.vs&0x4 != 0 { + clen = fnLen(d.vs & 0x3) + d.r.skip(clen) + } + case bincVdTimestamp: + d.r.skip(uint(d.vs)) + case bincVdCustomExt: + clen = fnLen(d.vs) + d.r.readn1() + d.r.skip(clen) + case bincVdArray: + clen = fnLen(d.vs) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + } + case bincVdMap: + clen = fnLen(d.vs) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + default: + halt.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + return +} + +func (d *bincEncDriverIO) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*BincHandle) + d.e = shared + if shared.bytes { + fp = bincFpEncBytes + } else { + fp = bincFpEncIO + } + + d.init2(enc) + return +} + +func (e *bincEncDriverIO) writeBytesAsis(b []byte) { e.w.writeb(b) } + +func (e *bincEncDriverIO) writerEnd() { e.w.end() } + +func (e *bincEncDriverIO) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *bincEncDriverIO) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +func (d *bincDecDriverIO) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*BincHandle) + d.d = shared + if shared.bytes { + fp = bincFpDecBytes + } else { + fp = bincFpDecIO + } + + d.init2(dec) + return +} + +func (d *bincDecDriverIO) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *bincDecDriverIO) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *bincDecDriverIO) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +func (d *bincDecDriverIO) descBd() string { + return sprintf("%v (%s)", d.bd, bincdescbd(d.bd)) +} + +func (d *bincDecDriverIO) DecodeFloat32() (f float32) { + return float32(chkOvf.Float32V(d.DecodeFloat64())) +} diff --git a/vendor/github.com/ugorji/go/codec/binc.notfastpath.mono.generated.go b/vendor/github.com/ugorji/go/codec/binc.notfastpath.mono.generated.go new file mode 100644 index 000000000..ca893afcd --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/binc.notfastpath.mono.generated.go @@ -0,0 +1,52 @@ +//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath) + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "reflect" +) + +type fastpathEBincBytes struct { + rt reflect.Type + encfn func(*encoderBincBytes, *encFnInfo, reflect.Value) +} +type fastpathDBincBytes struct { + rt reflect.Type + decfn func(*decoderBincBytes, *decFnInfo, reflect.Value) +} +type fastpathEsBincBytes [0]fastpathEBincBytes +type fastpathDsBincBytes [0]fastpathDBincBytes + +func (helperEncDriverBincBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderBincBytes) bool { + return false +} +func (helperDecDriverBincBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderBincBytes) bool { + return false +} + +func (helperEncDriverBincBytes) fastpathEList() (v *fastpathEsBincBytes) { return } +func (helperDecDriverBincBytes) fastpathDList() (v *fastpathDsBincBytes) { return } + +type fastpathEBincIO struct { + rt reflect.Type + encfn func(*encoderBincIO, *encFnInfo, reflect.Value) +} +type fastpathDBincIO struct { + rt reflect.Type + decfn func(*decoderBincIO, *decFnInfo, reflect.Value) +} +type fastpathEsBincIO [0]fastpathEBincIO +type fastpathDsBincIO [0]fastpathDBincIO + +func (helperEncDriverBincIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderBincIO) bool { + return false +} +func (helperDecDriverBincIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderBincIO) bool { + return false +} + +func (helperEncDriverBincIO) fastpathEList() (v *fastpathEsBincIO) { return } +func (helperDecDriverBincIO) fastpathDList() (v *fastpathDsBincIO) { return } diff --git a/vendor/github.com/ugorji/go/codec/build.sh b/vendor/github.com/ugorji/go/codec/build.sh index 023faf3d4..5214c7b03 100644 --- a/vendor/github.com/ugorji/go/codec/build.sh +++ b/vendor/github.com/ugorji/go/codec/build.sh @@ -1,232 +1,61 @@ #!/bin/bash -# Run all the different permutations of all the tests and other things -# This helps ensure that nothing gets broken. +# Build and Run the different test permutations. +# This helps validate that nothing gets broken. -_tests() { - local vet="" # TODO: make it off - local gover=$( ${gocmd} version | cut -f 3 -d ' ' ) - [[ $( ${gocmd} version ) == *"gccgo"* ]] && zcover=0 - [[ $( ${gocmd} version ) == *"gollvm"* ]] && zcover=0 - case $gover in - go1.[7-9]*|go1.1[0-9]*|go2.*|devel*) true ;; - *) return 1 - esac - # note that codecgen requires fastpath, so you cannot do "codecgen codec.notfastpath" - # we test the following permutations wnich all execute different code paths as below. - echo "TestCodecSuite: (fastpath/unsafe), (!fastpath/unsafe), (fastpath/!unsafe), (!fastpath/!unsafe), (codecgen/unsafe)" - local echo=1 - local nc=2 # count - local cpus="1,$(nproc)" - # if using the race detector, then set nc to - if [[ " ${zargs[@]} " =~ "-race" ]]; then - cpus="$(nproc)" - fi - local a=( "" "codec.notfastpath" "codec.safe" "codec.notfastpath codec.safe" "codecgen" ) - local b=() - local c=() - for i in "${a[@]}" - do - local i2=${i:-default} - [[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'alltests $i'; RUN: 'TestCodecSuite'" - [[ "$zcover" == "1" ]] && c=( -coverprofile "${i2// /-}.cov.out" ) - true && - ${gocmd} vet -printfuncs "errorf" "$@" && - if [[ "$echo" == 1 ]]; then set -o xtrace; fi && - ${gocmd} test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "alltests $i" -count $nc -cpu $cpus -run "TestCodecSuite" "${c[@]}" "$@" & - if [[ "$echo" == 1 ]]; then set +o xtrace; fi - b+=("${i2// /-}.cov.out") - [[ "$zwait" == "1" ]] && wait - - # if [[ "$?" 
!= 0 ]]; then return 1; fi +_build_proceed() { + # return success (0) if we should, and 1 (fail) if not + if [[ "${zforce}" ]]; then return 0; fi + for a in "fastpath.generated.go" "json.mono.generated.go"; do + if [[ ! -e "$a" ]]; then return 0; fi + for b in `ls -1 *.go.tmpl gen.go gen_mono.go values_test.go`; do + if [[ "$a" -ot "$b" ]]; then return 0; fi + done done - if [[ "$zextra" == "1" ]]; then - [[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'codec.notfastpath x'; RUN: 'Test.*X$'" - [[ "$zcover" == "1" ]] && c=( -coverprofile "x.cov.out" ) - ${gocmd} test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "codec.notfastpath x" -count $nc -run 'Test.*X$' "${c[@]}" & - b+=("x.cov.out") - [[ "$zwait" == "1" ]] && wait - fi - wait - # go tool cover is not supported for gccgo, gollvm, other non-standard go compilers - [[ "$zcover" == "1" ]] && - command -v gocovmerge && - gocovmerge "${b[@]}" > __merge.cov.out && - ${gocmd} tool cover -html=__merge.cov.out + return 1 } -# is a generation needed? -_ng() { - local a="$1" - if [[ ! -e "$a" ]]; then echo 1; return; fi - for i in `ls -1 *.go.tmpl gen.go values_test.go` - do - if [[ "$a" -ot "$i" ]]; then echo 1; return; fi - done -} - -_prependbt() { - cat > ${2} <> ${2} - rm -f ${1} -} - -# _build generates fast-path.go and gen-helper.go. +# _build generates fastpath.go _build() { - if ! [[ "${zforce}" || $(_ng "fast-path.generated.go") || $(_ng "gen-helper.generated.go") || $(_ng "gen.generated.go") ]]; then return 0; fi - + # if ! [[ "${zforce}" || $(_ng "fastpath.generated.go") || $(_ng "json.mono.generated.go") ]]; then return 0; fi + _build_proceed + if [ $? -eq 1 ]; then return 0; fi if [ "${zbak}" ]; then _zts=`date '+%m%d%Y_%H%M%S'` _gg=".generated.go" - [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak - [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak + [ -e "fastpath${_gg}" ] && mv fastpath${_gg} fastpath${_gg}__${_zts}.bak [ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak - fi - rm -f gen-helper.generated.go fast-path.generated.go gen.generated.go \ - *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go + fi + + rm -f fast*path.generated.go *mono*generated.go *_generated_test.go gen-from-tmpl*.generated.go - cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl - cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl - cat >> gen.generated.go <> gen.generated.go < gen-enc-chan.go.tmpl - cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.sort-slice-stubs.generated.go <> gen-from-tmpl.sort-slice-stubs.generated.go < gen-from-tmpl.generated.go < bench/shared_test.go # explicitly return 0 if this passes, else return 1 - local btags="codec.notfastpath codec.safe codecgen.exec" - rm -f sort-slice.generated.go fast-path.generated.go gen-helper.generated.go mammoth_generated_test.go mammoth2_generated_test.go - - cat > gen-from-tmpl.sort-slice.generated.go < gen-from-tmpl.generated.go < $f <>$f - if [[ "$i" != "master" ]]; then i="release-branch.go$i"; fi - (false || - (echo "===== BUILDING GO SDK for branch: $i ... =====" && - cd $GOROOT && - git checkout -f $i && git reset --hard && git clean -f . && - cd src && ./make.bash >>$f 2>&1 && sleep 1 ) ) && - echo "===== GO SDK BUILD DONE =====" && - _prebuild && - echo "===== PREBUILD DONE with exit: $? =====" && - _tests "$@" - if [[ "$?" 
!= 0 ]]; then return 1; fi +_tests() { + local vet="" # TODO: make it off + local gover=$( ${gocmd} version | cut -f 3 -d ' ' ) + # go tool cover is not supported for gccgo, gollvm, other non-standard go compilers + [[ $( ${gocmd} version ) == *"gccgo"* ]] && zcover=0 + [[ $( ${gocmd} version ) == *"gollvm"* ]] && zcover=0 + case $gover in + go1.2[0-9]*|go2.*|devel*) true ;; + *) return 1 + esac + # we test the following permutations wnich all execute different code paths as below. + echo "TestCodecSuite: (fastpath/unsafe), (!fastpath/unsafe), (fastpath/!unsafe), (!fastpath/!unsafe)" + local nc=2 # count + local cpus="1,$(nproc)" + # if using the race detector, then set nc to + if [[ " ${zargs[@]} " =~ "-race" ]]; then + cpus="$(nproc)" + fi + local covdir="" + local a=( "" "codec.safe" "codec.notfastpath" "codec.safe codec.notfastpath" + "codec.notmono" "codec.notmono codec.safe" + "codec.notmono codec.notfastpath" "codec.notmono codec.safe codec.notfastpath" ) + [[ "$zextra" == "1" ]] && a+=( "x" ) + [[ "$zcover" == "1" ]] && covdir=`mktemp -d` + ${gocmd} vet -printfuncs "errorf" "$@" || return 1 + for i in "${a[@]}"; do + local j=${i:-default}; j="${j// /-}"; j="${j//codec./}" + [[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'alltests $i'; RUN: 'TestCodecSuite'" + _tests_run_one + [[ "$zwait" == "1" ]] && wait + # if [[ "$?" != 0 ]]; then return 1; fi done - zforce=${makeforce} - echo "++++++++ RELEASE TEST SUITES ALL PASSED ++++++++" + wait + [[ "$zcover" == "1" ]] && + echo "go tool covdata output" && + ${gocmd} tool covdata percent -i $covdir && + ${gocmd} tool covdata textfmt -i $covdir -o __cov.out && + ${gocmd} tool cover -html=__cov.out } _usage() { @@ -306,11 +147,10 @@ _usage() { # -pf [p=prebuild (f=force)] cat < t=tests [e=extra, s=short, o=cover, w=wait] -[md] -> [m=make, d=race detector] - -[n l i] -> [n=inlining diagnostics, l=mid-stack inlining, i=check inlining for path (path)] - -v -> v=verbose + -v -> v=verbose (more v's to increase verbose level) EOF if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi } @@ -331,15 +171,15 @@ _main() { local gocmd=${MYGOCMD:-go} OPTIND=1 - while getopts ":cetmnrgpfvldsowkxyzi" flag + while getopts ":cetmnrgpfvldsowikxyz" flag do case "x$flag" in + 'xw') zwait=1 ;; + 'xv') zverbose+=(1) ;; 'xo') zcover=1 ;; 'xe') zextra=1 ;; - 'xw') zwait=1 ;; 'xf') zforce=1 ;; 'xs') ztestargs+=("-short") ;; - 'xv') zverbose+=(1) ;; 'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;; 'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;; 'xd') zargs+=("-race") ;; @@ -357,14 +197,23 @@ _main() { 'xg') _go ;; 'xp') _prebuild "$@" ;; 'xc') _clean "$@" ;; + esac + + # handle from local run.sh + case "x$x" in + 'xi') _check_inlining_one "$@" ;; + 'xk') _go_compiler_validation_suite ;; 'xx') _analyze_checks "$@" ;; 'xy') _analyze_debug_types "$@" ;; 'xz') _analyze_do_inlining_and_more "$@" ;; - 'xk') _go_compiler_validation_suite ;; - 'xi') _check_inlining_one "$@" ;; esac # unset zforce zargs zbenchflags } [ "." = `dirname $0` ] && _main "$@" +# _xtrace() { +# local - +# set -x +# "${@}" +# } diff --git a/vendor/github.com/ugorji/go/codec/cbor.base.go b/vendor/github.com/ugorji/go/codec/cbor.base.go new file mode 100644 index 000000000..fad562a63 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/cbor.base.go @@ -0,0 +1,160 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "reflect" +) + +// major +const ( + cborMajorUint byte = iota + cborMajorNegInt + cborMajorBytes + cborMajorString + cborMajorArray + cborMajorMap + cborMajorTag + cborMajorSimpleOrFloat +) + +// simple +const ( + cborBdFalse byte = 0xf4 + iota + cborBdTrue + cborBdNil + cborBdUndefined + cborBdExt + cborBdFloat16 + cborBdFloat32 + cborBdFloat64 +) + +// indefinite +const ( + cborBdIndefiniteBytes byte = 0x5f + cborBdIndefiniteString byte = 0x7f + cborBdIndefiniteArray byte = 0x9f + cborBdIndefiniteMap byte = 0xbf + cborBdBreak byte = 0xff +) + +// These define some in-stream descriptors for +// manual encoding e.g. when doing explicit indefinite-length +const ( + CborStreamBytes byte = 0x5f + CborStreamString byte = 0x7f + CborStreamArray byte = 0x9f + CborStreamMap byte = 0xbf + CborStreamBreak byte = 0xff +) + +// base values +const ( + cborBaseUint byte = 0x00 + cborBaseNegInt byte = 0x20 + cborBaseBytes byte = 0x40 + cborBaseString byte = 0x60 + cborBaseArray byte = 0x80 + cborBaseMap byte = 0xa0 + cborBaseTag byte = 0xc0 + cborBaseSimple byte = 0xe0 +) + +// const ( +// cborSelfDesrTag byte = 0xd9 +// cborSelfDesrTag2 byte = 0xd9 +// cborSelfDesrTag3 byte = 0xf7 +// ) + +var ( + cbordescSimpleNames = map[byte]string{ + cborBdNil: "nil", + cborBdFalse: "false", + cborBdTrue: "true", + cborBdFloat16: "float", + cborBdFloat32: "float", + cborBdFloat64: "float", + cborBdBreak: "break", + } + cbordescIndefNames = map[byte]string{ + cborBdIndefiniteBytes: "bytes*", + cborBdIndefiniteString: "string*", + cborBdIndefiniteArray: "array*", + cborBdIndefiniteMap: "map*", + } + cbordescMajorNames = map[byte]string{ + cborMajorUint: "(u)int", + cborMajorNegInt: "int", + cborMajorBytes: "bytes", + cborMajorString: "string", + cborMajorArray: "array", + cborMajorMap: "map", + cborMajorTag: "tag", + cborMajorSimpleOrFloat: "simple", + } +) + +func cbordesc(bd byte) (s string) { + bm := bd >> 5 + if bm == cborMajorSimpleOrFloat { + s = cbordescSimpleNames[bd] + } else { + s = cbordescMajorNames[bm] + if s == "" { + s = cbordescIndefNames[bd] + } + } + if s == "" { + s = "unknown" + } + return +} + +// ------------------------- + +// CborHandle is a Handle for the CBOR encoding format, +// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . +// +// CBOR is comprehensively supported, including support for: +// - indefinite-length arrays/maps/bytes/strings +// - (extension) tags in range 0..0xffff (0 .. 65535) +// - half, single and double-precision floats +// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) +// - nil, true, false, ... +// - arrays and maps, bytes and text strings +// +// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. +// Users can implement them as needed (using SetExt), including spec-documented ones: +// - timestamp, BigNum, BigFloat, Decimals, +// - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. +type CborHandle struct { + binaryEncodingType + notJsonType + // noElemSeparators + BasicHandle + + // IndefiniteLength=true, means that we encode using indefinitelength + IndefiniteLength bool + + // TimeRFC3339 says to encode time.Time using RFC3339 format. + // If unset, we encode time.Time using seconds past epoch. + TimeRFC3339 bool + + // SkipUnexpectedTags says to skip over any tags for which extensions are + // not defined. This is in keeping with the cbor spec on "Optional Tagging of Items". 
+ // + // Furthermore, this allows the skipping over of the Self Describing Tag 0xd9d9f7. + SkipUnexpectedTags bool +} + +// Name returns the name of the handle: cbor +func (h *CborHandle) Name() string { return "cbor" } + +func (h *CborHandle) desc(bd byte) string { return cbordesc(bd) } + +// SetInterfaceExt sets an extension +func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, makeExt(ext)) +} diff --git a/vendor/github.com/ugorji/go/codec/cbor.fastpath.mono.generated.go b/vendor/github.com/ugorji/go/codec/cbor.fastpath.mono.generated.go new file mode 100644 index 000000000..d9456f3e8 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/cbor.fastpath.mono.generated.go @@ -0,0 +1,12482 @@ +//go:build !notmono && !codec.notmono && !notfastpath && !codec.notfastpath + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" + "slices" + "sort" +) + +type fastpathECborBytes struct { + rtid uintptr + rt reflect.Type + encfn func(*encoderCborBytes, *encFnInfo, reflect.Value) +} +type fastpathDCborBytes struct { + rtid uintptr + rt reflect.Type + decfn func(*decoderCborBytes, *decFnInfo, reflect.Value) +} +type fastpathEsCborBytes [56]fastpathECborBytes +type fastpathDsCborBytes [56]fastpathDCborBytes +type fastpathETCborBytes struct{} +type fastpathDTCborBytes struct{} + +func (helperEncDriverCborBytes) fastpathEList() *fastpathEsCborBytes { + var i uint = 0 + var s fastpathEsCborBytes + fn := func(v interface{}, fe func(*encoderCborBytes, *encFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathECborBytes{rt2id(xrt), xrt, fe} + i++ + } + + fn([]interface{}(nil), (*encoderCborBytes).fastpathEncSliceIntfR) + fn([]string(nil), (*encoderCborBytes).fastpathEncSliceStringR) + fn([][]byte(nil), (*encoderCborBytes).fastpathEncSliceBytesR) + fn([]float32(nil), (*encoderCborBytes).fastpathEncSliceFloat32R) + fn([]float64(nil), (*encoderCborBytes).fastpathEncSliceFloat64R) + fn([]uint8(nil), (*encoderCborBytes).fastpathEncSliceUint8R) + fn([]uint64(nil), (*encoderCborBytes).fastpathEncSliceUint64R) + fn([]int(nil), (*encoderCborBytes).fastpathEncSliceIntR) + fn([]int32(nil), (*encoderCborBytes).fastpathEncSliceInt32R) + fn([]int64(nil), (*encoderCborBytes).fastpathEncSliceInt64R) + fn([]bool(nil), (*encoderCborBytes).fastpathEncSliceBoolR) + + fn(map[string]interface{}(nil), (*encoderCborBytes).fastpathEncMapStringIntfR) + fn(map[string]string(nil), (*encoderCborBytes).fastpathEncMapStringStringR) + fn(map[string][]byte(nil), (*encoderCborBytes).fastpathEncMapStringBytesR) + fn(map[string]uint8(nil), (*encoderCborBytes).fastpathEncMapStringUint8R) + fn(map[string]uint64(nil), (*encoderCborBytes).fastpathEncMapStringUint64R) + fn(map[string]int(nil), (*encoderCborBytes).fastpathEncMapStringIntR) + fn(map[string]int32(nil), (*encoderCborBytes).fastpathEncMapStringInt32R) + fn(map[string]float64(nil), (*encoderCborBytes).fastpathEncMapStringFloat64R) + fn(map[string]bool(nil), (*encoderCborBytes).fastpathEncMapStringBoolR) + fn(map[uint8]interface{}(nil), (*encoderCborBytes).fastpathEncMapUint8IntfR) + fn(map[uint8]string(nil), (*encoderCborBytes).fastpathEncMapUint8StringR) + fn(map[uint8][]byte(nil), (*encoderCborBytes).fastpathEncMapUint8BytesR) + fn(map[uint8]uint8(nil), (*encoderCborBytes).fastpathEncMapUint8Uint8R) + fn(map[uint8]uint64(nil), 
(*encoderCborBytes).fastpathEncMapUint8Uint64R) + fn(map[uint8]int(nil), (*encoderCborBytes).fastpathEncMapUint8IntR) + fn(map[uint8]int32(nil), (*encoderCborBytes).fastpathEncMapUint8Int32R) + fn(map[uint8]float64(nil), (*encoderCborBytes).fastpathEncMapUint8Float64R) + fn(map[uint8]bool(nil), (*encoderCborBytes).fastpathEncMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*encoderCborBytes).fastpathEncMapUint64IntfR) + fn(map[uint64]string(nil), (*encoderCborBytes).fastpathEncMapUint64StringR) + fn(map[uint64][]byte(nil), (*encoderCborBytes).fastpathEncMapUint64BytesR) + fn(map[uint64]uint8(nil), (*encoderCborBytes).fastpathEncMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*encoderCborBytes).fastpathEncMapUint64Uint64R) + fn(map[uint64]int(nil), (*encoderCborBytes).fastpathEncMapUint64IntR) + fn(map[uint64]int32(nil), (*encoderCborBytes).fastpathEncMapUint64Int32R) + fn(map[uint64]float64(nil), (*encoderCborBytes).fastpathEncMapUint64Float64R) + fn(map[uint64]bool(nil), (*encoderCborBytes).fastpathEncMapUint64BoolR) + fn(map[int]interface{}(nil), (*encoderCborBytes).fastpathEncMapIntIntfR) + fn(map[int]string(nil), (*encoderCborBytes).fastpathEncMapIntStringR) + fn(map[int][]byte(nil), (*encoderCborBytes).fastpathEncMapIntBytesR) + fn(map[int]uint8(nil), (*encoderCborBytes).fastpathEncMapIntUint8R) + fn(map[int]uint64(nil), (*encoderCborBytes).fastpathEncMapIntUint64R) + fn(map[int]int(nil), (*encoderCborBytes).fastpathEncMapIntIntR) + fn(map[int]int32(nil), (*encoderCborBytes).fastpathEncMapIntInt32R) + fn(map[int]float64(nil), (*encoderCborBytes).fastpathEncMapIntFloat64R) + fn(map[int]bool(nil), (*encoderCborBytes).fastpathEncMapIntBoolR) + fn(map[int32]interface{}(nil), (*encoderCborBytes).fastpathEncMapInt32IntfR) + fn(map[int32]string(nil), (*encoderCborBytes).fastpathEncMapInt32StringR) + fn(map[int32][]byte(nil), (*encoderCborBytes).fastpathEncMapInt32BytesR) + fn(map[int32]uint8(nil), (*encoderCborBytes).fastpathEncMapInt32Uint8R) + fn(map[int32]uint64(nil), (*encoderCborBytes).fastpathEncMapInt32Uint64R) + fn(map[int32]int(nil), (*encoderCborBytes).fastpathEncMapInt32IntR) + fn(map[int32]int32(nil), (*encoderCborBytes).fastpathEncMapInt32Int32R) + fn(map[int32]float64(nil), (*encoderCborBytes).fastpathEncMapInt32Float64R) + fn(map[int32]bool(nil), (*encoderCborBytes).fastpathEncMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperDecDriverCborBytes) fastpathDList() *fastpathDsCborBytes { + var i uint = 0 + var s fastpathDsCborBytes + fn := func(v interface{}, fd func(*decoderCborBytes, *decFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathDCborBytes{rt2id(xrt), xrt, fd} + i++ + } + + fn([]interface{}(nil), (*decoderCborBytes).fastpathDecSliceIntfR) + fn([]string(nil), (*decoderCborBytes).fastpathDecSliceStringR) + fn([][]byte(nil), (*decoderCborBytes).fastpathDecSliceBytesR) + fn([]float32(nil), (*decoderCborBytes).fastpathDecSliceFloat32R) + fn([]float64(nil), (*decoderCborBytes).fastpathDecSliceFloat64R) + fn([]uint8(nil), (*decoderCborBytes).fastpathDecSliceUint8R) + fn([]uint64(nil), (*decoderCborBytes).fastpathDecSliceUint64R) + fn([]int(nil), (*decoderCborBytes).fastpathDecSliceIntR) + fn([]int32(nil), (*decoderCborBytes).fastpathDecSliceInt32R) + fn([]int64(nil), (*decoderCborBytes).fastpathDecSliceInt64R) + fn([]bool(nil), (*decoderCborBytes).fastpathDecSliceBoolR) + + fn(map[string]interface{}(nil), (*decoderCborBytes).fastpathDecMapStringIntfR) + fn(map[string]string(nil), 
(*decoderCborBytes).fastpathDecMapStringStringR) + fn(map[string][]byte(nil), (*decoderCborBytes).fastpathDecMapStringBytesR) + fn(map[string]uint8(nil), (*decoderCborBytes).fastpathDecMapStringUint8R) + fn(map[string]uint64(nil), (*decoderCborBytes).fastpathDecMapStringUint64R) + fn(map[string]int(nil), (*decoderCborBytes).fastpathDecMapStringIntR) + fn(map[string]int32(nil), (*decoderCborBytes).fastpathDecMapStringInt32R) + fn(map[string]float64(nil), (*decoderCborBytes).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*decoderCborBytes).fastpathDecMapStringBoolR) + fn(map[uint8]interface{}(nil), (*decoderCborBytes).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*decoderCborBytes).fastpathDecMapUint8StringR) + fn(map[uint8][]byte(nil), (*decoderCborBytes).fastpathDecMapUint8BytesR) + fn(map[uint8]uint8(nil), (*decoderCborBytes).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*decoderCborBytes).fastpathDecMapUint8Uint64R) + fn(map[uint8]int(nil), (*decoderCborBytes).fastpathDecMapUint8IntR) + fn(map[uint8]int32(nil), (*decoderCborBytes).fastpathDecMapUint8Int32R) + fn(map[uint8]float64(nil), (*decoderCborBytes).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*decoderCborBytes).fastpathDecMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*decoderCborBytes).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*decoderCborBytes).fastpathDecMapUint64StringR) + fn(map[uint64][]byte(nil), (*decoderCborBytes).fastpathDecMapUint64BytesR) + fn(map[uint64]uint8(nil), (*decoderCborBytes).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*decoderCborBytes).fastpathDecMapUint64Uint64R) + fn(map[uint64]int(nil), (*decoderCborBytes).fastpathDecMapUint64IntR) + fn(map[uint64]int32(nil), (*decoderCborBytes).fastpathDecMapUint64Int32R) + fn(map[uint64]float64(nil), (*decoderCborBytes).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*decoderCborBytes).fastpathDecMapUint64BoolR) + fn(map[int]interface{}(nil), (*decoderCborBytes).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*decoderCborBytes).fastpathDecMapIntStringR) + fn(map[int][]byte(nil), (*decoderCborBytes).fastpathDecMapIntBytesR) + fn(map[int]uint8(nil), (*decoderCborBytes).fastpathDecMapIntUint8R) + fn(map[int]uint64(nil), (*decoderCborBytes).fastpathDecMapIntUint64R) + fn(map[int]int(nil), (*decoderCborBytes).fastpathDecMapIntIntR) + fn(map[int]int32(nil), (*decoderCborBytes).fastpathDecMapIntInt32R) + fn(map[int]float64(nil), (*decoderCborBytes).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*decoderCborBytes).fastpathDecMapIntBoolR) + fn(map[int32]interface{}(nil), (*decoderCborBytes).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*decoderCborBytes).fastpathDecMapInt32StringR) + fn(map[int32][]byte(nil), (*decoderCborBytes).fastpathDecMapInt32BytesR) + fn(map[int32]uint8(nil), (*decoderCborBytes).fastpathDecMapInt32Uint8R) + fn(map[int32]uint64(nil), (*decoderCborBytes).fastpathDecMapInt32Uint64R) + fn(map[int32]int(nil), (*decoderCborBytes).fastpathDecMapInt32IntR) + fn(map[int32]int32(nil), (*decoderCborBytes).fastpathDecMapInt32Int32R) + fn(map[int32]float64(nil), (*decoderCborBytes).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*decoderCborBytes).fastpathDecMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperEncDriverCborBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderCborBytes) bool { + var ft fastpathETCborBytes + switch v := iv.(type) { + case []interface{}: + if v == nil { + 
e.e.writeNilArray() + } else { + ft.EncSliceIntfV(v, e) + } + case []string: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceStringV(v, e) + } + case [][]byte: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBytesV(v, e) + } + case []float32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat32V(v, e) + } + case []float64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat64V(v, e) + } + case []uint8: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint8V(v, e) + } + case []uint64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint64V(v, e) + } + case []int: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntV(v, e) + } + case []int32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt32V(v, e) + } + case []int64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt64V(v, e) + } + case []bool: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBoolV(v, e) + } + case map[string]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntfV(v, e) + } + case map[string]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringStringV(v, e) + } + case map[string][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBytesV(v, e) + } + case map[string]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint8V(v, e) + } + case map[string]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint64V(v, e) + } + case map[string]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntV(v, e) + } + case map[string]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringInt32V(v, e) + } + case map[string]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringFloat64V(v, e) + } + case map[string]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBoolV(v, e) + } + case map[uint8]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntfV(v, e) + } + case map[uint8]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8StringV(v, e) + } + case map[uint8][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BytesV(v, e) + } + case map[uint8]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint8V(v, e) + } + case map[uint8]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint64V(v, e) + } + case map[uint8]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntV(v, e) + } + case map[uint8]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Int32V(v, e) + } + case map[uint8]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Float64V(v, e) + } + case map[uint8]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BoolV(v, e) + } + case map[uint64]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntfV(v, e) + } + case map[uint64]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64StringV(v, e) + } + case map[uint64][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BytesV(v, e) + } + case map[uint64]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint8V(v, e) + } + case map[uint64]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint64V(v, e) + } + case map[uint64]int: + if v == nil { + e.e.writeNilMap() + } else { + 
ft.EncMapUint64IntV(v, e) + } + case map[uint64]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Int32V(v, e) + } + case map[uint64]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Float64V(v, e) + } + case map[uint64]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BoolV(v, e) + } + case map[int]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntfV(v, e) + } + case map[int]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntStringV(v, e) + } + case map[int][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBytesV(v, e) + } + case map[int]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint8V(v, e) + } + case map[int]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint64V(v, e) + } + case map[int]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntV(v, e) + } + case map[int]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntInt32V(v, e) + } + case map[int]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntFloat64V(v, e) + } + case map[int]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBoolV(v, e) + } + case map[int32]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntfV(v, e) + } + case map[int32]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32StringV(v, e) + } + case map[int32][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BytesV(v, e) + } + case map[int32]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint8V(v, e) + } + case map[int32]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint64V(v, e) + } + case map[int32]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntV(v, e) + } + case map[int32]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Int32V(v, e) + } + case map[int32]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Float64V(v, e) + } + case map[int32]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BoolV(v, e) + } + default: + _ = v + return false + } + return true +} + +func (e *encoderCborBytes) fastpathEncSliceIntfR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborBytes + var v []interface{} + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]interface{}) + } + if f.ti.mbs { + ft.EncAsMapSliceIntfV(v, e) + return + } + ft.EncSliceIntfV(v, e) +} +func (fastpathETCborBytes) EncSliceIntfV(v []interface{}, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborBytes) EncAsMapSliceIntfV(v []interface{}, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborBytes) fastpathEncSliceStringR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborBytes + var v []string + if rv.Kind() == reflect.Array { + 
rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]string) + } + if f.ti.mbs { + ft.EncAsMapSliceStringV(v, e) + return + } + ft.EncSliceStringV(v, e) +} +func (fastpathETCborBytes) EncSliceStringV(v []string, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborBytes) EncAsMapSliceStringV(v []string, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborBytes) fastpathEncSliceBytesR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborBytes + var v [][]byte + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([][]byte) + } + if f.ti.mbs { + ft.EncAsMapSliceBytesV(v, e) + return + } + ft.EncSliceBytesV(v, e) +} +func (fastpathETCborBytes) EncSliceBytesV(v [][]byte, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborBytes) EncAsMapSliceBytesV(v [][]byte, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborBytes) fastpathEncSliceFloat32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborBytes + var v []float32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float32) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat32V(v, e) + return + } + ft.EncSliceFloat32V(v, e) +} +func (fastpathETCborBytes) EncSliceFloat32V(v []float32, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborBytes) EncAsMapSliceFloat32V(v []float32, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborBytes) fastpathEncSliceFloat64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborBytes + var v []float64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float64) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat64V(v, e) + return + } + ft.EncSliceFloat64V(v, e) +} +func (fastpathETCborBytes) EncSliceFloat64V(v []float64, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} 
+func (fastpathETCborBytes) EncAsMapSliceFloat64V(v []float64, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborBytes) fastpathEncSliceUint8R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborBytes + var v []uint8 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint8) + } + if f.ti.mbs { + ft.EncAsMapSliceUint8V(v, e) + return + } + ft.EncSliceUint8V(v, e) +} +func (fastpathETCborBytes) EncSliceUint8V(v []uint8, e *encoderCborBytes) { + e.e.EncodeStringBytesRaw(v) +} +func (fastpathETCborBytes) EncAsMapSliceUint8V(v []uint8, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(uint64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborBytes) fastpathEncSliceUint64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborBytes + var v []uint64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint64) + } + if f.ti.mbs { + ft.EncAsMapSliceUint64V(v, e) + return + } + ft.EncSliceUint64V(v, e) +} +func (fastpathETCborBytes) EncSliceUint64V(v []uint64, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborBytes) EncAsMapSliceUint64V(v []uint64, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborBytes) fastpathEncSliceIntR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborBytes + var v []int + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int) + } + if f.ti.mbs { + ft.EncAsMapSliceIntV(v, e) + return + } + ft.EncSliceIntV(v, e) +} +func (fastpathETCborBytes) EncSliceIntV(v []int, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborBytes) EncAsMapSliceIntV(v []int, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborBytes) fastpathEncSliceInt32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborBytes + var v []int32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int32) + } + if f.ti.mbs { + ft.EncAsMapSliceInt32V(v, e) + return + } + 
ft.EncSliceInt32V(v, e) +} +func (fastpathETCborBytes) EncSliceInt32V(v []int32, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborBytes) EncAsMapSliceInt32V(v []int32, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborBytes) fastpathEncSliceInt64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborBytes + var v []int64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int64) + } + if f.ti.mbs { + ft.EncAsMapSliceInt64V(v, e) + return + } + ft.EncSliceInt64V(v, e) +} +func (fastpathETCborBytes) EncSliceInt64V(v []int64, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborBytes) EncAsMapSliceInt64V(v []int64, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborBytes) fastpathEncSliceBoolR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborBytes + var v []bool + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]bool) + } + if f.ti.mbs { + ft.EncAsMapSliceBoolV(v, e) + return + } + ft.EncSliceBoolV(v, e) +} +func (fastpathETCborBytes) EncSliceBoolV(v []bool, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborBytes) EncAsMapSliceBoolV(v []bool, e *encoderCborBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborBytes) fastpathEncMapStringIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (fastpathETCborBytes) EncMapStringIntfV(v map[string]interface{}, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if 
!e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapStringStringR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (fastpathETCborBytes) EncMapStringStringV(v map[string]string, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapStringBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e) +} +func (fastpathETCborBytes) EncMapStringBytesV(v map[string][]byte, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapStringUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (fastpathETCborBytes) EncMapStringUint8V(v map[string]uint8, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapStringUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (fastpathETCborBytes) EncMapStringUint64V(v map[string]uint64, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapStringIntR(f *encFnInfo, rv reflect.Value) { + 
fastpathETCborBytes{}.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (fastpathETCborBytes) EncMapStringIntV(v map[string]int, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapStringInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (fastpathETCborBytes) EncMapStringInt32V(v map[string]int32, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapStringFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (fastpathETCborBytes) EncMapStringFloat64V(v map[string]float64, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapStringBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (fastpathETCborBytes) EncMapStringBoolV(v map[string]bool, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint8IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (fastpathETCborBytes) EncMapUint8IntfV(v map[uint8]interface{}, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + 
if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint8StringR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (fastpathETCborBytes) EncMapUint8StringV(v map[uint8]string, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint8BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e) +} +func (fastpathETCborBytes) EncMapUint8BytesV(v map[uint8][]byte, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint8Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (fastpathETCborBytes) EncMapUint8Uint8V(v map[uint8]uint8, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint8Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (fastpathETCborBytes) EncMapUint8Uint64V(v map[uint8]uint64, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + 
e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint8IntR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (fastpathETCborBytes) EncMapUint8IntV(v map[uint8]int, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint8Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (fastpathETCborBytes) EncMapUint8Int32V(v map[uint8]int32, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint8Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (fastpathETCborBytes) EncMapUint8Float64V(v map[uint8]float64, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint8BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (fastpathETCborBytes) EncMapUint8BoolV(v map[uint8]bool, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + 
e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint64IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (fastpathETCborBytes) EncMapUint64IntfV(v map[uint64]interface{}, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint64StringR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (fastpathETCborBytes) EncMapUint64StringV(v map[uint64]string, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint64BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e) +} +func (fastpathETCborBytes) EncMapUint64BytesV(v map[uint64][]byte, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint64Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (fastpathETCborBytes) EncMapUint64Uint8V(v map[uint64]uint8, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint64Uint64R(f 
*encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (fastpathETCborBytes) EncMapUint64Uint64V(v map[uint64]uint64, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint64IntR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (fastpathETCborBytes) EncMapUint64IntV(v map[uint64]int, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint64Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (fastpathETCborBytes) EncMapUint64Int32V(v map[uint64]int32, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint64Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (fastpathETCborBytes) EncMapUint64Float64V(v map[uint64]float64, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapUint64BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (fastpathETCborBytes) EncMapUint64BoolV(v map[uint64]bool, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + 
e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapIntIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (fastpathETCborBytes) EncMapIntIntfV(v map[int]interface{}, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapIntStringR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (fastpathETCborBytes) EncMapIntStringV(v map[int]string, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapIntBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e) +} +func (fastpathETCborBytes) EncMapIntBytesV(v map[int][]byte, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapIntUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (fastpathETCborBytes) EncMapIntUint8V(v map[int]uint8, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + 
e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapIntUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (fastpathETCborBytes) EncMapIntUint64V(v map[int]uint64, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapIntIntR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (fastpathETCborBytes) EncMapIntIntV(v map[int]int, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapIntInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (fastpathETCborBytes) EncMapIntInt32V(v map[int]int32, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapIntFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (fastpathETCborBytes) EncMapIntFloat64V(v map[int]float64, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + 
e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapIntBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (fastpathETCborBytes) EncMapIntBoolV(v map[int]bool, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapInt32IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (fastpathETCborBytes) EncMapInt32IntfV(v map[int32]interface{}, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapInt32StringR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (fastpathETCborBytes) EncMapInt32StringV(v map[int32]string, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapInt32BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e) +} +func (fastpathETCborBytes) EncMapInt32BytesV(v map[int32][]byte, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapInt32Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) 
+} +func (fastpathETCborBytes) EncMapInt32Uint8V(v map[int32]uint8, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapInt32Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (fastpathETCborBytes) EncMapInt32Uint64V(v map[int32]uint64, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapInt32IntR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (fastpathETCborBytes) EncMapInt32IntV(v map[int32]int, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapInt32Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (fastpathETCborBytes) EncMapInt32Int32V(v map[int32]int32, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapInt32Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (fastpathETCborBytes) EncMapInt32Float64V(v map[int32]float64, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + 
for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborBytes) fastpathEncMapInt32BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETCborBytes{}.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (fastpathETCborBytes) EncMapInt32BoolV(v map[int32]bool, e *encoderCborBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (helperDecDriverCborBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderCborBytes) bool { + var ft fastpathDTCborBytes + var changed bool + var containerLen int + switch v := iv.(type) { + case []interface{}: + ft.DecSliceIntfN(v, d) + case *[]interface{}: + var v2 []interface{} + if v2, changed = ft.DecSliceIntfY(*v, d); changed { + *v = v2 + } + case []string: + ft.DecSliceStringN(v, d) + case *[]string: + var v2 []string + if v2, changed = ft.DecSliceStringY(*v, d); changed { + *v = v2 + } + case [][]byte: + ft.DecSliceBytesN(v, d) + case *[][]byte: + var v2 [][]byte + if v2, changed = ft.DecSliceBytesY(*v, d); changed { + *v = v2 + } + case []float32: + ft.DecSliceFloat32N(v, d) + case *[]float32: + var v2 []float32 + if v2, changed = ft.DecSliceFloat32Y(*v, d); changed { + *v = v2 + } + case []float64: + ft.DecSliceFloat64N(v, d) + case *[]float64: + var v2 []float64 + if v2, changed = ft.DecSliceFloat64Y(*v, d); changed { + *v = v2 + } + case []uint8: + ft.DecSliceUint8N(v, d) + case *[]uint8: + var v2 []uint8 + if v2, changed = ft.DecSliceUint8Y(*v, d); changed { + *v = v2 + } + case []uint64: + ft.DecSliceUint64N(v, d) + case *[]uint64: + var v2 []uint64 + if v2, changed = ft.DecSliceUint64Y(*v, d); changed { + *v = v2 + } + case []int: + ft.DecSliceIntN(v, d) + case *[]int: + var v2 []int + if v2, changed = ft.DecSliceIntY(*v, d); changed { + *v = v2 + } + case []int32: + ft.DecSliceInt32N(v, d) + case *[]int32: + var v2 []int32 + if v2, changed = ft.DecSliceInt32Y(*v, d); changed { + *v = v2 + } + case []int64: + ft.DecSliceInt64N(v, d) + case *[]int64: + var v2 []int64 + if v2, changed = ft.DecSliceInt64Y(*v, d); changed { + *v = v2 + } + case []bool: + ft.DecSliceBoolN(v, d) + case *[]bool: + var v2 []bool + if v2, changed = ft.DecSliceBoolY(*v, d); changed { + *v = v2 + } + case map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + 
ft.DecMapStringIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + 
if *v == nil { + *v = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int: + if 
containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + 
if containerLen != 0 { + ft.DecMapUint64Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*v, containerLen, d) + } + d.mapEnd() + } 
+ case map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*v, containerLen, d) 
+ } + d.mapEnd() + } + case map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int, decInferLen(containerLen, 
d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*v, containerLen, d) + } + d.mapEnd() + } + default: + _ = v + return false + } + return true +} + +func (d *decoderCborBytes) fastpathDecSliceIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]interface{}) + if vv, changed := ft.DecSliceIntfY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []interface{} + rvGetSlice4Array(rv, &v) + ft.DecSliceIntfN(v, d) + default: + ft.DecSliceIntfN(rv2i(rv).([]interface{}), d) + } +} +func (fastpathDTCborBytes) DecSliceIntfY(v []interface{}, d *decoderCborBytes) (v2 []interface{}, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []interface{}) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]interface{}, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + d.decode(&v[uint(j)]) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]interface{}{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborBytes) 
DecSliceIntfN(v []interface{}, d *decoderCborBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + d.decode(&v[uint(j)]) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborBytes) fastpathDecSliceStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]string) + if vv, changed := ft.DecSliceStringY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []string + rvGetSlice4Array(rv, &v) + ft.DecSliceStringN(v, d) + default: + ft.DecSliceStringN(rv2i(rv).([]string), d) + } +} +func (fastpathDTCborBytes) DecSliceStringY(v []string, d *decoderCborBytes) (v2 []string, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []string) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]string, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, "")) + } + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]string{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborBytes) DecSliceStringN(v []string, d *decoderCborBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborBytes) fastpathDecSliceBytesR(f *decFnInfo, 
rv reflect.Value) { + var ft fastpathDTCborBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[][]byte) + if vv, changed := ft.DecSliceBytesY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v [][]byte + rvGetSlice4Array(rv, &v) + ft.DecSliceBytesN(v, d) + default: + ft.DecSliceBytesN(rv2i(rv).([][]byte), d) + } +} +func (fastpathDTCborBytes) DecSliceBytesY(v [][]byte, d *decoderCborBytes) (v2 [][]byte, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst [][]byte) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 24)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([][]byte, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([][]byte{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborBytes) DecSliceBytesN(v [][]byte, d *decoderCborBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborBytes) fastpathDecSliceFloat32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float32) + if vv, changed := ft.DecSliceFloat32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float32 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat32N(v, d) + default: + ft.DecSliceFloat32N(rv2i(rv).([]float32), d) + } +} +func (fastpathDTCborBytes) DecSliceFloat32Y(v []float32, d *decoderCborBytes) (v2 []float32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := 
containerLenS >= 0 + var j int + fnv := func(dst []float32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = float32(d.d.DecodeFloat32()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborBytes) DecSliceFloat32N(v []float32, d *decoderCborBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = float32(d.d.DecodeFloat32()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborBytes) fastpathDecSliceFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float64) + if vv, changed := ft.DecSliceFloat64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float64 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat64N(v, d) + default: + ft.DecSliceFloat64N(rv2i(rv).([]float64), d) + } +} +func (fastpathDTCborBytes) DecSliceFloat64Y(v []float64, d *decoderCborBytes) (v2 []float64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeFloat64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborBytes) DecSliceFloat64N(v 
[]float64, d *decoderCborBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeFloat64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborBytes) fastpathDecSliceUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint8) + if vv, changed := ft.DecSliceUint8Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint8 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint8N(v, d) + default: + ft.DecSliceUint8N(rv2i(rv).([]uint8), d) + } +} +func (fastpathDTCborBytes) DecSliceUint8Y(v []uint8, d *decoderCborBytes) (v2 []uint8, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + if ctyp != valueTypeMap { + var dbi dBytesIntoState + v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false) + return v2, dbi != dBytesIntoParamOut + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint8) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint8, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint8{}) + } + d.mapEnd() + return v, changed +} +func (fastpathDTCborBytes) DecSliceUint8N(v []uint8, d *decoderCborBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + if ctyp != valueTypeMap { + d.decodeBytesInto(v[:len(v):len(v)], true) + return + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + d.mapEnd() +} + +func (d *decoderCborBytes) fastpathDecSliceUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint64) + if vv, changed := ft.DecSliceUint64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint64 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint64N(v, d) + default: + ft.DecSliceUint64N(rv2i(rv).([]uint64), d) + } +} +func (fastpathDTCborBytes) DecSliceUint64Y(v []uint64, d *decoderCborBytes) (v2 []uint64, changed bool) { + ctyp := 
d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeUint64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborBytes) DecSliceUint64N(v []uint64, d *decoderCborBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeUint64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborBytes) fastpathDecSliceIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int) + if vv, changed := ft.DecSliceIntY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int + rvGetSlice4Array(rv, &v) + ft.DecSliceIntN(v, d) + default: + ft.DecSliceIntN(rv2i(rv).([]int), d) + } +} +func (fastpathDTCborBytes) DecSliceIntY(v []int, d *decoderCborBytes) (v2 []int, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + 
d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborBytes) DecSliceIntN(v []int, d *decoderCborBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborBytes) fastpathDecSliceInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int32) + if vv, changed := ft.DecSliceInt32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int32 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt32N(v, d) + default: + ft.DecSliceInt32N(rv2i(rv).([]int32), d) + } +} +func (fastpathDTCborBytes) DecSliceInt32Y(v []int32, d *decoderCborBytes) (v2 []int32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborBytes) DecSliceInt32N(v []int32, d *decoderCborBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } 
else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborBytes) fastpathDecSliceInt64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int64) + if vv, changed := ft.DecSliceInt64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int64 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt64N(v, d) + default: + ft.DecSliceInt64N(rv2i(rv).([]int64), d) + } +} +func (fastpathDTCborBytes) DecSliceInt64Y(v []int64, d *decoderCborBytes) (v2 []int64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeInt64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborBytes) DecSliceInt64N(v []int64, d *decoderCborBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeInt64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborBytes) fastpathDecSliceBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]bool) + if vv, changed := ft.DecSliceBoolY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []bool + rvGetSlice4Array(rv, &v) + ft.DecSliceBoolN(v, d) + default: + ft.DecSliceBoolN(rv2i(rv).([]bool), d) + } +} +func (fastpathDTCborBytes) DecSliceBoolY(v []bool, d *decoderCborBytes) (v2 []bool, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + 
containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []bool) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]bool, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, false)) + } + v[uint(j)] = d.d.DecodeBool() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]bool{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborBytes) DecSliceBoolN(v []bool, d *decoderCborBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeBool() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} +func (d *decoderCborBytes) fastpathDecMapStringIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]interface{}) + if *vp == nil { + *vp = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntfL(rv2i(rv).(map[string]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapStringIntfL(v map[string]interface{}, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderCborBytes) fastpathDecMapStringStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]string) + if *vp == nil { + *vp = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + 
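The slice fast paths above come in Y/N pairs: the Y variant is used when decoding through a pointer, so it may grow or reslice the destination and reports whether it changed; the N variant backs a fixed-length array, so elements that do not fit are skipped via arrayCannotExpand/swallow. A minimal sketch of how these paths get exercised through the public API, assuming this generated file belongs to github.com/ugorji/go/codec:

// Illustrative only; not part of the vendored generated file.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.CborHandle

	// Encode a []int, then decode it back through *[]int: this is the case
	// the Y-style slice decoder handles (the destination may be grown, and
	// the caller is told whether it changed).
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode([]int{1, 2, 3}); err != nil {
		panic(err)
	}
	var out []int
	if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // [1 2 3]

	// Decoding into a fixed-size array goes through the N-style decoder
	// instead: elements beyond the array length cannot be stored and are
	// skipped (or reported, depending on handle options).
	var arr [2]int
	if err := codec.NewDecoderBytes(buf, &h).Decode(&arr); err != nil {
		fmt.Println("decode into short array:", err)
	}
	fmt.Println(arr)
}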
ft.DecMapStringStringL(rv2i(rv).(map[string]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapStringStringL(v map[string]string, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderCborBytes) fastpathDecMapStringBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string][]byte) + if *vp == nil { + *vp = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBytesL(rv2i(rv).(map[string][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapStringBytesL(v map[string][]byte, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderCborBytes) fastpathDecMapStringUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint8) + if *vp == nil { + *vp = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint8L(rv2i(rv).(map[string]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapStringUint8L(v map[string]uint8, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderCborBytes) fastpathDecMapStringUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint64) + if *vp == nil { + *vp = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint64L(rv2i(rv).(map[string]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapStringUint64L(v map[string]uint64, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + 
d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderCborBytes) fastpathDecMapStringIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int) + if *vp == nil { + *vp = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntL(rv2i(rv).(map[string]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapStringIntL(v map[string]int, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderCborBytes) fastpathDecMapStringInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int32) + if *vp == nil { + *vp = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringInt32L(rv2i(rv).(map[string]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapStringInt32L(v map[string]int32, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderCborBytes) fastpathDecMapStringFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]float64) + if *vp == nil { + *vp = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringFloat64L(rv2i(rv).(map[string]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapStringFloat64L(v map[string]float64, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderCborBytes) fastpathDecMapStringBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]bool) + if *vp == nil { + *vp = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*vp, containerLen, d) + } + } else if 
containerLen != 0 { + ft.DecMapStringBoolL(rv2i(rv).(map[string]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapStringBoolL(v map[string]bool, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderCborBytes) fastpathDecMapUint8IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]interface{}) + if *vp == nil { + *vp = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntfL(rv2i(rv).(map[uint8]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint8IntfL(v map[uint8]interface{}, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderCborBytes) fastpathDecMapUint8StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]string) + if *vp == nil { + *vp = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8StringL(rv2i(rv).(map[uint8]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint8StringL(v map[uint8]string, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderCborBytes) fastpathDecMapUint8BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8][]byte) + if *vp == nil { + *vp = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BytesL(rv2i(rv).(map[uint8][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint8BytesL(v map[uint8][]byte, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; 
d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderCborBytes) fastpathDecMapUint8Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint8) + if *vp == nil { + *vp = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint8L(rv2i(rv).(map[uint8]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint8Uint8L(v map[uint8]uint8, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderCborBytes) fastpathDecMapUint8Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint64) + if *vp == nil { + *vp = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint64L(rv2i(rv).(map[uint8]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint8Uint64L(v map[uint8]uint64, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderCborBytes) fastpathDecMapUint8IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int) + if *vp == nil { + *vp = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntL(rv2i(rv).(map[uint8]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint8IntL(v map[uint8]int, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderCborBytes) fastpathDecMapUint8Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int32) + if *vp == nil { + *vp = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + 
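Each map fast path above is split into an R function, which reads the map header and, when decoding through a pointer to a nil map, allocates the map with a size hint bounded by decInferLen/maxInitLen, and an L function, which runs the mapElemKey/mapElemValue loop (and halts if handed a nil map directly). A hedged usage sketch, under the same package assumption as above:

// Illustrative only; not part of the vendored generated file.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.CborHandle

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(map[string]int{"a": 1, "b": 2}); err != nil {
		panic(err)
	}

	// The destination map starts nil but is reached through a pointer, so
	// the R-style function can allocate it (with a capped size hint) before
	// handing the key/value loop to the L-style function.
	var m map[string]int
	if err := codec.NewDecoderBytes(buf, &h).Decode(&m); err != nil {
		panic(err)
	}
	fmt.Println(m["a"], m["b"]) // 1 2
}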
if containerLen != 0 { + ft.DecMapUint8Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Int32L(rv2i(rv).(map[uint8]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint8Int32L(v map[uint8]int32, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderCborBytes) fastpathDecMapUint8Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]float64) + if *vp == nil { + *vp = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Float64L(rv2i(rv).(map[uint8]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint8Float64L(v map[uint8]float64, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderCborBytes) fastpathDecMapUint8BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]bool) + if *vp == nil { + *vp = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BoolL(rv2i(rv).(map[uint8]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint8BoolL(v map[uint8]bool, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderCborBytes) fastpathDecMapUint64IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]interface{}) + if *vp == nil { + *vp = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntfL(rv2i(rv).(map[uint64]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint64IntfL(v map[uint64]interface{}, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, 
containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderCborBytes) fastpathDecMapUint64StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]string) + if *vp == nil { + *vp = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64StringL(rv2i(rv).(map[uint64]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint64StringL(v map[uint64]string, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderCborBytes) fastpathDecMapUint64BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64][]byte) + if *vp == nil { + *vp = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BytesL(rv2i(rv).(map[uint64][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint64BytesL(v map[uint64][]byte, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderCborBytes) fastpathDecMapUint64Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint8) + if *vp == nil { + *vp = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint8L(rv2i(rv).(map[uint64]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint64Uint8L(v map[uint64]uint8, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderCborBytes) fastpathDecMapUint64Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint64) + if *vp == nil { + *vp = make(map[uint64]uint64, 
decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint64L(rv2i(rv).(map[uint64]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint64Uint64L(v map[uint64]uint64, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderCborBytes) fastpathDecMapUint64IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int) + if *vp == nil { + *vp = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint64IntL(v map[uint64]int, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderCborBytes) fastpathDecMapUint64Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int32) + if *vp == nil { + *vp = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint64Int32L(v map[uint64]int32, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderCborBytes) fastpathDecMapUint64Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]float64) + if *vp == nil { + *vp = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := 
d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderCborBytes) fastpathDecMapUint64BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]bool) + if *vp == nil { + *vp = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderCborBytes) fastpathDecMapIntIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]interface{}) + if *vp == nil { + *vp = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderCborBytes) fastpathDecMapIntStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]string) + if *vp == nil { + *vp = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapIntStringL(v map[int]string, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderCborBytes) fastpathDecMapIntBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int][]byte) + if *vp == nil { + *vp = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*vp, containerLen, d) + } + } else 
if containerLen != 0 { + ft.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapIntBytesL(v map[int][]byte, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderCborBytes) fastpathDecMapIntUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint8) + if *vp == nil { + *vp = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapIntUint8L(v map[int]uint8, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderCborBytes) fastpathDecMapIntUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint64) + if *vp == nil { + *vp = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapIntUint64L(v map[int]uint64, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderCborBytes) fastpathDecMapIntIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int) + if *vp == nil { + *vp = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapIntIntL(v map[int]int, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = 
int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderCborBytes) fastpathDecMapIntInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int32) + if *vp == nil { + *vp = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapIntInt32L(v map[int]int32, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderCborBytes) fastpathDecMapIntFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]float64) + if *vp == nil { + *vp = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapIntFloat64L(v map[int]float64, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderCborBytes) fastpathDecMapIntBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]bool) + if *vp == nil { + *vp = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapIntBoolL(v map[int]bool, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderCborBytes) fastpathDecMapInt32IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]interface{}) + if *vp == nil { + *vp = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) 
DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderCborBytes) fastpathDecMapInt32StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]string) + if *vp == nil { + *vp = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapInt32StringL(v map[int32]string, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderCborBytes) fastpathDecMapInt32BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32][]byte) + if *vp == nil { + *vp = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderCborBytes) fastpathDecMapInt32Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint8) + if *vp == nil { + *vp = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := 
int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderCborBytes) fastpathDecMapInt32Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint64) + if *vp == nil { + *vp = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderCborBytes) fastpathDecMapInt32IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int) + if *vp == nil { + *vp = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapInt32IntL(v map[int32]int, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderCborBytes) fastpathDecMapInt32Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int32) + if *vp == nil { + *vp = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapInt32Int32L(v map[int32]int32, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderCborBytes) fastpathDecMapInt32Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]float64) + if *vp == nil { + *vp = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + 
ft.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderCborBytes) fastpathDecMapInt32BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]bool) + if *vp == nil { + *vp = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborBytes) DecMapInt32BoolL(v map[int32]bool, containerLen int, d *decoderCborBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} + +type fastpathECborIO struct { + rtid uintptr + rt reflect.Type + encfn func(*encoderCborIO, *encFnInfo, reflect.Value) +} +type fastpathDCborIO struct { + rtid uintptr + rt reflect.Type + decfn func(*decoderCborIO, *decFnInfo, reflect.Value) +} +type fastpathEsCborIO [56]fastpathECborIO +type fastpathDsCborIO [56]fastpathDCborIO +type fastpathETCborIO struct{} +type fastpathDTCborIO struct{} + +func (helperEncDriverCborIO) fastpathEList() *fastpathEsCborIO { + var i uint = 0 + var s fastpathEsCborIO + fn := func(v interface{}, fe func(*encoderCborIO, *encFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathECborIO{rt2id(xrt), xrt, fe} + i++ + } + + fn([]interface{}(nil), (*encoderCborIO).fastpathEncSliceIntfR) + fn([]string(nil), (*encoderCborIO).fastpathEncSliceStringR) + fn([][]byte(nil), (*encoderCborIO).fastpathEncSliceBytesR) + fn([]float32(nil), (*encoderCborIO).fastpathEncSliceFloat32R) + fn([]float64(nil), (*encoderCborIO).fastpathEncSliceFloat64R) + fn([]uint8(nil), (*encoderCborIO).fastpathEncSliceUint8R) + fn([]uint64(nil), (*encoderCborIO).fastpathEncSliceUint64R) + fn([]int(nil), (*encoderCborIO).fastpathEncSliceIntR) + fn([]int32(nil), (*encoderCborIO).fastpathEncSliceInt32R) + fn([]int64(nil), (*encoderCborIO).fastpathEncSliceInt64R) + fn([]bool(nil), (*encoderCborIO).fastpathEncSliceBoolR) + + fn(map[string]interface{}(nil), (*encoderCborIO).fastpathEncMapStringIntfR) + fn(map[string]string(nil), (*encoderCborIO).fastpathEncMapStringStringR) + fn(map[string][]byte(nil), (*encoderCborIO).fastpathEncMapStringBytesR) + fn(map[string]uint8(nil), (*encoderCborIO).fastpathEncMapStringUint8R) + fn(map[string]uint64(nil), (*encoderCborIO).fastpathEncMapStringUint64R) + fn(map[string]int(nil), (*encoderCborIO).fastpathEncMapStringIntR) + fn(map[string]int32(nil), (*encoderCborIO).fastpathEncMapStringInt32R) + fn(map[string]float64(nil), (*encoderCborIO).fastpathEncMapStringFloat64R) + fn(map[string]bool(nil), (*encoderCborIO).fastpathEncMapStringBoolR) + 
fn(map[uint8]interface{}(nil), (*encoderCborIO).fastpathEncMapUint8IntfR) + fn(map[uint8]string(nil), (*encoderCborIO).fastpathEncMapUint8StringR) + fn(map[uint8][]byte(nil), (*encoderCborIO).fastpathEncMapUint8BytesR) + fn(map[uint8]uint8(nil), (*encoderCborIO).fastpathEncMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*encoderCborIO).fastpathEncMapUint8Uint64R) + fn(map[uint8]int(nil), (*encoderCborIO).fastpathEncMapUint8IntR) + fn(map[uint8]int32(nil), (*encoderCborIO).fastpathEncMapUint8Int32R) + fn(map[uint8]float64(nil), (*encoderCborIO).fastpathEncMapUint8Float64R) + fn(map[uint8]bool(nil), (*encoderCborIO).fastpathEncMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*encoderCborIO).fastpathEncMapUint64IntfR) + fn(map[uint64]string(nil), (*encoderCborIO).fastpathEncMapUint64StringR) + fn(map[uint64][]byte(nil), (*encoderCborIO).fastpathEncMapUint64BytesR) + fn(map[uint64]uint8(nil), (*encoderCborIO).fastpathEncMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*encoderCborIO).fastpathEncMapUint64Uint64R) + fn(map[uint64]int(nil), (*encoderCborIO).fastpathEncMapUint64IntR) + fn(map[uint64]int32(nil), (*encoderCborIO).fastpathEncMapUint64Int32R) + fn(map[uint64]float64(nil), (*encoderCborIO).fastpathEncMapUint64Float64R) + fn(map[uint64]bool(nil), (*encoderCborIO).fastpathEncMapUint64BoolR) + fn(map[int]interface{}(nil), (*encoderCborIO).fastpathEncMapIntIntfR) + fn(map[int]string(nil), (*encoderCborIO).fastpathEncMapIntStringR) + fn(map[int][]byte(nil), (*encoderCborIO).fastpathEncMapIntBytesR) + fn(map[int]uint8(nil), (*encoderCborIO).fastpathEncMapIntUint8R) + fn(map[int]uint64(nil), (*encoderCborIO).fastpathEncMapIntUint64R) + fn(map[int]int(nil), (*encoderCborIO).fastpathEncMapIntIntR) + fn(map[int]int32(nil), (*encoderCborIO).fastpathEncMapIntInt32R) + fn(map[int]float64(nil), (*encoderCborIO).fastpathEncMapIntFloat64R) + fn(map[int]bool(nil), (*encoderCborIO).fastpathEncMapIntBoolR) + fn(map[int32]interface{}(nil), (*encoderCborIO).fastpathEncMapInt32IntfR) + fn(map[int32]string(nil), (*encoderCborIO).fastpathEncMapInt32StringR) + fn(map[int32][]byte(nil), (*encoderCborIO).fastpathEncMapInt32BytesR) + fn(map[int32]uint8(nil), (*encoderCborIO).fastpathEncMapInt32Uint8R) + fn(map[int32]uint64(nil), (*encoderCborIO).fastpathEncMapInt32Uint64R) + fn(map[int32]int(nil), (*encoderCborIO).fastpathEncMapInt32IntR) + fn(map[int32]int32(nil), (*encoderCborIO).fastpathEncMapInt32Int32R) + fn(map[int32]float64(nil), (*encoderCborIO).fastpathEncMapInt32Float64R) + fn(map[int32]bool(nil), (*encoderCborIO).fastpathEncMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperDecDriverCborIO) fastpathDList() *fastpathDsCborIO { + var i uint = 0 + var s fastpathDsCborIO + fn := func(v interface{}, fd func(*decoderCborIO, *decFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathDCborIO{rt2id(xrt), xrt, fd} + i++ + } + + fn([]interface{}(nil), (*decoderCborIO).fastpathDecSliceIntfR) + fn([]string(nil), (*decoderCborIO).fastpathDecSliceStringR) + fn([][]byte(nil), (*decoderCborIO).fastpathDecSliceBytesR) + fn([]float32(nil), (*decoderCborIO).fastpathDecSliceFloat32R) + fn([]float64(nil), (*decoderCborIO).fastpathDecSliceFloat64R) + fn([]uint8(nil), (*decoderCborIO).fastpathDecSliceUint8R) + fn([]uint64(nil), (*decoderCborIO).fastpathDecSliceUint64R) + fn([]int(nil), (*decoderCborIO).fastpathDecSliceIntR) + fn([]int32(nil), (*decoderCborIO).fastpathDecSliceInt32R) + fn([]int64(nil), (*decoderCborIO).fastpathDecSliceInt64R) + 
fn([]bool(nil), (*decoderCborIO).fastpathDecSliceBoolR) + + fn(map[string]interface{}(nil), (*decoderCborIO).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*decoderCborIO).fastpathDecMapStringStringR) + fn(map[string][]byte(nil), (*decoderCborIO).fastpathDecMapStringBytesR) + fn(map[string]uint8(nil), (*decoderCborIO).fastpathDecMapStringUint8R) + fn(map[string]uint64(nil), (*decoderCborIO).fastpathDecMapStringUint64R) + fn(map[string]int(nil), (*decoderCborIO).fastpathDecMapStringIntR) + fn(map[string]int32(nil), (*decoderCborIO).fastpathDecMapStringInt32R) + fn(map[string]float64(nil), (*decoderCborIO).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*decoderCborIO).fastpathDecMapStringBoolR) + fn(map[uint8]interface{}(nil), (*decoderCborIO).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*decoderCborIO).fastpathDecMapUint8StringR) + fn(map[uint8][]byte(nil), (*decoderCborIO).fastpathDecMapUint8BytesR) + fn(map[uint8]uint8(nil), (*decoderCborIO).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*decoderCborIO).fastpathDecMapUint8Uint64R) + fn(map[uint8]int(nil), (*decoderCborIO).fastpathDecMapUint8IntR) + fn(map[uint8]int32(nil), (*decoderCborIO).fastpathDecMapUint8Int32R) + fn(map[uint8]float64(nil), (*decoderCborIO).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*decoderCborIO).fastpathDecMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*decoderCborIO).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*decoderCborIO).fastpathDecMapUint64StringR) + fn(map[uint64][]byte(nil), (*decoderCborIO).fastpathDecMapUint64BytesR) + fn(map[uint64]uint8(nil), (*decoderCborIO).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*decoderCborIO).fastpathDecMapUint64Uint64R) + fn(map[uint64]int(nil), (*decoderCborIO).fastpathDecMapUint64IntR) + fn(map[uint64]int32(nil), (*decoderCborIO).fastpathDecMapUint64Int32R) + fn(map[uint64]float64(nil), (*decoderCborIO).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*decoderCborIO).fastpathDecMapUint64BoolR) + fn(map[int]interface{}(nil), (*decoderCborIO).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*decoderCborIO).fastpathDecMapIntStringR) + fn(map[int][]byte(nil), (*decoderCborIO).fastpathDecMapIntBytesR) + fn(map[int]uint8(nil), (*decoderCborIO).fastpathDecMapIntUint8R) + fn(map[int]uint64(nil), (*decoderCborIO).fastpathDecMapIntUint64R) + fn(map[int]int(nil), (*decoderCborIO).fastpathDecMapIntIntR) + fn(map[int]int32(nil), (*decoderCborIO).fastpathDecMapIntInt32R) + fn(map[int]float64(nil), (*decoderCborIO).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*decoderCborIO).fastpathDecMapIntBoolR) + fn(map[int32]interface{}(nil), (*decoderCborIO).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*decoderCborIO).fastpathDecMapInt32StringR) + fn(map[int32][]byte(nil), (*decoderCborIO).fastpathDecMapInt32BytesR) + fn(map[int32]uint8(nil), (*decoderCborIO).fastpathDecMapInt32Uint8R) + fn(map[int32]uint64(nil), (*decoderCborIO).fastpathDecMapInt32Uint64R) + fn(map[int32]int(nil), (*decoderCborIO).fastpathDecMapInt32IntR) + fn(map[int32]int32(nil), (*decoderCborIO).fastpathDecMapInt32Int32R) + fn(map[int32]float64(nil), (*decoderCborIO).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*decoderCborIO).fastpathDecMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperEncDriverCborIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderCborIO) bool { + var ft fastpathETCborIO + switch v := iv.(type) { + case []interface{}: 
+ if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntfV(v, e) + } + case []string: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceStringV(v, e) + } + case [][]byte: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBytesV(v, e) + } + case []float32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat32V(v, e) + } + case []float64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat64V(v, e) + } + case []uint8: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint8V(v, e) + } + case []uint64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint64V(v, e) + } + case []int: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntV(v, e) + } + case []int32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt32V(v, e) + } + case []int64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt64V(v, e) + } + case []bool: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBoolV(v, e) + } + case map[string]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntfV(v, e) + } + case map[string]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringStringV(v, e) + } + case map[string][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBytesV(v, e) + } + case map[string]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint8V(v, e) + } + case map[string]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint64V(v, e) + } + case map[string]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntV(v, e) + } + case map[string]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringInt32V(v, e) + } + case map[string]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringFloat64V(v, e) + } + case map[string]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBoolV(v, e) + } + case map[uint8]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntfV(v, e) + } + case map[uint8]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8StringV(v, e) + } + case map[uint8][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BytesV(v, e) + } + case map[uint8]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint8V(v, e) + } + case map[uint8]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint64V(v, e) + } + case map[uint8]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntV(v, e) + } + case map[uint8]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Int32V(v, e) + } + case map[uint8]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Float64V(v, e) + } + case map[uint8]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BoolV(v, e) + } + case map[uint64]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntfV(v, e) + } + case map[uint64]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64StringV(v, e) + } + case map[uint64][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BytesV(v, e) + } + case map[uint64]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint8V(v, e) + } + case map[uint64]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint64V(v, e) + } + case map[uint64]int: + if v == nil { + e.e.writeNilMap() + } else { + 
ft.EncMapUint64IntV(v, e) + } + case map[uint64]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Int32V(v, e) + } + case map[uint64]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Float64V(v, e) + } + case map[uint64]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BoolV(v, e) + } + case map[int]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntfV(v, e) + } + case map[int]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntStringV(v, e) + } + case map[int][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBytesV(v, e) + } + case map[int]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint8V(v, e) + } + case map[int]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint64V(v, e) + } + case map[int]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntV(v, e) + } + case map[int]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntInt32V(v, e) + } + case map[int]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntFloat64V(v, e) + } + case map[int]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBoolV(v, e) + } + case map[int32]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntfV(v, e) + } + case map[int32]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32StringV(v, e) + } + case map[int32][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BytesV(v, e) + } + case map[int32]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint8V(v, e) + } + case map[int32]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint64V(v, e) + } + case map[int32]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntV(v, e) + } + case map[int32]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Int32V(v, e) + } + case map[int32]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Float64V(v, e) + } + case map[int32]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BoolV(v, e) + } + default: + _ = v + return false + } + return true +} + +func (e *encoderCborIO) fastpathEncSliceIntfR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborIO + var v []interface{} + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]interface{}) + } + if f.ti.mbs { + ft.EncAsMapSliceIntfV(v, e) + return + } + ft.EncSliceIntfV(v, e) +} +func (fastpathETCborIO) EncSliceIntfV(v []interface{}, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborIO) EncAsMapSliceIntfV(v []interface{}, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborIO) fastpathEncSliceStringR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborIO + var v []string + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } 
else { + v = rv2i(rv).([]string) + } + if f.ti.mbs { + ft.EncAsMapSliceStringV(v, e) + return + } + ft.EncSliceStringV(v, e) +} +func (fastpathETCborIO) EncSliceStringV(v []string, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborIO) EncAsMapSliceStringV(v []string, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborIO) fastpathEncSliceBytesR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborIO + var v [][]byte + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([][]byte) + } + if f.ti.mbs { + ft.EncAsMapSliceBytesV(v, e) + return + } + ft.EncSliceBytesV(v, e) +} +func (fastpathETCborIO) EncSliceBytesV(v [][]byte, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborIO) EncAsMapSliceBytesV(v [][]byte, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborIO) fastpathEncSliceFloat32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborIO + var v []float32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float32) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat32V(v, e) + return + } + ft.EncSliceFloat32V(v, e) +} +func (fastpathETCborIO) EncSliceFloat32V(v []float32, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborIO) EncAsMapSliceFloat32V(v []float32, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborIO) fastpathEncSliceFloat64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborIO + var v []float64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float64) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat64V(v, e) + return + } + ft.EncSliceFloat64V(v, e) +} +func (fastpathETCborIO) EncSliceFloat64V(v []float64, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborIO) EncAsMapSliceFloat64V(v []float64, e *encoderCborIO) { + if 
len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborIO) fastpathEncSliceUint8R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborIO + var v []uint8 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint8) + } + if f.ti.mbs { + ft.EncAsMapSliceUint8V(v, e) + return + } + ft.EncSliceUint8V(v, e) +} +func (fastpathETCborIO) EncSliceUint8V(v []uint8, e *encoderCborIO) { + e.e.EncodeStringBytesRaw(v) +} +func (fastpathETCborIO) EncAsMapSliceUint8V(v []uint8, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(uint64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborIO) fastpathEncSliceUint64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborIO + var v []uint64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint64) + } + if f.ti.mbs { + ft.EncAsMapSliceUint64V(v, e) + return + } + ft.EncSliceUint64V(v, e) +} +func (fastpathETCborIO) EncSliceUint64V(v []uint64, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborIO) EncAsMapSliceUint64V(v []uint64, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborIO) fastpathEncSliceIntR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborIO + var v []int + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int) + } + if f.ti.mbs { + ft.EncAsMapSliceIntV(v, e) + return + } + ft.EncSliceIntV(v, e) +} +func (fastpathETCborIO) EncSliceIntV(v []int, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborIO) EncAsMapSliceIntV(v []int, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborIO) fastpathEncSliceInt32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborIO + var v []int32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int32) + } + if f.ti.mbs { + ft.EncAsMapSliceInt32V(v, e) + return + } + ft.EncSliceInt32V(v, e) +} +func (fastpathETCborIO) EncSliceInt32V(v []int32, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + 
e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborIO) EncAsMapSliceInt32V(v []int32, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborIO) fastpathEncSliceInt64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborIO + var v []int64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int64) + } + if f.ti.mbs { + ft.EncAsMapSliceInt64V(v, e) + return + } + ft.EncSliceInt64V(v, e) +} +func (fastpathETCborIO) EncSliceInt64V(v []int64, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborIO) EncAsMapSliceInt64V(v []int64, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborIO) fastpathEncSliceBoolR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETCborIO + var v []bool + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]bool) + } + if f.ti.mbs { + ft.EncAsMapSliceBoolV(v, e) + return + } + ft.EncSliceBoolV(v, e) +} +func (fastpathETCborIO) EncSliceBoolV(v []bool, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETCborIO) EncAsMapSliceBoolV(v []bool, e *encoderCborIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborIO) fastpathEncMapStringIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (fastpathETCborIO) EncMapStringIntfV(v map[string]interface{}, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapStringStringR(f *encFnInfo, rv reflect.Value) { + 
fastpathETCborIO{}.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (fastpathETCborIO) EncMapStringStringV(v map[string]string, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapStringBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e) +} +func (fastpathETCborIO) EncMapStringBytesV(v map[string][]byte, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapStringUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (fastpathETCborIO) EncMapStringUint8V(v map[string]uint8, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapStringUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (fastpathETCborIO) EncMapStringUint64V(v map[string]uint64, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapStringIntR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (fastpathETCborIO) EncMapStringIntV(v map[string]int, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := 
range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapStringInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (fastpathETCborIO) EncMapStringInt32V(v map[string]int32, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapStringFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (fastpathETCborIO) EncMapStringFloat64V(v map[string]float64, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapStringBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (fastpathETCborIO) EncMapStringBoolV(v map[string]bool, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint8IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (fastpathETCborIO) EncMapUint8IntfV(v map[uint8]interface{}, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v 
{ + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint8StringR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (fastpathETCborIO) EncMapUint8StringV(v map[uint8]string, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint8BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e) +} +func (fastpathETCborIO) EncMapUint8BytesV(v map[uint8][]byte, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint8Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (fastpathETCborIO) EncMapUint8Uint8V(v map[uint8]uint8, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint8Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (fastpathETCborIO) EncMapUint8Uint64V(v map[uint8]uint64, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint8IntR(f 
*encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (fastpathETCborIO) EncMapUint8IntV(v map[uint8]int, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint8Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (fastpathETCborIO) EncMapUint8Int32V(v map[uint8]int32, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint8Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (fastpathETCborIO) EncMapUint8Float64V(v map[uint8]float64, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint8BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (fastpathETCborIO) EncMapUint8BoolV(v map[uint8]bool, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint64IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (fastpathETCborIO) EncMapUint64IntfV(v map[uint64]interface{}, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + 
e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint64StringR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (fastpathETCborIO) EncMapUint64StringV(v map[uint64]string, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint64BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e) +} +func (fastpathETCborIO) EncMapUint64BytesV(v map[uint64][]byte, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint64Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (fastpathETCborIO) EncMapUint64Uint8V(v map[uint64]uint8, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint64Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (fastpathETCborIO) EncMapUint64Uint64V(v map[uint64]uint64, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + 
e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint64IntR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (fastpathETCborIO) EncMapUint64IntV(v map[uint64]int, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint64Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (fastpathETCborIO) EncMapUint64Int32V(v map[uint64]int32, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint64Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (fastpathETCborIO) EncMapUint64Float64V(v map[uint64]float64, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapUint64BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (fastpathETCborIO) EncMapUint64BoolV(v map[uint64]bool, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapIntIntfR(f 
*encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (fastpathETCborIO) EncMapIntIntfV(v map[int]interface{}, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapIntStringR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (fastpathETCborIO) EncMapIntStringV(v map[int]string, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapIntBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e) +} +func (fastpathETCborIO) EncMapIntBytesV(v map[int][]byte, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapIntUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (fastpathETCborIO) EncMapIntUint8V(v map[int]uint8, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapIntUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (fastpathETCborIO) EncMapIntUint64V(v map[int]uint64, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var 
i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapIntIntR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (fastpathETCborIO) EncMapIntIntV(v map[int]int, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapIntInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (fastpathETCborIO) EncMapIntInt32V(v map[int]int32, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapIntFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (fastpathETCborIO) EncMapIntFloat64V(v map[int]float64, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapIntBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (fastpathETCborIO) EncMapIntBoolV(v map[int]bool, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = 
containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapInt32IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (fastpathETCborIO) EncMapInt32IntfV(v map[int32]interface{}, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapInt32StringR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (fastpathETCborIO) EncMapInt32StringV(v map[int32]string, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapInt32BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e) +} +func (fastpathETCborIO) EncMapInt32BytesV(v map[int32][]byte, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapInt32Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (fastpathETCborIO) EncMapInt32Uint8V(v map[int32]uint8, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) 
fastpathEncMapInt32Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (fastpathETCborIO) EncMapInt32Uint64V(v map[int32]uint64, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapInt32IntR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (fastpathETCborIO) EncMapInt32IntV(v map[int32]int, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapInt32Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (fastpathETCborIO) EncMapInt32Int32V(v map[int32]int32, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapInt32Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (fastpathETCborIO) EncMapInt32Float64V(v map[int32]float64, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderCborIO) fastpathEncMapInt32BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETCborIO{}.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (fastpathETCborIO) EncMapInt32BoolV(v map[int32]bool, e *encoderCborIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + 
e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (helperDecDriverCborIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderCborIO) bool { + var ft fastpathDTCborIO + var changed bool + var containerLen int + switch v := iv.(type) { + case []interface{}: + ft.DecSliceIntfN(v, d) + case *[]interface{}: + var v2 []interface{} + if v2, changed = ft.DecSliceIntfY(*v, d); changed { + *v = v2 + } + case []string: + ft.DecSliceStringN(v, d) + case *[]string: + var v2 []string + if v2, changed = ft.DecSliceStringY(*v, d); changed { + *v = v2 + } + case [][]byte: + ft.DecSliceBytesN(v, d) + case *[][]byte: + var v2 [][]byte + if v2, changed = ft.DecSliceBytesY(*v, d); changed { + *v = v2 + } + case []float32: + ft.DecSliceFloat32N(v, d) + case *[]float32: + var v2 []float32 + if v2, changed = ft.DecSliceFloat32Y(*v, d); changed { + *v = v2 + } + case []float64: + ft.DecSliceFloat64N(v, d) + case *[]float64: + var v2 []float64 + if v2, changed = ft.DecSliceFloat64Y(*v, d); changed { + *v = v2 + } + case []uint8: + ft.DecSliceUint8N(v, d) + case *[]uint8: + var v2 []uint8 + if v2, changed = ft.DecSliceUint8Y(*v, d); changed { + *v = v2 + } + case []uint64: + ft.DecSliceUint64N(v, d) + case *[]uint64: + var v2 []uint64 + if v2, changed = ft.DecSliceUint64Y(*v, d); changed { + *v = v2 + } + case []int: + ft.DecSliceIntN(v, d) + case *[]int: + var v2 []int + if v2, changed = ft.DecSliceIntY(*v, d); changed { + *v = v2 + } + case []int32: + ft.DecSliceInt32N(v, d) + case *[]int32: + var v2 []int32 + if v2, changed = ft.DecSliceInt32Y(*v, d); changed { + *v = v2 + } + case []int64: + ft.DecSliceInt64N(v, d) + case *[]int64: + var v2 []int64 + if v2, changed = ft.DecSliceInt64Y(*v, d); changed { + *v = v2 + } + case []bool: + ft.DecSliceBoolN(v, d) + case *[]bool: + var v2 []bool + if v2, changed = ft.DecSliceBoolY(*v, d); changed { + *v = v2 + } + case map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBytesL(v, 
containerLen, d) + } + d.mapEnd() + } + case *map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]interface{}: + if containerLen 
= d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + 
ft.DecMapUint8Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } 
else { + if *v == nil { + *v = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int][]byte: + if 
containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntfL(v, containerLen, d) + } + d.mapEnd() + } + case 
*map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + 
if containerLen != 0 { + ft.DecMapInt32Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*v, containerLen, d) + } + d.mapEnd() + } + default: + _ = v + return false + } + return true +} + +func (d *decoderCborIO) fastpathDecSliceIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]interface{}) + if vv, changed := ft.DecSliceIntfY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []interface{} + rvGetSlice4Array(rv, &v) + ft.DecSliceIntfN(v, d) + default: + ft.DecSliceIntfN(rv2i(rv).([]interface{}), d) + } +} +func (fastpathDTCborIO) DecSliceIntfY(v []interface{}, d *decoderCborIO) (v2 []interface{}, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []interface{}) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]interface{}, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + d.decode(&v[uint(j)]) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]interface{}{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborIO) DecSliceIntfN(v []interface{}, d *decoderCborIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + d.decode(&v[uint(j)]) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if 
isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborIO) fastpathDecSliceStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]string) + if vv, changed := ft.DecSliceStringY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []string + rvGetSlice4Array(rv, &v) + ft.DecSliceStringN(v, d) + default: + ft.DecSliceStringN(rv2i(rv).([]string), d) + } +} +func (fastpathDTCborIO) DecSliceStringY(v []string, d *decoderCborIO) (v2 []string, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []string) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]string, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, "")) + } + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]string{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborIO) DecSliceStringN(v []string, d *decoderCborIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborIO) fastpathDecSliceBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[][]byte) + if vv, changed := ft.DecSliceBytesY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v [][]byte + rvGetSlice4Array(rv, &v) + ft.DecSliceBytesN(v, d) + default: + ft.DecSliceBytesN(rv2i(rv).([][]byte), d) + } +} +func (fastpathDTCborIO) DecSliceBytesY(v [][]byte, d *decoderCborIO) (v2 [][]byte, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a 
slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst [][]byte) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 24)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([][]byte, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([][]byte{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborIO) DecSliceBytesN(v [][]byte, d *decoderCborIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborIO) fastpathDecSliceFloat32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float32) + if vv, changed := ft.DecSliceFloat32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float32 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat32N(v, d) + default: + ft.DecSliceFloat32N(rv2i(rv).([]float32), d) + } +} +func (fastpathDTCborIO) DecSliceFloat32Y(v []float32, d *decoderCborIO) (v2 []float32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = float32(d.d.DecodeFloat32()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float32{}) + } + if isArray { + d.arrayEnd() + } else { + 
d.mapEnd() + } + return v, changed +} +func (fastpathDTCborIO) DecSliceFloat32N(v []float32, d *decoderCborIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = float32(d.d.DecodeFloat32()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborIO) fastpathDecSliceFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float64) + if vv, changed := ft.DecSliceFloat64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float64 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat64N(v, d) + default: + ft.DecSliceFloat64N(rv2i(rv).([]float64), d) + } +} +func (fastpathDTCborIO) DecSliceFloat64Y(v []float64, d *decoderCborIO) (v2 []float64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeFloat64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborIO) DecSliceFloat64N(v []float64, d *decoderCborIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeFloat64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborIO) 
fastpathDecSliceUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint8) + if vv, changed := ft.DecSliceUint8Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint8 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint8N(v, d) + default: + ft.DecSliceUint8N(rv2i(rv).([]uint8), d) + } +} +func (fastpathDTCborIO) DecSliceUint8Y(v []uint8, d *decoderCborIO) (v2 []uint8, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + if ctyp != valueTypeMap { + var dbi dBytesIntoState + v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false) + return v2, dbi != dBytesIntoParamOut + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint8) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint8, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint8{}) + } + d.mapEnd() + return v, changed +} +func (fastpathDTCborIO) DecSliceUint8N(v []uint8, d *decoderCborIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + if ctyp != valueTypeMap { + d.decodeBytesInto(v[:len(v):len(v)], true) + return + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + d.mapEnd() +} + +func (d *decoderCborIO) fastpathDecSliceUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint64) + if vv, changed := ft.DecSliceUint64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint64 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint64N(v, d) + default: + ft.DecSliceUint64N(rv2i(rv).([]uint64), d) + } +} +func (fastpathDTCborIO) DecSliceUint64Y(v []uint64, d *decoderCborIO) (v2 []uint64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { 
+ d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeUint64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborIO) DecSliceUint64N(v []uint64, d *decoderCborIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeUint64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborIO) fastpathDecSliceIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int) + if vv, changed := ft.DecSliceIntY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int + rvGetSlice4Array(rv, &v) + ft.DecSliceIntN(v, d) + default: + ft.DecSliceIntN(rv2i(rv).([]int), d) + } +} +func (fastpathDTCborIO) DecSliceIntY(v []int, d *decoderCborIO) (v2 []int, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborIO) DecSliceIntN(v []int, d *decoderCborIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 
{ + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborIO) fastpathDecSliceInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int32) + if vv, changed := ft.DecSliceInt32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int32 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt32N(v, d) + default: + ft.DecSliceInt32N(rv2i(rv).([]int32), d) + } +} +func (fastpathDTCborIO) DecSliceInt32Y(v []int32, d *decoderCborIO) (v2 []int32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborIO) DecSliceInt32N(v []int32, d *decoderCborIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborIO) fastpathDecSliceInt64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int64) + if vv, changed := ft.DecSliceInt64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int64 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt64N(v, d) + default: + ft.DecSliceInt64N(rv2i(rv).([]int64), d) + } +} +func (fastpathDTCborIO) DecSliceInt64Y(v []int64, d *decoderCborIO) (v2 []int64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if 
isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeInt64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborIO) DecSliceInt64N(v []int64, d *decoderCborIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeInt64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderCborIO) fastpathDecSliceBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]bool) + if vv, changed := ft.DecSliceBoolY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []bool + rvGetSlice4Array(rv, &v) + ft.DecSliceBoolN(v, d) + default: + ft.DecSliceBoolN(rv2i(rv).([]bool), d) + } +} +func (fastpathDTCborIO) DecSliceBoolY(v []bool, d *decoderCborIO) (v2 []bool, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []bool) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]bool, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, false)) + } + v[uint(j)] = d.d.DecodeBool() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 
0 && v == nil { + fnv([]bool{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTCborIO) DecSliceBoolN(v []bool, d *decoderCborIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeBool() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} +func (d *decoderCborIO) fastpathDecMapStringIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]interface{}) + if *vp == nil { + *vp = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntfL(rv2i(rv).(map[string]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapStringIntfL(v map[string]interface{}, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderCborIO) fastpathDecMapStringStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]string) + if *vp == nil { + *vp = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringStringL(rv2i(rv).(map[string]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapStringStringL(v map[string]string, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderCborIO) fastpathDecMapStringBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string][]byte) + if *vp == nil { + *vp = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + 
ft.DecMapStringBytesL(rv2i(rv).(map[string][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapStringBytesL(v map[string][]byte, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderCborIO) fastpathDecMapStringUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint8) + if *vp == nil { + *vp = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint8L(rv2i(rv).(map[string]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapStringUint8L(v map[string]uint8, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderCborIO) fastpathDecMapStringUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint64) + if *vp == nil { + *vp = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint64L(rv2i(rv).(map[string]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapStringUint64L(v map[string]uint64, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderCborIO) fastpathDecMapStringIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int) + if *vp == nil { + *vp = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntL(rv2i(rv).(map[string]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapStringIntL(v map[string]int, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = 
int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderCborIO) fastpathDecMapStringInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int32) + if *vp == nil { + *vp = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringInt32L(rv2i(rv).(map[string]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapStringInt32L(v map[string]int32, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderCborIO) fastpathDecMapStringFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]float64) + if *vp == nil { + *vp = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringFloat64L(rv2i(rv).(map[string]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapStringFloat64L(v map[string]float64, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderCborIO) fastpathDecMapStringBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]bool) + if *vp == nil { + *vp = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBoolL(rv2i(rv).(map[string]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapStringBoolL(v map[string]bool, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderCborIO) fastpathDecMapUint8IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]interface{}) + if *vp == nil { + *vp = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntfL(rv2i(rv).(map[uint8]interface{}), containerLen, d) + } + d.mapEnd() +} +func 
(fastpathDTCborIO) DecMapUint8IntfL(v map[uint8]interface{}, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderCborIO) fastpathDecMapUint8StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]string) + if *vp == nil { + *vp = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8StringL(rv2i(rv).(map[uint8]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint8StringL(v map[uint8]string, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderCborIO) fastpathDecMapUint8BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8][]byte) + if *vp == nil { + *vp = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BytesL(rv2i(rv).(map[uint8][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint8BytesL(v map[uint8][]byte, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderCborIO) fastpathDecMapUint8Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint8) + if *vp == nil { + *vp = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint8L(rv2i(rv).(map[uint8]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint8Uint8L(v map[uint8]uint8, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := 
uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderCborIO) fastpathDecMapUint8Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint64) + if *vp == nil { + *vp = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint64L(rv2i(rv).(map[uint8]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint8Uint64L(v map[uint8]uint64, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderCborIO) fastpathDecMapUint8IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int) + if *vp == nil { + *vp = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntL(rv2i(rv).(map[uint8]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint8IntL(v map[uint8]int, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderCborIO) fastpathDecMapUint8Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int32) + if *vp == nil { + *vp = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Int32L(rv2i(rv).(map[uint8]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint8Int32L(v map[uint8]int32, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderCborIO) fastpathDecMapUint8Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]float64) + if *vp == nil { + *vp = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + 
ft.DecMapUint8Float64L(rv2i(rv).(map[uint8]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint8Float64L(v map[uint8]float64, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderCborIO) fastpathDecMapUint8BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]bool) + if *vp == nil { + *vp = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BoolL(rv2i(rv).(map[uint8]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint8BoolL(v map[uint8]bool, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderCborIO) fastpathDecMapUint64IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]interface{}) + if *vp == nil { + *vp = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntfL(rv2i(rv).(map[uint64]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint64IntfL(v map[uint64]interface{}, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderCborIO) fastpathDecMapUint64StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]string) + if *vp == nil { + *vp = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64StringL(rv2i(rv).(map[uint64]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint64StringL(v map[uint64]string, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = 
d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderCborIO) fastpathDecMapUint64BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64][]byte) + if *vp == nil { + *vp = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BytesL(rv2i(rv).(map[uint64][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint64BytesL(v map[uint64][]byte, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderCborIO) fastpathDecMapUint64Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint8) + if *vp == nil { + *vp = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint8L(rv2i(rv).(map[uint64]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint64Uint8L(v map[uint64]uint8, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderCborIO) fastpathDecMapUint64Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint64) + if *vp == nil { + *vp = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint64L(rv2i(rv).(map[uint64]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint64Uint64L(v map[uint64]uint64, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderCborIO) fastpathDecMapUint64IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int) + if *vp == nil { + *vp = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d) + } + 
d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint64IntL(v map[uint64]int, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderCborIO) fastpathDecMapUint64Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int32) + if *vp == nil { + *vp = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint64Int32L(v map[uint64]int32, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderCborIO) fastpathDecMapUint64Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]float64) + if *vp == nil { + *vp = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderCborIO) fastpathDecMapUint64BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]bool) + if *vp == nil { + *vp = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderCborIO) fastpathDecMapIntIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := 
rv2i(rv).(*map[int]interface{}) + if *vp == nil { + *vp = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderCborIO) fastpathDecMapIntStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]string) + if *vp == nil { + *vp = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapIntStringL(v map[int]string, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderCborIO) fastpathDecMapIntBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int][]byte) + if *vp == nil { + *vp = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapIntBytesL(v map[int][]byte, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderCborIO) fastpathDecMapIntUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint8) + if *vp == nil { + *vp = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapIntUint8L(v map[int]uint8, containerLen int, d 
*decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderCborIO) fastpathDecMapIntUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint64) + if *vp == nil { + *vp = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapIntUint64L(v map[int]uint64, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderCborIO) fastpathDecMapIntIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int) + if *vp == nil { + *vp = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapIntIntL(v map[int]int, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderCborIO) fastpathDecMapIntInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int32) + if *vp == nil { + *vp = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapIntInt32L(v map[int]int32, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderCborIO) fastpathDecMapIntFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]float64) + if *vp == nil { + *vp = 
make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapIntFloat64L(v map[int]float64, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderCborIO) fastpathDecMapIntBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]bool) + if *vp == nil { + *vp = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapIntBoolL(v map[int]bool, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderCborIO) fastpathDecMapInt32IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]interface{}) + if *vp == nil { + *vp = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderCborIO) fastpathDecMapInt32StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]string) + if *vp == nil { + *vp = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapInt32StringL(v map[int32]string, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen 
>= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderCborIO) fastpathDecMapInt32BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32][]byte) + if *vp == nil { + *vp = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderCborIO) fastpathDecMapInt32Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint8) + if *vp == nil { + *vp = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderCborIO) fastpathDecMapInt32Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint64) + if *vp == nil { + *vp = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderCborIO) fastpathDecMapInt32IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int) + if *vp == nil { + *vp = make(map[int32]int, 
decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapInt32IntL(v map[int32]int, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderCborIO) fastpathDecMapInt32Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int32) + if *vp == nil { + *vp = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapInt32Int32L(v map[int32]int32, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderCborIO) fastpathDecMapInt32Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]float64) + if *vp == nil { + *vp = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderCborIO) fastpathDecMapInt32BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTCborIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]bool) + if *vp == nil { + *vp = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTCborIO) DecMapInt32BoolL(v map[int32]bool, containerLen int, d *decoderCborIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := 
int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go index 802b1fc1d..2229a2b26 100644 --- a/vendor/github.com/ugorji/go/codec/cbor.go +++ b/vendor/github.com/ugorji/go/codec/cbor.go @@ -1,194 +1,92 @@ +//go:build notmono || codec.notmono + // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. package codec import ( + "io" "math" + "math/big" "reflect" "time" "unicode/utf8" ) -// major -const ( - cborMajorUint byte = iota - cborMajorNegInt - cborMajorBytes - cborMajorString - cborMajorArray - cborMajorMap - cborMajorTag - cborMajorSimpleOrFloat -) - -// simple -const ( - cborBdFalse byte = 0xf4 + iota - cborBdTrue - cborBdNil - cborBdUndefined - cborBdExt - cborBdFloat16 - cborBdFloat32 - cborBdFloat64 -) - -// indefinite -const ( - cborBdIndefiniteBytes byte = 0x5f - cborBdIndefiniteString byte = 0x7f - cborBdIndefiniteArray byte = 0x9f - cborBdIndefiniteMap byte = 0xbf - cborBdBreak byte = 0xff -) - -// These define some in-stream descriptors for -// manual encoding e.g. when doing explicit indefinite-length -const ( - CborStreamBytes byte = 0x5f - CborStreamString byte = 0x7f - CborStreamArray byte = 0x9f - CborStreamMap byte = 0xbf - CborStreamBreak byte = 0xff -) - -// base values -const ( - cborBaseUint byte = 0x00 - cborBaseNegInt byte = 0x20 - cborBaseBytes byte = 0x40 - cborBaseString byte = 0x60 - cborBaseArray byte = 0x80 - cborBaseMap byte = 0xa0 - cborBaseTag byte = 0xc0 - cborBaseSimple byte = 0xe0 -) - -// const ( -// cborSelfDesrTag byte = 0xd9 -// cborSelfDesrTag2 byte = 0xd9 -// cborSelfDesrTag3 byte = 0xf7 -// ) - -var ( - cbordescSimpleNames = map[byte]string{ - cborBdNil: "nil", - cborBdFalse: "false", - cborBdTrue: "true", - cborBdFloat16: "float", - cborBdFloat32: "float", - cborBdFloat64: "float", - cborBdBreak: "break", - } - cbordescIndefNames = map[byte]string{ - cborBdIndefiniteBytes: "bytes*", - cborBdIndefiniteString: "string*", - cborBdIndefiniteArray: "array*", - cborBdIndefiniteMap: "map*", - } - cbordescMajorNames = map[byte]string{ - cborMajorUint: "(u)int", - cborMajorNegInt: "int", - cborMajorBytes: "bytes", - cborMajorString: "string", - cborMajorArray: "array", - cborMajorMap: "map", - cborMajorTag: "tag", - cborMajorSimpleOrFloat: "simple", - } -) - -func cbordesc(bd byte) (s string) { - bm := bd >> 5 - if bm == cborMajorSimpleOrFloat { - s = cbordescSimpleNames[bd] - } else { - s = cbordescMajorNames[bm] - if s == "" { - s = cbordescIndefNames[bd] - } - } - if s == "" { - s = "unknown" - } - return -} - // ------------------- -type cborEncDriver struct { +type cborEncDriver[T encWriter] struct { noBuiltInTypes encDriverNoState encDriverNoopContainerWriter - h *CborHandle + encDriverContainerNoTrackerT + + h *CborHandle + e *encoderBase + w T + enc encoderI // scratch buffer for: encode time, numbers, etc // // RFC3339Nano uses 35 chars: 2006-01-02T15:04:05.999999999Z07:00 b [40]byte - - e Encoder } -func (e *cborEncDriver) encoder() *Encoder { - return &e.e +func (e *cborEncDriver[T]) EncodeNil() { + e.w.writen1(cborBdNil) } -func (e *cborEncDriver) EncodeNil() { - e.e.encWr.writen1(cborBdNil) -} - -func (e *cborEncDriver) EncodeBool(b bool) { +func (e *cborEncDriver[T]) EncodeBool(b bool) { if b { - e.e.encWr.writen1(cborBdTrue) + e.w.writen1(cborBdTrue) } else { - e.e.encWr.writen1(cborBdFalse) + 
e.w.writen1(cborBdFalse) } } -func (e *cborEncDriver) EncodeFloat32(f float32) { +func (e *cborEncDriver[T]) EncodeFloat32(f float32) { b := math.Float32bits(f) if e.h.OptimumSize { if h := floatToHalfFloatBits(b); halfFloatToFloatBits(h) == b { - e.e.encWr.writen1(cborBdFloat16) - bigen.writeUint16(e.e.w(), h) + e.w.writen1(cborBdFloat16) + e.w.writen2(bigen.PutUint16(h)) return } } - e.e.encWr.writen1(cborBdFloat32) - bigen.writeUint32(e.e.w(), b) + e.w.writen1(cborBdFloat32) + e.w.writen4(bigen.PutUint32(b)) } -func (e *cborEncDriver) EncodeFloat64(f float64) { +func (e *cborEncDriver[T]) EncodeFloat64(f float64) { if e.h.OptimumSize { if f32 := float32(f); float64(f32) == f { e.EncodeFloat32(f32) return } } - e.e.encWr.writen1(cborBdFloat64) - bigen.writeUint64(e.e.w(), math.Float64bits(f)) + e.w.writen1(cborBdFloat64) + e.w.writen8(bigen.PutUint64(math.Float64bits(f))) } -func (e *cborEncDriver) encUint(v uint64, bd byte) { +func (e *cborEncDriver[T]) encUint(v uint64, bd byte) { if v <= 0x17 { - e.e.encWr.writen1(byte(v) + bd) + e.w.writen1(byte(v) + bd) } else if v <= math.MaxUint8 { - e.e.encWr.writen2(bd+0x18, uint8(v)) + e.w.writen2(bd+0x18, uint8(v)) } else if v <= math.MaxUint16 { - e.e.encWr.writen1(bd + 0x19) - bigen.writeUint16(e.e.w(), uint16(v)) + e.w.writen1(bd + 0x19) + e.w.writen2(bigen.PutUint16(uint16(v))) } else if v <= math.MaxUint32 { - e.e.encWr.writen1(bd + 0x1a) - bigen.writeUint32(e.e.w(), uint32(v)) + e.w.writen1(bd + 0x1a) + e.w.writen4(bigen.PutUint32(uint32(v))) } else { // if v <= math.MaxUint64 { - e.e.encWr.writen1(bd + 0x1b) - bigen.writeUint64(e.e.w(), v) + e.w.writen1(bd + 0x1b) + e.w.writen8(bigen.PutUint64(v)) } } -func (e *cborEncDriver) EncodeInt(v int64) { +func (e *cborEncDriver[T]) EncodeInt(v int64) { if v < 0 { e.encUint(uint64(-1-v), cborBaseNegInt) } else { @@ -196,20 +94,20 @@ func (e *cborEncDriver) EncodeInt(v int64) { } } -func (e *cborEncDriver) EncodeUint(v uint64) { +func (e *cborEncDriver[T]) EncodeUint(v uint64) { e.encUint(v, cborBaseUint) } -func (e *cborEncDriver) encLen(bd byte, length int) { +func (e *cborEncDriver[T]) encLen(bd byte, length int) { e.encUint(uint64(length), bd) } -func (e *cborEncDriver) EncodeTime(t time.Time) { +func (e *cborEncDriver[T]) EncodeTime(t time.Time) { if t.IsZero() { e.EncodeNil() } else if e.h.TimeRFC3339 { e.encUint(0, cborBaseTag) - e.encStringBytesS(cborBaseString, stringView(fmtTime(t, time.RFC3339Nano, e.b[:0]))) + e.encStringBytesS(cborBaseString, stringView(t.AppendFormat(e.b[:0], time.RFC3339Nano))) } else { e.encUint(1, cborBaseTag) t = t.UTC().Round(time.Microsecond) @@ -222,56 +120,75 @@ func (e *cborEncDriver) EncodeTime(t time.Time) { } } -func (e *cborEncDriver) EncodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { +func (e *cborEncDriver[T]) EncodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { e.encUint(uint64(xtag), cborBaseTag) if ext == SelfExt { - e.e.encodeValue(baseRV(rv), e.h.fnNoExt(basetype)) + e.enc.encodeAs(rv, basetype, false) } else if v := ext.ConvertExt(rv); v == nil { - e.EncodeNil() + e.writeNilBytes() } else { - e.e.encode(v) + e.enc.encodeI(v) } } -func (e *cborEncDriver) EncodeRawExt(re *RawExt) { +func (e *cborEncDriver[T]) EncodeRawExt(re *RawExt) { e.encUint(uint64(re.Tag), cborBaseTag) - // only encodes re.Value (never re.Data) - if re.Value != nil { - e.e.encode(re.Value) + if re.Data != nil { + e.w.writeb(re.Data) + } else if re.Value != nil { + e.enc.encodeI(re.Value) } else { e.EncodeNil() } } -func (e 
*cborEncDriver) WriteArrayStart(length int) { +func (e *cborEncDriver[T]) WriteArrayEmpty() { if e.h.IndefiniteLength { - e.e.encWr.writen1(cborBdIndefiniteArray) + e.w.writen2(cborBdIndefiniteArray, cborBdBreak) + } else { + e.w.writen1(cborBaseArray) + // e.encLen(cborBaseArray, 0) + } +} + +func (e *cborEncDriver[T]) WriteMapEmpty() { + if e.h.IndefiniteLength { + e.w.writen2(cborBdIndefiniteMap, cborBdBreak) + } else { + e.w.writen1(cborBaseMap) + // e.encLen(cborBaseMap, 0) + } +} + +func (e *cborEncDriver[T]) WriteArrayStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteArray) } else { e.encLen(cborBaseArray, length) } } -func (e *cborEncDriver) WriteMapStart(length int) { +func (e *cborEncDriver[T]) WriteMapStart(length int) { if e.h.IndefiniteLength { - e.e.encWr.writen1(cborBdIndefiniteMap) + e.w.writen1(cborBdIndefiniteMap) } else { e.encLen(cborBaseMap, length) } } -func (e *cborEncDriver) WriteMapEnd() { +func (e *cborEncDriver[T]) WriteMapEnd() { if e.h.IndefiniteLength { - e.e.encWr.writen1(cborBdBreak) + e.w.writen1(cborBdBreak) } } -func (e *cborEncDriver) WriteArrayEnd() { +func (e *cborEncDriver[T]) WriteArrayEnd() { if e.h.IndefiniteLength { - e.e.encWr.writen1(cborBdBreak) + e.w.writen1(cborBdBreak) } } -func (e *cborEncDriver) EncodeString(v string) { +func (e *cborEncDriver[T]) EncodeString(v string) { bb := cborBaseString if e.h.StringToRaw { bb = cborBaseBytes @@ -279,74 +196,87 @@ func (e *cborEncDriver) EncodeString(v string) { e.encStringBytesS(bb, v) } -func (e *cborEncDriver) EncodeStringBytesRaw(v []byte) { - if v == nil { - e.EncodeNil() +func (e *cborEncDriver[T]) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) } + +func (e *cborEncDriver[T]) EncodeStringBytesRaw(v []byte) { + e.encStringBytesS(cborBaseBytes, stringView(v)) +} + +func (e *cborEncDriver[T]) encStringBytesS(bb byte, v string) { + if e.h.IndefiniteLength { + if bb == cborBaseBytes { + e.w.writen1(cborBdIndefiniteBytes) + } else { + e.w.writen1(cborBdIndefiniteString) + } + vlen := uint(len(v)) + n := max(4, min(vlen/4, 1024)) + for i := uint(0); i < vlen; { + i2 := i + n + if i2 >= vlen { + i2 = vlen + } + v2 := v[i:i2] + e.encLen(bb, len(v2)) + e.w.writestr(v2) + i = i2 + } + e.w.writen1(cborBdBreak) } else { - e.encStringBytesS(cborBaseBytes, stringView(v)) + e.encLen(bb, len(v)) + e.w.writestr(v) } } -func (e *cborEncDriver) encStringBytesS(bb byte, v string) { - if e.h.IndefiniteLength { - if bb == cborBaseBytes { - e.e.encWr.writen1(cborBdIndefiniteBytes) - } else { - e.e.encWr.writen1(cborBdIndefiniteString) - } - var vlen uint = uint(len(v)) - blen := vlen / 4 - if blen == 0 { - blen = 64 - } else if blen > 1024 { - blen = 1024 - } - for i := uint(0); i < vlen; { - var v2 string - i2 := i + blen - if i2 >= i && i2 < vlen { - v2 = v[i:i2] - } else { - v2 = v[i:] - } - e.encLen(bb, len(v2)) - e.e.encWr.writestr(v2) - i = i2 - } - e.e.encWr.writen1(cborBdBreak) - } else { - e.encLen(bb, len(v)) - e.e.encWr.writestr(v) +func (e *cborEncDriver[T]) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return } + e.EncodeStringBytesRaw(v) +} + +func (e *cborEncDriver[T]) writeNilOr(v byte) { + if !e.h.NilCollectionToZeroLength { + v = cborBdNil + } + e.w.writen1(v) +} + +func (e *cborEncDriver[T]) writeNilArray() { + e.writeNilOr(cborBaseArray) +} + +func (e *cborEncDriver[T]) writeNilMap() { + e.writeNilOr(cborBaseMap) +} + +func (e *cborEncDriver[T]) writeNilBytes() { + e.writeNilOr(cborBaseBytes) } // ---------------------- -type cborDecDriver struct 
{ +type cborDecDriver[T decReader] struct { decDriverNoopContainerReader - decDriverNoopNumberHelper - h *CborHandle - bdAndBdread - st bool // skip tags - _ bool // found nil + // decDriverNoopNumberHelper noBuiltInTypes - d Decoder + + h *CborHandle + d *decoderBase + r T + dec decoderI + bdAndBdread + // st bool // skip tags + // bytes bool } -func (d *cborDecDriver) decoder() *Decoder { - return &d.d -} - -func (d *cborDecDriver) descBd() string { - return sprintf("%v (%s)", d.bd, cbordesc(d.bd)) -} - -func (d *cborDecDriver) readNextBd() { - d.bd = d.d.decRd.readn1() +func (d *cborDecDriver[T]) readNextBd() { + d.bd = d.r.readn1() d.bdRead = true } -func (d *cborDecDriver) advanceNil() (null bool) { +func (d *cborDecDriver[T]) advanceNil() (null bool) { if !d.bdRead { d.readNextBd() } @@ -357,7 +287,7 @@ func (d *cborDecDriver) advanceNil() (null bool) { return } -func (d *cborDecDriver) TryNil() bool { +func (d *cborDecDriver[T]) TryNil() bool { return d.advanceNil() } @@ -368,18 +298,18 @@ func (d *cborDecDriver) TryNil() bool { // // By definition, skipTags should not be called before // checking for break, or nil or undefined. -func (d *cborDecDriver) skipTags() { +func (d *cborDecDriver[T]) skipTags() { for d.bd>>5 == cborMajorTag { d.decUint() - d.bd = d.d.decRd.readn1() + d.bd = d.r.readn1() } } -func (d *cborDecDriver) ContainerType() (vt valueType) { +func (d *cborDecDriver[T]) ContainerType() (vt valueType) { if !d.bdRead { d.readNextBd() } - if d.st { + if d.h.SkipUnexpectedTags { d.skipTags() } if d.bd == cborBdNil { @@ -399,7 +329,7 @@ func (d *cborDecDriver) ContainerType() (vt valueType) { return valueTypeUnset } -func (d *cborDecDriver) CheckBreak() (v bool) { +func (d *cborDecDriver[T]) CheckBreak() (v bool) { if !d.bdRead { d.readNextBd() } @@ -410,72 +340,60 @@ func (d *cborDecDriver) CheckBreak() (v bool) { return } -func (d *cborDecDriver) decUint() (ui uint64) { +func (d *cborDecDriver[T]) decUint() (ui uint64) { v := d.bd & 0x1f if v <= 0x17 { ui = uint64(v) } else if v == 0x18 { - ui = uint64(d.d.decRd.readn1()) + ui = uint64(d.r.readn1()) } else if v == 0x19 { - ui = uint64(bigen.Uint16(d.d.decRd.readn2())) + ui = uint64(bigen.Uint16(d.r.readn2())) } else if v == 0x1a { - ui = uint64(bigen.Uint32(d.d.decRd.readn4())) + ui = uint64(bigen.Uint32(d.r.readn4())) } else if v == 0x1b { - ui = uint64(bigen.Uint64(d.d.decRd.readn8())) + ui = uint64(bigen.Uint64(d.r.readn8())) } else { - d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd)) + halt.errorf("invalid descriptor decoding uint: %x/%s (%x)", d.bd, cbordesc(d.bd), v) } return } -func (d *cborDecDriver) decLen() int { +func (d *cborDecDriver[T]) decLen() int { return int(d.decUint()) } -func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte, major byte) []byte { - d.bdRead = false - for !d.CheckBreak() { - chunkMajor := d.bd >> 5 - if chunkMajor != major { - d.d.errorf("malformed indefinite string/bytes %x (%s); contains chunk with major type %v, expected %v", - d.bd, cbordesc(d.bd), chunkMajor, major) - } - n := uint(d.decLen()) - oldLen := uint(len(bs)) - newLen := oldLen + n - if newLen > uint(cap(bs)) { - bs2 := make([]byte, newLen, 2*uint(cap(bs))+n) - copy(bs2, bs) - bs = bs2 - } else { - bs = bs[:newLen] - } - d.d.decRd.readb(bs[oldLen:newLen]) - if d.h.ValidateUnicode && major == cborMajorString && !utf8.Valid(bs[oldLen:newLen]) { - d.d.errorf("indefinite-length text string contains chunk that is not a valid utf-8 sequence: 0x%x", bs[oldLen:newLen]) - } - d.bdRead = false - } - 
d.bdRead = false - return bs -} - -func (d *cborDecDriver) decFloat() (f float64, ok bool) { +func (d *cborDecDriver[T]) decFloat() (f float64, ok bool) { ok = true switch d.bd { case cborBdFloat16: - f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.d.decRd.readn2())))) + f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readn2())))) case cborBdFloat32: - f = float64(math.Float32frombits(bigen.Uint32(d.d.decRd.readn4()))) + f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4()))) case cborBdFloat64: - f = math.Float64frombits(bigen.Uint64(d.d.decRd.readn8())) + f = math.Float64frombits(bigen.Uint64(d.r.readn8())) default: - ok = false + if d.bd>>5 == cborMajorTag { + // extension tag for bignum/decimal + switch d.bd & 0x1f { // tag + case 2: + f = d.decTagBigIntAsFloat(false) + case 3: + f = d.decTagBigIntAsFloat(true) + case 4: + f = d.decTagBigFloatAsFloat(true) + case 5: + f = d.decTagBigFloatAsFloat(false) + default: + ok = false + } + } else { + ok = false + } } return } -func (d *cborDecDriver) decInteger() (ui uint64, neg, ok bool) { +func (d *cborDecDriver[T]) decInteger() (ui uint64, neg, ok bool) { ok = true switch d.bd >> 5 { case cborMajorUint: @@ -489,65 +407,67 @@ func (d *cborDecDriver) decInteger() (ui uint64, neg, ok bool) { return } -func (d *cborDecDriver) DecodeInt64() (i int64) { +func (d *cborDecDriver[T]) DecodeInt64() (i int64) { if d.advanceNil() { return } - if d.st { + if d.h.SkipUnexpectedTags { d.skipTags() } - i = decNegintPosintFloatNumberHelper{&d.d}.int64(d.decInteger()) + v1, v2, v3 := d.decInteger() + i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, true) d.bdRead = false return } -func (d *cborDecDriver) DecodeUint64() (ui uint64) { +func (d *cborDecDriver[T]) DecodeUint64() (ui uint64) { if d.advanceNil() { return } - if d.st { + if d.h.SkipUnexpectedTags { d.skipTags() } - ui = decNegintPosintFloatNumberHelper{&d.d}.uint64(d.decInteger()) + ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger()) d.bdRead = false return } -func (d *cborDecDriver) DecodeFloat64() (f float64) { +func (d *cborDecDriver[T]) DecodeFloat64() (f float64) { if d.advanceNil() { return } - if d.st { + if d.h.SkipUnexpectedTags { d.skipTags() } - f = decNegintPosintFloatNumberHelper{&d.d}.float64(d.decFloat()) + v1, v2 := d.decFloat() + f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, true) d.bdRead = false return } // bool can be decoded from bool only (single byte). 
-func (d *cborDecDriver) DecodeBool() (b bool) { +func (d *cborDecDriver[T]) DecodeBool() (b bool) { if d.advanceNil() { return } - if d.st { + if d.h.SkipUnexpectedTags { d.skipTags() } if d.bd == cborBdTrue { b = true } else if d.bd == cborBdFalse { } else { - d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd)) + halt.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd)) } d.bdRead = false return } -func (d *cborDecDriver) ReadMapStart() (length int) { +func (d *cborDecDriver[T]) ReadMapStart() (length int) { if d.advanceNil() { return containerLenNil } - if d.st { + if d.h.SkipUnexpectedTags { d.skipTags() } d.bdRead = false @@ -555,16 +475,16 @@ func (d *cborDecDriver) ReadMapStart() (length int) { return containerLenUnknown } if d.bd>>5 != cborMajorMap { - d.d.errorf("error reading map; got major type: %x, expected %x/%s", d.bd>>5, cborMajorMap, cbordesc(d.bd)) + halt.errorf("error reading map; got major type: %x, expected %x/%s", d.bd>>5, cborMajorMap, cbordesc(d.bd)) } return d.decLen() } -func (d *cborDecDriver) ReadArrayStart() (length int) { +func (d *cborDecDriver[T]) ReadArrayStart() (length int) { if d.advanceNil() { return containerLenNil } - if d.st { + if d.h.SkipUnexpectedTags { d.skipTags() } d.bdRead = false @@ -572,139 +492,206 @@ func (d *cborDecDriver) ReadArrayStart() (length int) { return containerLenUnknown } if d.bd>>5 != cborMajorArray { - d.d.errorf("invalid array; got major type: %x, expect: %x/%s", d.bd>>5, cborMajorArray, cbordesc(d.bd)) + halt.errorf("invalid array; got major type: %x, expect: %x/%s", d.bd>>5, cborMajorArray, cbordesc(d.bd)) } return d.decLen() } -func (d *cborDecDriver) DecodeBytes(bs []byte) (bsOut []byte) { - d.d.decByteState = decByteStateNone +// MARKER d.d.buf is ONLY used within DecodeBytes. +// Safe to use freely here only. + +func (d *cborDecDriver[T]) DecodeBytes() (bs []byte, state dBytesAttachState) { if d.advanceNil() { return } - if d.st { + if d.h.SkipUnexpectedTags { d.skipTags() } - if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { - d.bdRead = false + fnEnsureNonNilBytes := func() { + // buf is nil at first. Ensure a non-nil value is returned. if bs == nil { - d.d.decByteState = decByteStateReuseBuf - return d.decAppendIndefiniteBytes(d.d.b[:0], d.bd>>5) + bs = zeroByteSlice + state = dBytesDetach } - return d.decAppendIndefiniteBytes(bs[:0], d.bd>>5) + } + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + major := d.bd >> 5 + val4str := d.h.ValidateUnicode && major == cborMajorString + bs = d.d.buf[:0] + d.bdRead = false + for !d.CheckBreak() { + if d.bd>>5 != major { + const msg = "malformed indefinite string/bytes %x (%s); " + + "contains chunk with major type %v, expected %v" + halt.errorf(msg, d.bd, cbordesc(d.bd), d.bd>>5, major) + } + n := uint(d.decLen()) + bs = append(bs, d.r.readx(n)...) 
+ d.bdRead = false + if val4str && !utf8.Valid(bs[len(bs)-int(n):]) { + const msg = "indefinite-length text string contains chunk " + + "that is not a valid utf-8 sequence: 0x%x" + halt.errorf(msg, bs[len(bs)-int(n):]) + } + } + d.bdRead = false + d.d.buf = bs + state = dBytesAttachBuffer + fnEnsureNonNilBytes() + return } if d.bd == cborBdIndefiniteArray { d.bdRead = false - if bs == nil { - d.d.decByteState = decByteStateReuseBuf - bs = d.d.b[:0] - } else { - bs = bs[:0] - } + bs = d.d.buf[:0] for !d.CheckBreak() { bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) } - return bs + d.d.buf = bs + state = dBytesAttachBuffer + fnEnsureNonNilBytes() + return } + var cond bool if d.bd>>5 == cborMajorArray { d.bdRead = false - if bs == nil { - d.d.decByteState = decByteStateReuseBuf - bs = d.d.b[:] - } slen := d.decLen() - var changed bool - if bs, changed = usableByteSlice(bs, slen); changed { - d.d.decByteState = decByteStateNone - } + bs, cond = usableByteSlice(d.d.buf, slen) for i := 0; i < len(bs); i++ { bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8)) } for i := len(bs); i < slen; i++ { bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) } - return bs + if cond { + d.d.buf = bs + } + state = dBytesAttachBuffer + fnEnsureNonNilBytes() + return } clen := d.decLen() d.bdRead = false - if d.d.zerocopy() { - d.d.decByteState = decByteStateZerocopy - return d.d.decRd.rb.readx(uint(clen)) - } - if bs == nil { - d.d.decByteState = decByteStateReuseBuf - bs = d.d.b[:] - } - return decByteSlice(d.d.r(), clen, d.h.MaxInitLen, bs) + bs, cond = d.r.readxb(uint(clen)) + state = d.d.attachState(cond) + return } -func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) { - s = d.DecodeBytes(nil) - if d.h.ValidateUnicode && !utf8.Valid(s) { - d.d.errorf("DecodeStringAsBytes: invalid UTF-8: %s", s) +func (d *cborDecDriver[T]) DecodeStringAsBytes() (out []byte, state dBytesAttachState) { + out, state = d.DecodeBytes() + if d.h.ValidateUnicode && !utf8.Valid(out) { + halt.errorf("DecodeStringAsBytes: invalid UTF-8: %s", out) } return } -func (d *cborDecDriver) DecodeTime() (t time.Time) { +func (d *cborDecDriver[T]) DecodeTime() (t time.Time) { if d.advanceNil() { return } if d.bd>>5 != cborMajorTag { - d.d.errorf("error reading tag; expected major type: %x, got: %x", cborMajorTag, d.bd>>5) + halt.errorf("error reading tag; expected major type: %x, got: %x", cborMajorTag, d.bd>>5) } xtag := d.decUint() d.bdRead = false return d.decodeTime(xtag) } -func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) { +func (d *cborDecDriver[T]) decodeTime(xtag uint64) (t time.Time) { switch xtag { case 0: var err error - t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes())) - d.d.onerror(err) + t, err = time.Parse(time.RFC3339, stringView(bytesOKs(d.DecodeStringAsBytes()))) + halt.onerror(err) case 1: f1, f2 := math.Modf(d.DecodeFloat64()) t = time.Unix(int64(f1), int64(f2*1e9)) default: - d.d.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag) + halt.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag) } t = t.UTC().Round(time.Microsecond) return } -func (d *cborDecDriver) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { +func (d *cborDecDriver[T]) preDecodeExt(checkTag bool, xtag uint64) (realxtag uint64, ok bool) { if d.advanceNil() { return } if d.bd>>5 != cborMajorTag { - d.d.errorf("error reading tag; expected major type: %x, got: %x", cborMajorTag, d.bd>>5) + halt.errorf("error reading tag; expected major type: %x, 
got: %x", cborMajorTag, d.bd>>5) } - realxtag := d.decUint() + realxtag = d.decUint() d.bdRead = false - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - d.d.decode(&re.Value) - } else if xtag != realxtag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag) - } else if ext == SelfExt { - d.d.decodeValue(baseRV(rv), d.h.fnNoExt(basetype)) - } else { - d.d.interfaceExtConvertAndDecode(rv, ext) + if checkTag && xtag != realxtag { + halt.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag) } - d.bdRead = false + ok = true + return } -func (d *cborDecDriver) DecodeNaked() { +func (d *cborDecDriver[T]) DecodeRawExt(re *RawExt) { + if realxtag, ok := d.preDecodeExt(false, 0); ok { + re.Tag = realxtag + d.dec.decode(&re.Value) + d.bdRead = false + } +} + +func (d *cborDecDriver[T]) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + if _, ok := d.preDecodeExt(true, xtag); ok { + if ext == SelfExt { + d.dec.decodeAs(rv, basetype, false) + } else { + d.dec.interfaceExtConvertAndDecode(rv, ext) + } + d.bdRead = false + } +} + +func (d *cborDecDriver[T]) decTagBigIntAsFloat(neg bool) (f float64) { + bs, _ := d.DecodeBytes() + bi := new(big.Int).SetBytes(bs) + if neg { // neg big.Int + bi0 := bi + bi = new(big.Int).Sub(big.NewInt(-1), bi0) + } + f, _ = bi.Float64() + return +} + +func (d *cborDecDriver[T]) decTagBigFloatAsFloat(decimal bool) (f float64) { + if nn := d.r.readn1(); nn != 82 { + halt.errorf("(%d) decoding decimal/big.Float: expected 2 numbers", nn) + } + exp := d.DecodeInt64() + mant := d.DecodeInt64() + if decimal { // m*(10**e) + // MARKER: if precision/other issues crop, consider using big.Float on base 10. + // The logic is more convoluted, which is why we leverage readFloatResult for now. 
+ rf := readFloatResult{exp: int8(exp)} + if mant >= 0 { + rf.mantissa = uint64(mant) + } else { + rf.neg = true + rf.mantissa = uint64(-mant) + } + f, _ = parseFloat64_reader(rf) + // f = float64(mant) * math.Pow10(exp) + } else { // m*(2**e) + // f = float64(mant) * math.Pow(2, exp) + bfm := new(big.Float).SetPrec(64).SetInt64(mant) + bf := new(big.Float).SetPrec(64).SetMantExp(bfm, int(exp)) + f, _ = bf.Float64() + } + return +} + +func (d *cborDecDriver[T]) DecodeNaked() { if !d.bdRead { d.readNextBd() } n := d.d.naked() var decodeFurther bool - switch d.bd >> 5 { case cborMajorUint: if d.h.SignedInteger { @@ -718,10 +705,10 @@ func (d *cborDecDriver) DecodeNaked() { n.v = valueTypeInt n.i = d.DecodeInt64() case cborMajorBytes: - d.d.fauxUnionReadRawBytes(false) + d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) //, d.h.ZeroCopy) case cborMajorString: n.v = valueTypeString - n.s = d.d.stringZC(d.DecodeStringAsBytes()) + n.s = d.d.detach2Str(d.DecodeStringAsBytes()) case cborMajorArray: n.v = valueTypeArray decodeFurther = true @@ -731,17 +718,46 @@ func (d *cborDecDriver) DecodeNaked() { case cborMajorTag: n.v = valueTypeExt n.u = d.decUint() + d.bdRead = false n.l = nil - if n.u == 0 || n.u == 1 { - d.bdRead = false - n.v = valueTypeTime - n.t = d.decodeTime(n.u) - } else if d.st && d.h.getExtForTag(n.u) == nil { - // d.skipTags() // no need to call this - tags already skipped - d.bdRead = false - d.DecodeNaked() - return // return when done (as true recursive function) + xx := d.h.getExtForTag(n.u) + if xx == nil { + switch n.u { + case 0, 1: + n.v = valueTypeTime + n.t = d.decodeTime(n.u) + case 2: + n.f = d.decTagBigIntAsFloat(false) + n.v = valueTypeFloat + case 3: + n.f = d.decTagBigIntAsFloat(true) + n.v = valueTypeFloat + case 4: + n.f = d.decTagBigFloatAsFloat(true) + n.v = valueTypeFloat + case 5: + n.f = d.decTagBigFloatAsFloat(false) + n.v = valueTypeFloat + case 55799: // skip + d.DecodeNaked() + default: + if d.h.SkipUnexpectedTags { + d.DecodeNaked() + } + // else we will use standard mode to decode ext e.g. into a RawExt + } + return } + // if n.u == 0 || n.u == 1 { + // d.bdRead = false + // n.v = valueTypeTime + // n.t = d.decodeTime(n.u) + // } else if d.h.SkipUnexpectedTags && d.h.getExtForTag(n.u) == nil { + // // d.skipTags() // no need to call this - tags already skipped + // d.bdRead = false + // d.DecodeNaked() + // return // return when done (as true recursive function) + // } case cborMajorSimpleOrFloat: switch d.bd { case cborBdNil, cborBdUndefined: @@ -756,211 +772,224 @@ func (d *cborDecDriver) DecodeNaked() { n.v = valueTypeFloat n.f = d.DecodeFloat64() default: - d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + halt.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) } default: // should never happen - d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + halt.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) } if !decodeFurther { d.bdRead = false } } -func (d *cborDecDriver) uintBytes() (v []byte, ui uint64) { +func (d *cborDecDriver[T]) uintBytes() (v []byte, ui uint64) { // this is only used by nextValueBytes, so it's ok to // use readx and bigenstd here. 
switch vv := d.bd & 0x1f; vv { case 0x18: - v = d.d.decRd.readx(1) + v = d.r.readx(1) ui = uint64(v[0]) case 0x19: - v = d.d.decRd.readx(2) + v = d.r.readx(2) ui = uint64(bigenstd.Uint16(v)) case 0x1a: - v = d.d.decRd.readx(4) + v = d.r.readx(4) ui = uint64(bigenstd.Uint32(v)) case 0x1b: - v = d.d.decRd.readx(8) + v = d.r.readx(8) ui = uint64(bigenstd.Uint64(v)) default: if vv > 0x1b { - d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd)) + halt.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd)) } ui = uint64(vv) } return } -func (d *cborDecDriver) nextValueBytes(v0 []byte) (v []byte) { +func (d *cborDecDriver[T]) nextValueBytes() (v []byte) { if !d.bdRead { d.readNextBd() } - v = v0 - var h = decNextValueBytesHelper{d: &d.d} - var cursor = d.d.rb.c - 1 - h.append1(&v, d.bd) - v = d.nextValueBytesBdReadR(v) + d.r.startRecording() + d.nextValueBytesBdReadR() + v = d.r.stopRecording() d.bdRead = false - h.bytesRdV(&v, cursor) return } -func (d *cborDecDriver) nextValueBytesR(v0 []byte) (v []byte) { - d.readNextBd() - v = v0 - var h = decNextValueBytesHelper{d: &d.d} - h.append1(&v, d.bd) - return d.nextValueBytesBdReadR(v) -} +// func (d *cborDecDriver[T]) nextValueBytesR(v0 []byte) (v []byte) { +// d.readNextBd() +// v0 = append(v0, d.bd) +// d.r.startRecording(v0) +// d.nextValueBytesBdReadR() +// v = d.r.stopRecording() +// return +// } -func (d *cborDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) { - v = v0 - var h = decNextValueBytesHelper{d: &d.d} - - var bs []byte +func (d *cborDecDriver[T]) nextValueBytesBdReadR() { + // var bs []byte var ui uint64 switch d.bd >> 5 { case cborMajorUint, cborMajorNegInt: - bs, _ = d.uintBytes() - h.appendN(&v, bs...) + d.uintBytes() case cborMajorString, cborMajorBytes: if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { for { d.readNextBd() - h.append1(&v, d.bd) if d.bd == cborBdBreak { break } - bs, ui = d.uintBytes() - h.appendN(&v, bs...) - h.appendN(&v, d.d.decRd.readx(uint(ui))...) + _, ui = d.uintBytes() + d.r.skip(uint(ui)) } } else { - bs, ui = d.uintBytes() - h.appendN(&v, bs...) - h.appendN(&v, d.d.decRd.readx(uint(ui))...) + _, ui = d.uintBytes() + d.r.skip(uint(ui)) } case cborMajorArray: if d.bd == cborBdIndefiniteArray { for { d.readNextBd() - h.append1(&v, d.bd) if d.bd == cborBdBreak { break } - v = d.nextValueBytesBdReadR(v) + d.nextValueBytesBdReadR() } } else { - bs, ui = d.uintBytes() - h.appendN(&v, bs...) + _, ui = d.uintBytes() for i := uint64(0); i < ui; i++ { - v = d.nextValueBytesR(v) + d.readNextBd() + d.nextValueBytesBdReadR() } } case cborMajorMap: if d.bd == cborBdIndefiniteMap { for { d.readNextBd() - h.append1(&v, d.bd) if d.bd == cborBdBreak { break } - v = d.nextValueBytesBdReadR(v) - v = d.nextValueBytesR(v) + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() } } else { - bs, ui = d.uintBytes() - h.appendN(&v, bs...) + _, ui = d.uintBytes() for i := uint64(0); i < ui; i++ { - v = d.nextValueBytesR(v) - v = d.nextValueBytesR(v) + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() } } case cborMajorTag: - bs, _ = d.uintBytes() - h.appendN(&v, bs...) - v = d.nextValueBytesR(v) + d.uintBytes() + d.readNextBd() + d.nextValueBytesBdReadR() case cborMajorSimpleOrFloat: switch d.bd { case cborBdNil, cborBdUndefined, cborBdFalse, cborBdTrue: // pass case cborBdFloat16: - h.appendN(&v, d.d.decRd.readx(2)...) + d.r.skip(2) case cborBdFloat32: - h.appendN(&v, d.d.decRd.readx(4)...) 
+ d.r.skip(4) case cborBdFloat64: - h.appendN(&v, d.d.decRd.readx(8)...) + d.r.skip(8) default: - d.d.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd) + halt.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd) } default: // should never happen - d.d.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd) + halt.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd) } return } -// ------------------------- - -// CborHandle is a Handle for the CBOR encoding format, -// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . -// -// CBOR is comprehensively supported, including support for: -// - indefinite-length arrays/maps/bytes/strings -// - (extension) tags in range 0..0xffff (0 .. 65535) -// - half, single and double-precision floats -// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) -// - nil, true, false, ... -// - arrays and maps, bytes and text strings -// -// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. -// Users can implement them as needed (using SetExt), including spec-documented ones: -// - timestamp, BigNum, BigFloat, Decimals, -// - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. -type CborHandle struct { - binaryEncodingType - // noElemSeparators - BasicHandle - - // IndefiniteLength=true, means that we encode using indefinitelength - IndefiniteLength bool - - // TimeRFC3339 says to encode time.Time using RFC3339 format. - // If unset, we encode time.Time using seconds past epoch. - TimeRFC3339 bool - - // SkipUnexpectedTags says to skip over any tags for which extensions are - // not defined. This is in keeping with the cbor spec on "Optional Tagging of Items". - // - // Furthermore, this allows the skipping over of the Self Describing Tag 0xd9d9f7. - SkipUnexpectedTags bool -} - -// Name returns the name of the handle: cbor -func (h *CborHandle) Name() string { return "cbor" } - -func (h *CborHandle) desc(bd byte) string { return cbordesc(bd) } - -func (h *CborHandle) newEncDriver() encDriver { - var e = &cborEncDriver{h: h} - e.e.e = e - e.e.init(h) - e.reset() - return e -} - -func (h *CborHandle) newDecDriver() decDriver { - d := &cborDecDriver{h: h, st: h.SkipUnexpectedTags} - d.d.d = d - d.d.cbor = true - d.d.init(h) - d.reset() - return d -} - -func (d *cborDecDriver) reset() { +func (d *cborDecDriver[T]) reset() { d.bdAndBdread.reset() - d.st = d.h.SkipUnexpectedTags + // d.st = d.h.SkipUnexpectedTags } -var _ decDriver = (*cborDecDriver)(nil) -var _ encDriver = (*cborEncDriver)(nil) +// ---- +// +// The following below are similar across all format files (except for the format name). +// +// We keep them together here, so that we can easily copy and compare. 
+ +// ---- + +func (d *cborEncDriver[T]) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*CborHandle) + d.e = shared + if shared.bytes { + fp = cborFpEncBytes + } else { + fp = cborFpEncIO + } + // d.w.init() + d.init2(enc) + return +} + +func (e *cborEncDriver[T]) writeBytesAsis(b []byte) { e.w.writeb(b) } + +// func (e *cborEncDriver[T]) writeStringAsisDblQuoted(v string) { e.w.writeqstr(v) } + +func (e *cborEncDriver[T]) writerEnd() { e.w.end() } + +func (e *cborEncDriver[T]) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *cborEncDriver[T]) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +// ---- + +func (d *cborDecDriver[T]) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*CborHandle) + d.d = shared + if shared.bytes { + fp = cborFpDecBytes + } else { + fp = cborFpDecIO + } + // d.r.init() + d.init2(dec) + return +} + +func (d *cborDecDriver[T]) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *cborDecDriver[T]) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *cborDecDriver[T]) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +// ---- (custom stanza) + +func (d *cborDecDriver[T]) descBd() string { + return sprintf("%v (%s)", d.bd, cbordesc(d.bd)) +} + +func (d *cborDecDriver[T]) DecodeFloat32() (f float32) { + return float32(chkOvf.Float32V(d.DecodeFloat64())) +} + +func (d *cborEncDriver[T]) init2(enc encoderI) { + d.enc = enc +} + +func (d *cborDecDriver[T]) init2(dec decoderI) { + d.dec = dec + // d.d.cbor = true +} diff --git a/vendor/github.com/ugorji/go/codec/cbor.mono.generated.go b/vendor/github.com/ugorji/go/codec/cbor.mono.generated.go new file mode 100644 index 000000000..572d5cb56 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/cbor.mono.generated.go @@ -0,0 +1,7985 @@ +//go:build !notmono && !codec.notmono + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "encoding" + + "io" + "math" + "math/big" + "reflect" + "slices" + "sort" + "strconv" + "sync" + "time" + "unicode/utf8" +) + +type helperEncDriverCborBytes struct{} +type encFnCborBytes struct { + i encFnInfo + fe func(*encoderCborBytes, *encFnInfo, reflect.Value) +} +type encRtidFnCborBytes struct { + rtid uintptr + fn *encFnCborBytes +} +type encoderCborBytes struct { + dh helperEncDriverCborBytes + fp *fastpathEsCborBytes + e cborEncDriverBytes + encoderBase +} +type helperDecDriverCborBytes struct{} +type decFnCborBytes struct { + i decFnInfo + fd func(*decoderCborBytes, *decFnInfo, reflect.Value) +} +type decRtidFnCborBytes struct { + rtid uintptr + fn *decFnCborBytes +} +type decoderCborBytes struct { + dh helperDecDriverCborBytes + fp *fastpathDsCborBytes + d cborDecDriverBytes + decoderBase +} +type cborEncDriverBytes struct { + noBuiltInTypes + encDriverNoState + encDriverNoopContainerWriter + encDriverContainerNoTrackerT + + h *CborHandle + e *encoderBase + w bytesEncAppender + enc encoderI + + b [40]byte +} +type cborDecDriverBytes struct { + decDriverNoopContainerReader + + noBuiltInTypes + + h *CborHandle + d *decoderBase + r bytesDecReader + dec decoderI + bdAndBdread +} + +func (e *encoderCborBytes) rawExt(_ *encFnInfo, rv reflect.Value) { + if re := rv2i(rv).(*RawExt); re == nil { + e.e.EncodeNil() + } else { + e.e.EncodeRawExt(re) + } +} + +func (e *encoderCborBytes) ext(f *encFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (e *encoderCborBytes) selferMarshal(_ *encFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(&Encoder{e}) +} + +func (e *encoderCborBytes) binaryMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) +} + +func (e *encoderCborBytes) textMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) +} + +func (e *encoderCborBytes) jsonMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) +} + +func (e *encoderCborBytes) raw(_ *encFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *encoderCborBytes) encodeComplex64(v complex64) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat32(real(v)) +} + +func (e *encoderCborBytes) encodeComplex128(v complex128) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat64(real(v)) +} + +func (e *encoderCborBytes) kBool(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeBool(rvGetBool(rv)) +} + +func (e *encoderCborBytes) kTime(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeTime(rvGetTime(rv)) +} + +func (e *encoderCborBytes) kString(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeString(rvGetString(rv)) +} + +func (e *encoderCborBytes) kFloat32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(rvGetFloat32(rv)) +} + +func (e *encoderCborBytes) kFloat64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rvGetFloat64(rv)) +} + +func (e *encoderCborBytes) kComplex64(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex64(rvGetComplex64(rv)) +} + +func (e *encoderCborBytes) kComplex128(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex128(rvGetComplex128(rv)) +} + +func (e *encoderCborBytes) kInt(_ *encFnInfo, rv reflect.Value) 
{ + e.e.EncodeInt(int64(rvGetInt(rv))) +} + +func (e *encoderCborBytes) kInt8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt8(rv))) +} + +func (e *encoderCborBytes) kInt16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt16(rv))) +} + +func (e *encoderCborBytes) kInt32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt32(rv))) +} + +func (e *encoderCborBytes) kInt64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt64(rv))) +} + +func (e *encoderCborBytes) kUint(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint(rv))) +} + +func (e *encoderCborBytes) kUint8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint8(rv))) +} + +func (e *encoderCborBytes) kUint16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint16(rv))) +} + +func (e *encoderCborBytes) kUint32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint32(rv))) +} + +func (e *encoderCborBytes) kUint64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint64(rv))) +} + +func (e *encoderCborBytes) kUintptr(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUintptr(rv))) +} + +func (e *encoderCborBytes) kSeqFn(rt reflect.Type) (fn *encFnCborBytes) { + + if rt = baseRT(rt); rt.Kind() != reflect.Interface { + fn = e.fn(rt) + } + return +} + +func (e *encoderCborBytes) kArrayWMbs(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(l) + e.mapStart(l >> 1) + + var fn *encFnCborBytes + builtin := ti.tielem.flagEncBuiltin + if !builtin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerMapKey + e.e.WriteMapElemKey(true) + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(false) + } else { + e.mapElemValue() + } + } + e.c = 0 + e.e.WriteMapEnd() + +} + +func (e *encoderCborBytes) kArrayW(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l <= 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(l) + + var fn *encFnCborBytes + if !ti.tielem.flagEncBuiltin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerArrayElem + e.e.WriteArrayElem(true) + builtin := ti.tielem.flagEncBuiltin + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + e.c = containerArrayElem + e.e.WriteArrayElem(false) + } + + e.c = 0 + e.e.WriteArrayEnd() +} + +func (e *encoderCborBytes) kChan(f *encFnInfo, rv reflect.Value) { + if f.ti.chandir&uint8(reflect.RecvDir) == 0 { + halt.errorStr("send-only channel cannot be encoded") + } + if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) { + e.kSliceBytesChan(rv) + return + } + rtslice := reflect.SliceOf(f.ti.elem) + rv = chanToSlice(rv, rtslice, e.h.ChanRecvTimeout) + ti := e.h.getTypeInfo(rt2id(rtslice), rtslice) + if f.ti.mbs { + e.kArrayWMbs(rv, ti, true) + } else { + e.kArrayW(rv, ti, true) + } +} + +func (e *encoderCborBytes) kSlice(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, true) + } else if f.ti.rtid == uint8SliceTypId || uint8TypId == rt2id(f.ti.elem) { + + e.e.EncodeBytes(rvGetBytes(rv)) + } else { + e.kArrayW(rv, f.ti, 
true) + } +} + +func (e *encoderCborBytes) kArray(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, false) + } else if handleBytesWithinKArray && uint8TypId == rt2id(f.ti.elem) { + e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, nil)) + } else { + e.kArrayW(rv, f.ti, false) + } +} + +func (e *encoderCborBytes) kSliceBytesChan(rv reflect.Value) { + + bs0 := e.blist.peek(32, true) + bs := bs0 + + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + +L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + + break L1 + } + } + default: + for b := range ch { + bs = append(bs, b) + } + } + + e.e.EncodeBytes(bs) + e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.blist.put(bs0) + } +} + +func (e *encoderCborBytes) kStructFieldKey(keyType valueType, encName string) { + + if keyType == valueTypeString { + e.e.EncodeString(encName) + } else if keyType == valueTypeInt { + e.e.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + e.e.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + e.e.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64))) + } else { + halt.errorStr2("invalid struct key type: ", keyType.String()) + } + +} + +func (e *encoderCborBytes) kStructSimple(f *encFnInfo, rv reflect.Value) { + _ = e.e + tisfi := f.ti.sfi.source() + + chkCirRef := e.h.CheckCircularRef + var si *structFieldInfo + var j int + + if f.ti.toArray || e.h.StructToArray { + if len(tisfi) == 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } else { + if len(tisfi) == 0 { + e.e.WriteMapEmpty() + return + } + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + e.mapStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.EncodeStringNoEscape4Json(si.encName) + e.mapElemValue() + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteMapEnd() + } +} + +func (e *encoderCborBytes) kStruct(f *encFnInfo, rv reflect.Value) { + _ = e.e + ti := f.ti + toMap := !(ti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if ti.flagMissingFielder { + toMap = true + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + } else if ti.flagMissingFielderPtr { + toMap = true + if rv.CanAddr() { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder).CodecMissingFields() + } else { + mf = rv2i(e.addrRV(rv, ti.rt, ti.ptr)).(MissingFielder).CodecMissingFields() + } + } + newlen := len(mf) + tisfi := ti.sfi.source() + newlen += len(tisfi) + + var fkvs = e.slist.get(newlen)[:newlen] + + recur := e.h.RecursiveEmptyCheck + chkCirRef := e.h.CheckCircularRef + + var xlen int + + var kv sfiRv + var j int + var sf encStructFieldObj + if toMap { + newlen = 0 + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + for _, si := range tisfi { + + if si.omitEmpty { + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyValue(kv.r, e.h.TypeInfos, recur) { + 
continue + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[newlen] = kv + newlen++ + } + + var mf2s []stringIntf + if len(mf) != 0 { + mf2s = make([]stringIntf, 0, len(mf)) + for k, v := range mf { + if k == "" { + continue + } + if ti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur) { + continue + } + mf2s = append(mf2s, stringIntf{k, v}) + } + } + + xlen = newlen + len(mf2s) + if xlen == 0 { + e.e.WriteMapEmpty() + goto END + } + + e.mapStart(xlen) + + if len(mf2s) != 0 && e.h.Canonical { + mf2w := make([]encStructFieldObj, newlen+len(mf2s)) + for j = 0; j < newlen; j++ { + kv = fkvs[j] + mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, true, + !kv.v.encNameEscape4Json, kv.v.encBuiltin} + } + for _, v := range mf2s { + mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false, false} + j++ + } + sort.Sort((encStructFieldObjSlice)(mf2w)) + for j, sf = range mf2w { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && sf.noEsc4json { + e.e.EncodeStringNoEscape4Json(sf.key) + } else { + e.kStructFieldKey(ti.keyType, sf.key) + } + e.mapElemValue() + if sf.isRv { + if sf.builtin { + e.encodeIB(rv2i(baseRVRV(sf.rv))) + } else { + e.encodeValue(sf.rv, nil) + } + } else { + if !e.encodeBuiltin(sf.intf) { + e.encodeR(reflect.ValueOf(sf.intf)) + } + + } + } + } else { + keytyp := ti.keyType + for j = 0; j < newlen; j++ { + kv = fkvs[j] + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && !kv.v.encNameEscape4Json { + e.e.EncodeStringNoEscape4Json(kv.v.encName) + } else { + e.kStructFieldKey(keytyp, kv.v.encName) + } + e.mapElemValue() + if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + for _, v := range mf2s { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.kStructFieldKey(keytyp, v.v) + e.mapElemValue() + if !e.encodeBuiltin(v.i) { + e.encodeR(reflect.ValueOf(v.i)) + } + + j++ + } + } + + e.c = 0 + e.e.WriteMapEnd() + } else { + newlen = len(tisfi) + for i, si := range tisfi { + + if si.omitEmpty { + + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyContainerValue(kv.r, e.h.TypeInfos, recur) { + kv.r = reflect.Value{} + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[i] = kv + } + + if newlen == 0 { + e.e.WriteArrayEmpty() + goto END + } + + e.arrayStart(newlen) + for j = 0; j < newlen; j++ { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + kv = fkvs[j] + if !kv.r.IsValid() { + e.e.EncodeNil() + } else if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } + +END: + + e.slist.put(fkvs) +} + +func (e *encoderCborBytes) kMap(f *encFnInfo, rv reflect.Value) { + _ = e.e + l := rvLenMap(rv) + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.mapStart(l) + + var keyFn, valFn *encFnCborBytes + + ktypeKind := reflect.Kind(f.ti.keykind) + vtypeKind := reflect.Kind(f.ti.elemkind) + + rtval := f.ti.elem + rtvalkind := vtypeKind + for rtvalkind == reflect.Ptr { + rtval = rtval.Elem() + rtvalkind = rtval.Kind() + } + if rtvalkind != reflect.Interface { + valFn = e.fn(rtval) + } + + var rvv = mapAddrLoopvarRV(f.ti.elem, vtypeKind) + + rtkey := f.ti.key + var keyTypeIsString = stringTypId == rt2id(rtkey) + if keyTypeIsString { + keyFn = e.fn(rtkey) + } else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != 
reflect.Interface { + keyFn = e.fn(rtkey) + } + } + + if e.h.Canonical { + e.kMapCanonical(f.ti, rv, rvv, keyFn, valFn) + e.c = 0 + e.e.WriteMapEnd() + return + } + + var rvk = mapAddrLoopvarRV(f.ti.key, ktypeKind) + + var it mapIter + mapRange(&it, rv, rvk, rvv, true) + + kbuiltin := f.ti.tikey.flagEncBuiltin + vbuiltin := f.ti.tielem.flagEncBuiltin + for j := 0; it.Next(); j++ { + rv = it.Key() + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if keyTypeIsString { + e.e.EncodeString(rvGetString(rv)) + } else if kbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(rv, keyFn) + } + e.mapElemValue() + rv = it.Value() + if vbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(it.Value(), valFn) + } + } + it.Done() + + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborBytes) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valFn *encFnCborBytes) { + _ = e.e + + rtkey := ti.key + rtkeydecl := rtkey.PkgPath() == "" && rtkey.Name() != "" + + mks := rv.MapKeys() + rtkeyKind := rtkey.Kind() + mparams := getMapReqParams(ti) + + switch rtkeyKind { + case reflect.Bool: + + if len(mks) == 2 && mks[0].Bool() { + mks[0], mks[1] = mks[1], mks[0] + } + for i := range mks { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeBool(mks[i].Bool()) + } else { + e.encodeValueNonNil(mks[i], keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mks[i], rvv, mparams), valFn) + } + case reflect.String: + mksv := make([]orderedRv[string], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rvGetString(k) + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeString(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]orderedRv[uint64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeUint(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]orderedRv[int64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeInt(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float32: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat32(float32(mksv[i].v)) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float64: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, 
cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat64(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + default: + if rtkey == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + slices.SortFunc(mksv, cmpTimeRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeTime(mksv[i].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + break + } + + bs0 := e.blist.get(len(mks) * 16) + mksv := bs0 + mksbv := make([]bytesRv, len(mks)) + + sideEncode(e.hh, &e.h.sideEncPool, func(se encoderI) { + se.ResetBytes(&mksv) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + se.setContainerState(containerMapKey) + se.encodeR(baseRVRV(k)) + se.atEndOfEncode() + se.writerEnd() + v.r = k + v.v = mksv[l:] + } + }) + + slices.SortFunc(mksbv, cmpBytesRv) + for j := range mksbv { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.writeBytesAsis(mksbv[j].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksbv[j].r, rvv, mparams), valFn) + } + e.blist.put(mksv) + if !byteSliceSameData(bs0, mksv) { + e.blist.put(bs0) + } + } +} + +func (e *encoderCborBytes) init(h Handle) { + initHandle(h) + callMake(&e.e) + e.hh = h + e.h = h.getBasicHandle() + + e.err = errEncoderNotInitialized + + e.fp = e.e.init(h, &e.encoderBase, e).(*fastpathEsCborBytes) + + if e.bytes { + e.rtidFn = &e.h.rtidFnsEncBytes + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtBytes + } else { + e.rtidFn = &e.h.rtidFnsEncIO + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtIO + } + + e.reset() +} + +func (e *encoderCborBytes) reset() { + e.e.reset() + if e.ci != nil { + e.ci = e.ci[:0] + } + e.c = 0 + e.calls = 0 + e.seq = 0 + e.err = nil +} + +func (e *encoderCborBytes) Encode(v interface{}) (err error) { + + defer panicValToErr(e, callRecoverSentinel, &e.err, &err, debugging) + e.mustEncode(v) + return +} + +func (e *encoderCborBytes) MustEncode(v interface{}) { + defer panicValToErr(e, callRecoverSentinel, &e.err, nil, true) + e.mustEncode(v) + return +} + +func (e *encoderCborBytes) mustEncode(v interface{}) { + halt.onerror(e.err) + if e.hh == nil { + halt.onerror(errNoFormatHandle) + } + + e.calls++ + if !e.encodeBuiltin(v) { + e.encodeR(reflect.ValueOf(v)) + } + + e.calls-- + if e.calls == 0 { + e.e.atEndOfEncode() + e.e.writerEnd() + } +} + +func (e *encoderCborBytes) encodeI(iv interface{}) { + if !e.encodeBuiltin(iv) { + e.encodeR(reflect.ValueOf(iv)) + } +} + +func (e *encoderCborBytes) encodeIB(iv interface{}) { + if !e.encodeBuiltin(iv) { + + halt.errorStr("[should not happen] invalid type passed to encodeBuiltin") + } +} + +func (e *encoderCborBytes) encodeR(base reflect.Value) { + e.encodeValue(base, nil) +} + +func (e *encoderCborBytes) encodeBuiltin(iv interface{}) (ok bool) { + ok = true + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + + case Raw: + e.rawBytes(v) + case string: + e.e.EncodeString(v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + 
e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case complex64: + e.encodeComplex64(v) + case complex128: + e.encodeComplex128(v) + case time.Time: + e.e.EncodeTime(v) + case []byte: + e.e.EncodeBytes(v) + default: + + ok = !skipFastpathTypeSwitchInDirectCall && e.dh.fastpathEncodeTypeSwitch(iv, e) + } + return +} + +func (e *encoderCborBytes) encodeValue(rv reflect.Value, fn *encFnCborBytes) { + + var ciPushes int + + var rvp reflect.Value + var rvpValid bool + +RV: + switch rv.Kind() { + case reflect.Ptr: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = true + rvp = rv + rv = rv.Elem() + + if e.h.CheckCircularRef && e.ci.canPushElemKind(rv.Kind()) { + e.ci.push(rv2i(rvp)) + ciPushes++ + } + goto RV + case reflect.Interface: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = false + rvp = reflect.Value{} + rv = rv.Elem() + fn = nil + goto RV + case reflect.Map: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Slice, reflect.Chan: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + goto END + } + + if fn == nil { + fn = e.fn(rv.Type()) + } + + if !fn.i.addrE { + + } else if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + fn.fe(e, &fn.i, rv) + +END: + if ciPushes > 0 { + e.ci.pop(ciPushes) + } +} + +func (e *encoderCborBytes) encodeValueNonNil(rv reflect.Value, fn *encFnCborBytes) { + + if fn.i.addrE { + if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + } + fn.fe(e, &fn.i, rv) +} + +func (e *encoderCborBytes) encodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + e.encodeValue(baseRV(v), e.fn(t)) + } else { + e.encodeValue(baseRV(v), e.fnNoExt(t)) + } +} + +func (e *encoderCborBytes) marshalUtf8(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeString(stringView(bs)) + } +} + +func (e *encoderCborBytes) marshalAsis(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.writeBytesAsis(bs) + } +} + +func (e *encoderCborBytes) marshalRaw(bs []byte, fnerr error) { + halt.onerror(fnerr) + e.e.EncodeBytes(bs) +} + +func (e *encoderCborBytes) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + halt.errorBytes("Raw values cannot be encoded: ", v) + } + e.e.writeBytesAsis(v) +} + +func (e *encoderCborBytes) fn(t reflect.Type) *encFnCborBytes { + return e.dh.encFnViaBH(t, e.rtidFn, e.h, e.fp, false) +} + +func (e *encoderCborBytes) fnNoExt(t reflect.Type) *encFnCborBytes { + return e.dh.encFnViaBH(t, e.rtidFnNoExt, e.h, e.fp, true) +} + +func (e *encoderCborBytes) mapStart(length int) { + e.e.WriteMapStart(length) + e.c = containerMapStart +} + +func (e *encoderCborBytes) mapElemValue() { + e.e.WriteMapElemValue() + e.c = containerMapValue +} + +func (e *encoderCborBytes) arrayStart(length int) { + e.e.WriteArrayStart(length) + e.c = containerArrayStart +} + +func (e *encoderCborBytes) writerEnd() { + e.e.writerEnd() +} + +func (e *encoderCborBytes) atEndOfEncode() { + e.e.atEndOfEncode() +} + +func (e *encoderCborBytes) Reset(w io.Writer) { + if e.bytes { + 
halt.onerror(errEncNoResetBytesWithWriter) + } + e.reset() + if w == nil { + w = io.Discard + } + e.e.resetOutIO(w) +} + +func (e *encoderCborBytes) ResetBytes(out *[]byte) { + if !e.bytes { + halt.onerror(errEncNoResetWriterWithBytes) + } + e.resetBytes(out) +} + +func (e *encoderCborBytes) resetBytes(out *[]byte) { + e.reset() + if out == nil { + out = &bytesEncAppenderDefOut + } + e.e.resetOutBytes(out) +} + +func (helperEncDriverCborBytes) newEncoderBytes(out *[]byte, h Handle) *encoderCborBytes { + var c1 encoderCborBytes + c1.bytes = true + c1.init(h) + c1.ResetBytes(out) + return &c1 +} + +func (helperEncDriverCborBytes) newEncoderIO(out io.Writer, h Handle) *encoderCborBytes { + var c1 encoderCborBytes + c1.bytes = false + c1.init(h) + c1.Reset(out) + return &c1 +} + +func (helperEncDriverCborBytes) encFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathEsCborBytes) (f *fastpathECborBytes, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperEncDriverCborBytes) encFindRtidFn(s []encRtidFnCborBytes, rtid uintptr) (i uint, fn *encFnCborBytes) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperEncDriverCborBytes) encFromRtidFnSlice(fns *atomicRtidFnSlice) (s []encRtidFnCborBytes) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]encRtidFnCborBytes](v)) + } + return +} + +func (dh helperEncDriverCborBytes) encFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, + x *BasicHandle, fp *fastpathEsCborBytes, checkExt bool) (fn *encFnCborBytes) { + return dh.encFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperEncDriverCborBytes) encFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsCborBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnCborBytes) { + rtid := rt2id(rt) + var sp []encRtidFnCborBytes = dh.encFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.encFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.encFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperEncDriverCborBytes) encFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsCborBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnCborBytes) { + + fn = dh.encFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []encRtidFnCborBytes + mu.Lock() + sp = dh.encFromRtidFnSlice(fns) + + if sp == nil { + sp = []encRtidFnCborBytes{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.encFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]encRtidFnCborBytes, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = encRtidFnCborBytes{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperEncDriverCborBytes) encFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp 
*fastpathEsCborBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnCborBytes) { + fn = new(encFnCborBytes) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + if rtid == timeTypId && timeBuiltin { + fn.fe = (*encoderCborBytes).kTime + } else if rtid == rawTypId { + fn.fe = (*encoderCborBytes).raw + } else if rtid == rawExtTypId { + fn.fe = (*encoderCborBytes).rawExt + fi.addrE = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*encoderCborBytes).ext + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fe = (*encoderCborBytes).selferMarshal + fi.addrE = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fe = (*encoderCborBytes).binaryMarshal + fi.addrE = ti.flagBinaryMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fe = (*encoderCborBytes).jsonMarshal + fi.addrE = ti.flagJsonMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fe = (*encoderCborBytes).textMarshal + fi.addrE = ti.flagTextMarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fe = fp[idx].encfn + } + } else { + + xfe, xrt := dh.encFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf := xfe.encfn + fn.fe = func(e *encoderCborBytes, xf *encFnInfo, xrv reflect.Value) { + xfnf(e, xf, rvConvert(xrv, xrt)) + } + } + } + } + if fn.fe == nil { + switch rk { + case reflect.Bool: + fn.fe = (*encoderCborBytes).kBool + case reflect.String: + + fn.fe = (*encoderCborBytes).kString + case reflect.Int: + fn.fe = (*encoderCborBytes).kInt + case reflect.Int8: + fn.fe = (*encoderCborBytes).kInt8 + case reflect.Int16: + fn.fe = (*encoderCborBytes).kInt16 + case reflect.Int32: + fn.fe = (*encoderCborBytes).kInt32 + case reflect.Int64: + fn.fe = (*encoderCborBytes).kInt64 + case reflect.Uint: + fn.fe = (*encoderCborBytes).kUint + case reflect.Uint8: + fn.fe = (*encoderCborBytes).kUint8 + case reflect.Uint16: + fn.fe = (*encoderCborBytes).kUint16 + case reflect.Uint32: + fn.fe = (*encoderCborBytes).kUint32 + case reflect.Uint64: + fn.fe = (*encoderCborBytes).kUint64 + case reflect.Uintptr: + fn.fe = (*encoderCborBytes).kUintptr + case reflect.Float32: + fn.fe = (*encoderCborBytes).kFloat32 + case reflect.Float64: + fn.fe = (*encoderCborBytes).kFloat64 + case reflect.Complex64: + fn.fe = (*encoderCborBytes).kComplex64 + case reflect.Complex128: + fn.fe = (*encoderCborBytes).kComplex128 + case reflect.Chan: + fn.fe = (*encoderCborBytes).kChan + case reflect.Slice: + fn.fe = (*encoderCborBytes).kSlice + case reflect.Array: + fn.fe = (*encoderCborBytes).kArray + case reflect.Struct: + if ti.simple { + fn.fe = (*encoderCborBytes).kStructSimple + } else { + fn.fe = (*encoderCborBytes).kStruct + } + case reflect.Map: + fn.fe = (*encoderCborBytes).kMap + case reflect.Interface: + + 
fn.fe = (*encoderCborBytes).kErr + default: + + fn.fe = (*encoderCborBytes).kErr + } + } + } + return +} +func (d *decoderCborBytes) rawExt(f *decFnInfo, rv reflect.Value) { + d.d.DecodeRawExt(rv2i(rv).(*RawExt)) +} + +func (d *decoderCborBytes) ext(f *decFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (d *decoderCborBytes) selferUnmarshal(_ *decFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(&Decoder{d}) +} + +func (d *decoderCborBytes) binaryUnmarshal(_ *decFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs, _ := d.d.DecodeBytes() + fnerr := bm.UnmarshalBinary(xbs) + halt.onerror(fnerr) +} + +func (d *decoderCborBytes) textUnmarshal(_ *decFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(bytesOKs(d.d.DecodeStringAsBytes())) + halt.onerror(fnerr) +} + +func (d *decoderCborBytes) jsonUnmarshal(_ *decFnInfo, rv reflect.Value) { + d.jsonUnmarshalV(rv2i(rv).(jsonUnmarshaler)) +} + +func (d *decoderCborBytes) jsonUnmarshalV(tm jsonUnmarshaler) { + + halt.onerror(tm.UnmarshalJSON(d.d.nextValueBytes())) +} + +func (d *decoderCborBytes) kErr(_ *decFnInfo, rv reflect.Value) { + halt.errorf("unsupported decoding kind: %s, for %#v", rv.Kind(), rv) + +} + +func (d *decoderCborBytes) raw(_ *decFnInfo, rv reflect.Value) { + rvSetBytes(rv, d.rawBytes()) +} + +func (d *decoderCborBytes) kString(_ *decFnInfo, rv reflect.Value) { + rvSetString(rv, d.detach2Str(d.d.DecodeStringAsBytes())) +} + +func (d *decoderCborBytes) kBool(_ *decFnInfo, rv reflect.Value) { + rvSetBool(rv, d.d.DecodeBool()) +} + +func (d *decoderCborBytes) kTime(_ *decFnInfo, rv reflect.Value) { + rvSetTime(rv, d.d.DecodeTime()) +} + +func (d *decoderCborBytes) kFloat32(_ *decFnInfo, rv reflect.Value) { + rvSetFloat32(rv, d.d.DecodeFloat32()) +} + +func (d *decoderCborBytes) kFloat64(_ *decFnInfo, rv reflect.Value) { + rvSetFloat64(rv, d.d.DecodeFloat64()) +} + +func (d *decoderCborBytes) kComplex64(_ *decFnInfo, rv reflect.Value) { + rvSetComplex64(rv, complex(d.d.DecodeFloat32(), 0)) +} + +func (d *decoderCborBytes) kComplex128(_ *decFnInfo, rv reflect.Value) { + rvSetComplex128(rv, complex(d.d.DecodeFloat64(), 0)) +} + +func (d *decoderCborBytes) kInt(_ *decFnInfo, rv reflect.Value) { + rvSetInt(rv, int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))) +} + +func (d *decoderCborBytes) kInt8(_ *decFnInfo, rv reflect.Value) { + rvSetInt8(rv, int8(chkOvf.IntV(d.d.DecodeInt64(), 8))) +} + +func (d *decoderCborBytes) kInt16(_ *decFnInfo, rv reflect.Value) { + rvSetInt16(rv, int16(chkOvf.IntV(d.d.DecodeInt64(), 16))) +} + +func (d *decoderCborBytes) kInt32(_ *decFnInfo, rv reflect.Value) { + rvSetInt32(rv, int32(chkOvf.IntV(d.d.DecodeInt64(), 32))) +} + +func (d *decoderCborBytes) kInt64(_ *decFnInfo, rv reflect.Value) { + rvSetInt64(rv, d.d.DecodeInt64()) +} + +func (d *decoderCborBytes) kUint(_ *decFnInfo, rv reflect.Value) { + rvSetUint(rv, uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderCborBytes) kUintptr(_ *decFnInfo, rv reflect.Value) { + rvSetUintptr(rv, uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderCborBytes) kUint8(_ *decFnInfo, rv reflect.Value) { + rvSetUint8(rv, uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))) +} + +func (d *decoderCborBytes) kUint16(_ *decFnInfo, rv reflect.Value) { + rvSetUint16(rv, uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))) +} + +func (d *decoderCborBytes) kUint32(_ *decFnInfo, rv reflect.Value) { + rvSetUint32(rv, 
uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))) +} + +func (d *decoderCborBytes) kUint64(_ *decFnInfo, rv reflect.Value) { + rvSetUint64(rv, d.d.DecodeUint64()) +} + +func (d *decoderCborBytes) kInterfaceNaked(f *decFnInfo) (rvn reflect.Value) { + + n := d.naked() + d.d.DecodeNaked() + + if decFailNonEmptyIntf && f.ti.numMeth > 0 { + halt.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + } + + switch n.v { + case valueTypeMap: + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapStrIntfTypId { + var v2 map[string]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + + rvn = rvZeroAddrK(d.h.MapType, reflect.Map) + d.decodeValue(rvn, nil) + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvZeroAddrK(d.h.SliceType, reflect.Slice) + d.decodeValue(rvn, nil) + } + if d.h.PreferArrayOverSlice { + rvn = rvGetArray4Slice(rvn) + } + case valueTypeExt: + tag, bytes := n.u, n.l + bfn := d.h.getExtForTag(tag) + var re = RawExt{Tag: tag} + if bytes == nil { + + if bfn == nil { + d.decode(&re.Value) + rvn = rv4iptr(&re).Elem() + } else if bfn.ext == SelfExt { + rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind()) + d.decodeValue(rvn, d.fnNoExt(bfn.rt)) + } else { + rvn = reflect.New(bfn.rt) + d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext) + rvn = rvn.Elem() + } + } else { + + if bfn == nil { + re.setData(bytes, false) + rvn = rv4iptr(&re).Elem() + } else { + rvn = reflect.New(bfn.rt) + if bfn.ext == SelfExt { + sideDecode(d.hh, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv2i(rvn), bytes, bfn.rt, true) }) + } else { + bfn.ext.ReadExt(rv2i(rvn), bytes) + } + rvn = rvn.Elem() + } + } + + if d.h.PreferPointerForStructOrArray && rvn.CanAddr() { + if rk := rvn.Kind(); rk == reflect.Array || rk == reflect.Struct { + rvn = rvn.Addr() + } + } + case valueTypeNil: + + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + halt.errorStr2("kInterfaceNaked: unexpected valueType: ", n.v.String()) + } + return +} + +func (d *decoderCborBytes) kInterface(f *decFnInfo, rv reflect.Value) { + + isnilrv := rvIsNil(rv) + + var rvn reflect.Value + + if d.h.InterfaceReset { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } else if !isnilrv { + decSetNonNilRV2Zero4Intf(rv) + } + return + } + } else if isnilrv { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } + return + } + } else { + + rvn = rv.Elem() + } + + canDecode, _ := isDecodeable(rvn) + + if !canDecode { + rvn2 := d.oneShotAddrRV(rvn.Type(), rvn.Kind()) + rvSetDirect(rvn2, rvn) + rvn = rvn2 + } + + d.decodeValue(rvn, nil) + rvSetIntf(rv, rvn) +} + +func (d *decoderCborBytes) kStructField(si *structFieldInfo, rv reflect.Value) 
{ + if d.d.TryNil() { + rv = si.fieldNoAlloc(rv, true) + if rv.IsValid() { + decSetNonNilRV2Zero(rv) + } + } else if si.decBuiltin { + rv = rvAddr(si.fieldAlloc(rv), si.ptrTyp) + d.decode(rv2i(rv)) + } else { + fn := d.fn(si.baseTyp) + rv = si.fieldAlloc(rv) + if fn.i.addrD { + rv = rvAddr(rv, si.ptrTyp) + } + fn.fd(d, &fn.i, rv) + } +} + +func (d *decoderCborBytes) kStructSimple(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var rvkencname []byte + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderCborBytes) kStruct(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + var mf MissingFielder + if ti.flagMissingFielder { + mf = rv2i(rv).(MissingFielder) + } else if ti.flagMissingFielderPtr { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var name2 []byte + var rvkencname []byte + tkt := ti.keyType + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + + if tkt == valueTypeString { + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + } else if tkt == valueTypeInt { + rvkencname = strconv.AppendInt(d.b[:0], d.d.DecodeInt64(), 10) + } else if tkt == valueTypeUint { + rvkencname = strconv.AppendUint(d.b[:0], d.d.DecodeUint64(), 10) + } else if tkt == valueTypeFloat { + rvkencname = strconv.AppendFloat(d.b[:0], d.d.DecodeFloat64(), 'f', -1, 64) + } else { + halt.errorStr2("invalid struct key type: ", ti.keyType.String()) + } + + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else if mf != nil { + + name2 = append(name2[:0], rvkencname...) 
+ var f interface{} + d.decode(&f) + if !mf.CodecMissingField(name2, f) && d.h.ErrorIfNoField { + halt.errorStr2("no matching struct field when decoding stream map with key: ", stringView(name2)) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderCborBytes) kSlice(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + rvCanset := rv.CanSet() + + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + rvbs := rvGetBytes(rv) + if rvCanset { + bs2, bst := d.decodeBytesInto(rvbs, false) + if bst != dBytesIntoParamOut { + rvSetBytes(rv, bs2) + } + } else { + + d.decodeBytesInto(rvbs[:len(rvbs):len(rvbs)], true) + } + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset { + if rvIsNil(rv) { + rvSetDirect(rv, rvSliceZeroCap(ti.rt)) + } else { + rvSetSliceLen(rv, 0) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem0Mut := !scalarBitset.isset(ti.elemkind) + rtelem := ti.elem + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnCborBytes + + var rvChanged bool + + var rv0 = rv + var rv9 reflect.Value + + rvlen := rvLenSlice(rv) + rvcap := rvCapSlice(rv) + maxInitLen := d.maxInitLen() + hasLen := containerLenS >= 0 + if hasLen { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen1 := int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + if rvlen1 == rvlen { + } else if rvlen1 <= rvcap { + if rvCanset { + rvlen = rvlen1 + rvSetSliceLen(rv, rvlen) + } + } else if rvCanset { + rvlen = rvlen1 + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && rtelem0Mut { + rvCopySlice(rv, rv0, rtelem) + } + } else if containerLenS != rvlen { + if rvCanset { + rvlen = containerLenS + rvSetSliceLen(rv, rvlen) + } + } + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } + + var j int + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if rvCanset { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into 
non-settable slice") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + + if rvlen < rvcap { + rvlen = rvcap + if rvCanset { + rvSetSliceLen(rv, rvlen) + } else if rvChanged { + rv = rvSlice(rv, rvlen) + } else { + halt.onerror(errExpandSliceCannotChange) + } + } else { + if !(rvCanset || rvChanged) { + halt.onerror(errExpandSliceCannotChange) + } + rv, rvcap, rvCanset = rvGrowSlice(rv, f.ti, rvcap, 1) + + rvlen = rvcap + rvChanged = !rvCanset + } + } + + rv9 = rvArrayIndex(rv, j, f.ti, true) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if j < rvlen { + if rvCanset { + rvSetSliceLen(rv, j) + } else if rvChanged { + rv = rvSlice(rv, j) + } + + } else if j == 0 && rvIsNil(rv) { + if rvCanset { + rv = rvSliceZeroCap(ti.rt) + rvCanset = false + rvChanged = true + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } +} + +func (d *decoderCborBytes) kArray(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + ctyp := d.d.ContainerType() + if handleBytesWithinKArray && (ctyp == valueTypeBytes || ctyp == valueTypeString) { + + if ti.elemkind != uint8(reflect.Uint8) { + halt.errorf("bytes/string in stream can decode into array of bytes, but not %v", ti.rt) + } + rvbs := rvGetArrayBytes(rv, nil) + d.decodeBytesInto(rvbs, true) + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var rv9 reflect.Value + + rvlen := rv.Len() + hasLen := containerLenS >= 0 + if hasLen && containerLenS > rvlen { + halt.errorf("cannot decode into array with length: %v, less than container length: %v", any(rvlen), any(containerLenS)) + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + var fn *decFnCborBytes + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } else { + fn = d.fn(rtelem) + } + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + d.arrayCannotExpand(rvlen, j+1) + d.swallow() + continue + } + + rv9 = rvArrayIndex(rv, j, f.ti, false) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + 
+func (d *decoderCborBytes) kChan(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + if ti.chandir&uint8(reflect.SendDir) == 0 { + halt.errorStr("receive-only channel cannot be decoded") + } + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + bs2, _ := d.d.DecodeBytes() + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + return + } + + var rvCanset = rv.CanSet() + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset && rvIsNil(rv) { + rvSetDirect(rv, reflect.MakeChan(ti.rt, 0)) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + useTransient := decUseTransient && ti.elemkind != byte(reflect.Ptr) && ti.tielem.flagCanTransient + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnCborBytes + + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + var rvlen int + hasLen := containerLenS >= 0 + maxInitLen := d.maxInitLen() + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if hasLen { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + } else { + rvlen = decDefChanCap + } + if rvCanset { + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } else { + halt.errorStr("cannot decode into non-settable chan") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if rv9.IsValid() { + rvSetZero(rv9) + } else if useTransient { + rv9 = d.perType.TransientAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } else { + rv9 = rvZeroAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } + if !d.d.TryNil() { + d.decodeValueNoCheckNil(rv9, fn) + } + rv.Send(rv9) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } + +} + +func (d *decoderCborBytes) kMap(f *decFnInfo, rv reflect.Value) { + _ = d.d + containerLen := d.mapStart(d.d.ReadMapStart()) + ti := f.ti + if rvIsNil(rv) { + rvlen := int(decInferLen(containerLen, d.maxInitLen(), uint(ti.keysize+ti.elemsize))) + rvSetDirect(rv, makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + d.mapEnd() + return + } + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := reflect.Kind(ti.elemkind) + ktypeKind := reflect.Kind(ti.keykind) + mparams := getMapReqParams(ti) + + vtypePtr := vtypeKind == reflect.Ptr + ktypePtr := ktypeKind == reflect.Ptr + + vTransient := decUseTransient && !vtypePtr && ti.tielem.flagCanTransient + + kTransient := vTransient && !ktypePtr && ti.tikey.flagCanTransient + + var vtypeElem reflect.Type + + var keyFn, valFn *decFnCborBytes + var ktypeLo, vtypeLo = ktype, vtype + + if ktypeKind == reflect.Ptr { + for ktypeLo = ktype.Elem(); ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + } + + if vtypePtr { + vtypeElem = vtype.Elem() + for vtypeLo 
= vtypeElem; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + } + + rvkMut := !scalarBitset.isset(ti.keykind) + rvvMut := !scalarBitset.isset(ti.elemkind) + rvvCanNil := isnilBitset.isset(ti.elemkind) + + var rvk, rvkn, rvv, rvvn, rvva, rvvz reflect.Value + + var doMapGet, doMapSet bool + + if !d.h.MapValueReset { + if rvvMut && (vtypeKind != reflect.Interface || !d.h.InterfaceReset) { + doMapGet = true + rvva = mapAddrLoopvarRV(vtype, vtypeKind) + } + } + + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen >= 0 + + var kstr2bs []byte + var kstr string + + var mapKeyStringSharesBytesBuf bool + var att dBytesAttachState + + var vElem, kElem reflect.Type + kbuiltin := ti.tikey.flagDecBuiltin && ti.keykind != uint8(reflect.Slice) + vbuiltin := ti.tielem.flagDecBuiltin + if kbuiltin && ktypePtr { + kElem = ti.key.Elem() + } + if vbuiltin && vtypePtr { + vElem = ti.elem.Elem() + } + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + mapKeyStringSharesBytesBuf = false + kstr = "" + if j == 0 { + + if kTransient { + rvk = d.perType.TransientAddr2K(ktype, ktypeKind) + } else { + rvk = rvZeroAddrK(ktype, ktypeKind) + } + if !rvkMut { + rvkn = rvk + } + if !rvvMut { + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + } + if !ktypeIsString && keyFn == nil { + keyFn = d.fn(ktypeLo) + } + if valFn == nil { + valFn = d.fn(vtypeLo) + } + } else if rvkMut { + rvSetZero(rvk) + } else { + rvk = rvkn + } + + d.mapElemKey(j == 0) + + if d.d.TryNil() { + rvSetZero(rvk) + } else if ktypeIsString { + kstr2bs, att = d.d.DecodeStringAsBytes() + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, att) + rvSetString(rvk, kstr) + } else { + if kbuiltin { + if ktypePtr { + if rvIsNil(rvk) { + rvSetDirect(rvk, reflect.New(kElem)) + } + d.decode(rv2i(rvk)) + } else { + d.decode(rv2i(rvAddr(rvk, ti.tikey.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvk, keyFn) + } + + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() && rvk2.Type() == uint8SliceTyp { + kstr2bs = rvGetBytes(rvk2) + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, dBytesAttachView) + rvSetIntf(rvk, rv4istr(kstr)) + } + + } + } + + if mapKeyStringSharesBytesBuf && d.bufio { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + mapKeyStringSharesBytesBuf = false + } + + d.mapElemValue() + + if d.d.TryNil() { + if mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + + if !rvvz.IsValid() { + rvvz = rvZeroK(vtype, vtypeKind) + } + mapSet(rv, rvk, rvvz, mparams) + continue + } + + doMapSet = true + + if !rvvMut { + rvv = rvvn + } else if !doMapGet { + goto NEW_RVV + } else { + rvv = mapGet(rv, rvk, rvva, mparams) + if !rvv.IsValid() || (rvvCanNil && rvIsNil(rvv)) { + goto NEW_RVV + } + switch vtypeKind { + case reflect.Ptr, reflect.Map: + doMapSet = false + case reflect.Interface: + + rvvn = rvv.Elem() + if k := rvvn.Kind(); (k == reflect.Ptr || k == reflect.Map) && !rvIsNil(rvvn) { + d.decodeValueNoCheckNil(rvvn, nil) + continue + } + + rvvn = rvZeroAddrK(vtype, vtypeKind) + rvSetIntf(rvvn, rvv) + rvv = rvvn + default: + + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + rvSetDirect(rvvn, rvv) + rvv = rvvn + } + } 
+ goto DECODE_VALUE_NO_CHECK_NIL + + NEW_RVV: + if vtypePtr { + rvv = reflect.New(vtypeElem) + } else if vTransient { + rvv = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvv = rvZeroAddrK(vtype, vtypeKind) + } + + DECODE_VALUE_NO_CHECK_NIL: + if doMapSet && mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + if vbuiltin { + if vtypePtr { + if rvIsNil(rvv) { + rvSetDirect(rvv, reflect.New(vElem)) + } + d.decode(rv2i(rvv)) + } else { + d.decode(rv2i(rvAddr(rvv, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvv, valFn) + } + if doMapSet { + mapSet(rv, rvk, rvv, mparams) + } + } + + d.mapEnd() +} + +func (d *decoderCborBytes) init(h Handle) { + initHandle(h) + callMake(&d.d) + d.hh = h + d.h = h.getBasicHandle() + + d.err = errDecoderNotInitialized + + if d.h.InternString && d.is == nil { + d.is.init() + } + + d.fp = d.d.init(h, &d.decoderBase, d).(*fastpathDsCborBytes) + + if d.bytes { + d.rtidFn = &d.h.rtidFnsDecBytes + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtBytes + } else { + d.bufio = d.h.ReaderBufferSize > 0 + d.rtidFn = &d.h.rtidFnsDecIO + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtIO + } + + d.reset() + +} + +func (d *decoderCborBytes) reset() { + d.d.reset() + d.err = nil + d.c = 0 + d.depth = 0 + d.calls = 0 + + d.maxdepth = decDefMaxDepth + if d.h.MaxDepth > 0 { + d.maxdepth = d.h.MaxDepth + } + d.mtid = 0 + d.stid = 0 + d.mtr = false + d.str = false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + _, d.mtr = fastpathAvIndex(d.mtid) + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + _, d.str = fastpathAvIndex(d.stid) + } +} + +func (d *decoderCborBytes) Reset(r io.Reader) { + if d.bytes { + halt.onerror(errDecNoResetBytesWithReader) + } + d.reset() + if r == nil { + r = &eofReader + } + d.d.resetInIO(r) +} + +func (d *decoderCborBytes) ResetBytes(in []byte) { + if !d.bytes { + halt.onerror(errDecNoResetReaderWithBytes) + } + d.resetBytes(in) +} + +func (d *decoderCborBytes) resetBytes(in []byte) { + d.reset() + if in == nil { + in = zeroByteSlice + } + d.d.resetInBytes(in) +} + +func (d *decoderCborBytes) ResetString(s string) { + d.ResetBytes(bytesView(s)) +} + +func (d *decoderCborBytes) Decode(v interface{}) (err error) { + + defer panicValToErr(d, callRecoverSentinel, &d.err, &err, debugging) + d.mustDecode(v) + return +} + +func (d *decoderCborBytes) MustDecode(v interface{}) { + defer panicValToErr(d, callRecoverSentinel, &d.err, nil, true) + d.mustDecode(v) + return +} + +func (d *decoderCborBytes) mustDecode(v interface{}) { + halt.onerror(d.err) + if d.hh == nil { + halt.onerror(errNoFormatHandle) + } + + d.calls++ + d.decode(v) + d.calls-- +} + +func (d *decoderCborBytes) Release() {} + +func (d *decoderCborBytes) swallow() { + d.d.nextValueBytes() +} + +func (d *decoderCborBytes) nextValueBytes() []byte { + return d.d.nextValueBytes() +} + +func (d *decoderCborBytes) decode(iv interface{}) { + _ = d.d + + rv, ok := isNil(iv, true) + if ok { + halt.onerror(errCannotDecodeIntoNil) + } + + switch v := iv.(type) { + + case *string: + *v = d.detach2Str(d.d.DecodeStringAsBytes()) + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + 
*v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *uintptr: + *v = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *float32: + *v = d.d.DecodeFloat32() + case *float64: + *v = d.d.DecodeFloat64() + case *complex64: + *v = complex(d.d.DecodeFloat32(), 0) + case *complex128: + *v = complex(d.d.DecodeFloat64(), 0) + case *[]byte: + *v, _ = d.decodeBytesInto(*v, false) + case []byte: + + d.decodeBytesInto(v[:len(v):len(v)], true) + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(rv4iptr(v), nil) + + case reflect.Value: + if ok, _ = isDecodeable(v); !ok { + d.haltAsNotDecodeable(v) + } + d.decodeValue(v, nil) + + default: + + if skipFastpathTypeSwitchInDirectCall || !d.dh.fastpathDecodeTypeSwitch(iv, d) { + if !rv.IsValid() { + rv = reflect.ValueOf(iv) + } + if ok, _ = isDecodeable(rv); !ok { + d.haltAsNotDecodeable(rv) + } + d.decodeValue(rv, nil) + } + } +} + +func (d *decoderCborBytes) decodeValue(rv reflect.Value, fn *decFnCborBytes) { + if d.d.TryNil() { + decSetNonNilRV2Zero(rv) + } else { + d.decodeValueNoCheckNil(rv, fn) + } +} + +func (d *decoderCborBytes) decodeValueNoCheckNil(rv reflect.Value, fn *decFnCborBytes) { + + var rvp reflect.Value + var rvpValid bool +PTR: + if rv.Kind() == reflect.Ptr { + rvpValid = true + if rvIsNil(rv) { + rvSetDirect(rv, reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + goto PTR + } + + if fn == nil { + fn = d.fn(rv.Type()) + } + if fn.i.addrD { + if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else if fn.i.addrDf { + halt.errorStr("cannot decode into a non-pointer value") + } + } + fn.fd(d, &fn.i, rv) +} + +func (d *decoderCborBytes) decodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + d.decodeValue(baseRV(v), d.fn(t)) + } else { + d.decodeValue(baseRV(v), d.fnNoExt(t)) + } +} + +func (d *decoderCborBytes) structFieldNotFound(index int, rvkencname string) { + + if d.h.ErrorIfNoField { + if index >= 0 { + halt.errorInt("no matching struct field found when decoding stream array at index ", int64(index)) + } else if rvkencname != "" { + halt.errorStr2("no matching struct field found when decoding stream map with key ", rvkencname) + } + } + d.swallow() +} + +func (d *decoderCborBytes) decodeBytesInto(out []byte, mustFit bool) (v []byte, state dBytesIntoState) { + v, att := d.d.DecodeBytes() + if cap(v) == 0 || (att >= dBytesAttachViewZerocopy && !mustFit) { + + return + } + if len(v) == 0 { + v = zeroByteSlice + return + } + if len(out) == len(v) { + state = dBytesIntoParamOut + } else if cap(out) >= len(v) { + out = out[:len(v)] + state = dBytesIntoParamOutSlice + } else if mustFit { + halt.errorf("bytes capacity insufficient for decoded bytes: got/expected: %d/%d", len(v), len(out)) + } else { + out = make([]byte, len(v)) + state = dBytesIntoNew + } + copy(out, v) + v = out + return +} + +func (d *decoderCborBytes) rawBytes() (v []byte) { + + v = d.d.nextValueBytes() + if d.bytes && !d.h.ZeroCopy { + vv := make([]byte, len(v)) + copy(vv, v) + v = vv + } + return +} + +func (d *decoderCborBytes) wrapErr(v error, err *error) { + *err = wrapCodecErr(v, d.hh.Name(), d.d.NumBytesRead(), false) +} + +func (d *decoderCborBytes) NumBytesRead() int { + return 
d.d.NumBytesRead() +} + +func (d *decoderCborBytes) containerNext(j, containerLen int, hasLen bool) bool { + + if hasLen { + return j < containerLen + } + return !d.d.CheckBreak() +} + +func (d *decoderCborBytes) mapElemKey(firstTime bool) { + d.d.ReadMapElemKey(firstTime) + d.c = containerMapKey +} + +func (d *decoderCborBytes) mapElemValue() { + d.d.ReadMapElemValue() + d.c = containerMapValue +} + +func (d *decoderCborBytes) mapEnd() { + d.d.ReadMapEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderCborBytes) arrayElem(firstTime bool) { + d.d.ReadArrayElem(firstTime) + d.c = containerArrayElem +} + +func (d *decoderCborBytes) arrayEnd() { + d.d.ReadArrayEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderCborBytes) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) { + + var vv interface{} + d.decode(&vv) + ext.UpdateExt(v, vv) + +} + +func (d *decoderCborBytes) fn(t reflect.Type) *decFnCborBytes { + return d.dh.decFnViaBH(t, d.rtidFn, d.h, d.fp, false) +} + +func (d *decoderCborBytes) fnNoExt(t reflect.Type) *decFnCborBytes { + return d.dh.decFnViaBH(t, d.rtidFnNoExt, d.h, d.fp, true) +} + +func (helperDecDriverCborBytes) newDecoderBytes(in []byte, h Handle) *decoderCborBytes { + var c1 decoderCborBytes + c1.bytes = true + c1.init(h) + c1.ResetBytes(in) + return &c1 +} + +func (helperDecDriverCborBytes) newDecoderIO(in io.Reader, h Handle) *decoderCborBytes { + var c1 decoderCborBytes + c1.init(h) + c1.Reset(in) + return &c1 +} + +func (helperDecDriverCborBytes) decFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathDsCborBytes) (f *fastpathDCborBytes, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperDecDriverCborBytes) decFindRtidFn(s []decRtidFnCborBytes, rtid uintptr) (i uint, fn *decFnCborBytes) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperDecDriverCborBytes) decFromRtidFnSlice(fns *atomicRtidFnSlice) (s []decRtidFnCborBytes) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]decRtidFnCborBytes](v)) + } + return +} + +func (dh helperDecDriverCborBytes) decFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, x *BasicHandle, fp *fastpathDsCborBytes, + checkExt bool) (fn *decFnCborBytes) { + return dh.decFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperDecDriverCborBytes) decFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsCborBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnCborBytes) { + rtid := rt2id(rt) + var sp []decRtidFnCborBytes = dh.decFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.decFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.decFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperDecDriverCborBytes) decFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsCborBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnCborBytes) { + + 
fn = dh.decFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []decRtidFnCborBytes + mu.Lock() + sp = dh.decFromRtidFnSlice(fns) + + if sp == nil { + sp = []decRtidFnCborBytes{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.decFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]decRtidFnCborBytes, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = decRtidFnCborBytes{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperDecDriverCborBytes) decFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathDsCborBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnCborBytes) { + fn = new(decFnCborBytes) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + fi.addrDf = true + + if rtid == timeTypId && timeBuiltin { + fn.fd = (*decoderCborBytes).kTime + } else if rtid == rawTypId { + fn.fd = (*decoderCborBytes).raw + } else if rtid == rawExtTypId { + fn.fd = (*decoderCborBytes).rawExt + fi.addrD = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fd = (*decoderCborBytes).ext + fi.addrD = true + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fd = (*decoderCborBytes).selferUnmarshal + fi.addrD = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fd = (*decoderCborBytes).binaryUnmarshal + fi.addrD = ti.flagBinaryUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fd = (*decoderCborBytes).jsonUnmarshal + fi.addrD = ti.flagJsonUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fd = (*decoderCborBytes).textUnmarshal + fi.addrD = ti.flagTextUnmarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fd = fp[idx].decfn + fi.addrD = true + fi.addrDf = false + if rk == reflect.Array { + fi.addrD = false + } + } + } else { + + xfe, xrt := dh.decFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf2 := xfe.decfn + if rk == reflect.Array { + fi.addrD = false + fn.fd = func(d *decoderCborBytes, xf *decFnInfo, xrv reflect.Value) { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } else { + fi.addrD = true + fi.addrDf = false + xptr2rt := reflect.PointerTo(xrt) + fn.fd = func(d *decoderCborBytes, xf *decFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, rvConvert(xrv, xptr2rt)) + } else { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } + } + } + } + } + if fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fd = (*decoderCborBytes).kBool + case reflect.String: + fn.fd = (*decoderCborBytes).kString + case reflect.Int: + fn.fd = (*decoderCborBytes).kInt + case reflect.Int8: + fn.fd = (*decoderCborBytes).kInt8 + case reflect.Int16: + fn.fd = (*decoderCborBytes).kInt16 + case reflect.Int32: + fn.fd = (*decoderCborBytes).kInt32 + case 
reflect.Int64: + fn.fd = (*decoderCborBytes).kInt64 + case reflect.Uint: + fn.fd = (*decoderCborBytes).kUint + case reflect.Uint8: + fn.fd = (*decoderCborBytes).kUint8 + case reflect.Uint16: + fn.fd = (*decoderCborBytes).kUint16 + case reflect.Uint32: + fn.fd = (*decoderCborBytes).kUint32 + case reflect.Uint64: + fn.fd = (*decoderCborBytes).kUint64 + case reflect.Uintptr: + fn.fd = (*decoderCborBytes).kUintptr + case reflect.Float32: + fn.fd = (*decoderCborBytes).kFloat32 + case reflect.Float64: + fn.fd = (*decoderCborBytes).kFloat64 + case reflect.Complex64: + fn.fd = (*decoderCborBytes).kComplex64 + case reflect.Complex128: + fn.fd = (*decoderCborBytes).kComplex128 + case reflect.Chan: + fn.fd = (*decoderCborBytes).kChan + case reflect.Slice: + fn.fd = (*decoderCborBytes).kSlice + case reflect.Array: + fi.addrD = false + fn.fd = (*decoderCborBytes).kArray + case reflect.Struct: + if ti.simple { + fn.fd = (*decoderCborBytes).kStructSimple + } else { + fn.fd = (*decoderCborBytes).kStruct + } + case reflect.Map: + fn.fd = (*decoderCborBytes).kMap + case reflect.Interface: + + fn.fd = (*decoderCborBytes).kInterface + default: + + fn.fd = (*decoderCborBytes).kErr + } + } + } + return +} +func (e *cborEncDriverBytes) EncodeNil() { + e.w.writen1(cborBdNil) +} + +func (e *cborEncDriverBytes) EncodeBool(b bool) { + if b { + e.w.writen1(cborBdTrue) + } else { + e.w.writen1(cborBdFalse) + } +} + +func (e *cborEncDriverBytes) EncodeFloat32(f float32) { + b := math.Float32bits(f) + if e.h.OptimumSize { + if h := floatToHalfFloatBits(b); halfFloatToFloatBits(h) == b { + e.w.writen1(cborBdFloat16) + e.w.writen2(bigen.PutUint16(h)) + return + } + } + e.w.writen1(cborBdFloat32) + e.w.writen4(bigen.PutUint32(b)) +} + +func (e *cborEncDriverBytes) EncodeFloat64(f float64) { + if e.h.OptimumSize { + if f32 := float32(f); float64(f32) == f { + e.EncodeFloat32(f32) + return + } + } + e.w.writen1(cborBdFloat64) + e.w.writen8(bigen.PutUint64(math.Float64bits(f))) +} + +func (e *cborEncDriverBytes) encUint(v uint64, bd byte) { + if v <= 0x17 { + e.w.writen1(byte(v) + bd) + } else if v <= math.MaxUint8 { + e.w.writen2(bd+0x18, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 0x19) + e.w.writen2(bigen.PutUint16(uint16(v))) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 0x1a) + e.w.writen4(bigen.PutUint32(uint32(v))) + } else { + e.w.writen1(bd + 0x1b) + e.w.writen8(bigen.PutUint64(v)) + } +} + +func (e *cborEncDriverBytes) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-1-v), cborBaseNegInt) + } else { + e.encUint(uint64(v), cborBaseUint) + } +} + +func (e *cborEncDriverBytes) EncodeUint(v uint64) { + e.encUint(v, cborBaseUint) +} + +func (e *cborEncDriverBytes) encLen(bd byte, length int) { + e.encUint(uint64(length), bd) +} + +func (e *cborEncDriverBytes) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + } else if e.h.TimeRFC3339 { + e.encUint(0, cborBaseTag) + e.encStringBytesS(cborBaseString, stringView(t.AppendFormat(e.b[:0], time.RFC3339Nano))) + } else { + e.encUint(1, cborBaseTag) + t = t.UTC().Round(time.Microsecond) + sec, nsec := t.Unix(), uint64(t.Nanosecond()) + if nsec == 0 { + e.EncodeInt(sec) + } else { + e.EncodeFloat64(float64(sec) + float64(nsec)/1e9) + } + } +} + +func (e *cborEncDriverBytes) EncodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + e.encUint(uint64(xtag), cborBaseTag) + if ext == SelfExt { + e.enc.encodeAs(rv, basetype, false) + } else if v := ext.ConvertExt(rv); v == nil { + e.writeNilBytes() + } else { + e.enc.encodeI(v) 
+ } +} + +func (e *cborEncDriverBytes) EncodeRawExt(re *RawExt) { + e.encUint(uint64(re.Tag), cborBaseTag) + if re.Data != nil { + e.w.writeb(re.Data) + } else if re.Value != nil { + e.enc.encodeI(re.Value) + } else { + e.EncodeNil() + } +} + +func (e *cborEncDriverBytes) WriteArrayEmpty() { + if e.h.IndefiniteLength { + e.w.writen2(cborBdIndefiniteArray, cborBdBreak) + } else { + e.w.writen1(cborBaseArray) + + } +} + +func (e *cborEncDriverBytes) WriteMapEmpty() { + if e.h.IndefiniteLength { + e.w.writen2(cborBdIndefiniteMap, cborBdBreak) + } else { + e.w.writen1(cborBaseMap) + + } +} + +func (e *cborEncDriverBytes) WriteArrayStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteArray) + } else { + e.encLen(cborBaseArray, length) + } +} + +func (e *cborEncDriverBytes) WriteMapStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteMap) + } else { + e.encLen(cborBaseMap, length) + } +} + +func (e *cborEncDriverBytes) WriteMapEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriverBytes) WriteArrayEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriverBytes) EncodeString(v string) { + bb := cborBaseString + if e.h.StringToRaw { + bb = cborBaseBytes + } + e.encStringBytesS(bb, v) +} + +func (e *cborEncDriverBytes) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) } + +func (e *cborEncDriverBytes) EncodeStringBytesRaw(v []byte) { + e.encStringBytesS(cborBaseBytes, stringView(v)) +} + +func (e *cborEncDriverBytes) encStringBytesS(bb byte, v string) { + if e.h.IndefiniteLength { + if bb == cborBaseBytes { + e.w.writen1(cborBdIndefiniteBytes) + } else { + e.w.writen1(cborBdIndefiniteString) + } + vlen := uint(len(v)) + n := max(4, min(vlen/4, 1024)) + for i := uint(0); i < vlen; { + i2 := i + n + if i2 >= vlen { + i2 = vlen + } + v2 := v[i:i2] + e.encLen(bb, len(v2)) + e.w.writestr(v2) + i = i2 + } + e.w.writen1(cborBdBreak) + } else { + e.encLen(bb, len(v)) + e.w.writestr(v) + } +} + +func (e *cborEncDriverBytes) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *cborEncDriverBytes) writeNilOr(v byte) { + if !e.h.NilCollectionToZeroLength { + v = cborBdNil + } + e.w.writen1(v) +} + +func (e *cborEncDriverBytes) writeNilArray() { + e.writeNilOr(cborBaseArray) +} + +func (e *cborEncDriverBytes) writeNilMap() { + e.writeNilOr(cborBaseMap) +} + +func (e *cborEncDriverBytes) writeNilBytes() { + e.writeNilOr(cborBaseBytes) +} + +func (d *cborDecDriverBytes) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *cborDecDriverBytes) advanceNil() (null bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return true + } + return +} + +func (d *cborDecDriverBytes) TryNil() bool { + return d.advanceNil() +} + +func (d *cborDecDriverBytes) skipTags() { + for d.bd>>5 == cborMajorTag { + d.decUint() + d.bd = d.r.readn1() + } +} + +func (d *cborDecDriverBytes) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + if d.bd == cborBdNil { + d.bdRead = false + return valueTypeNil + } + major := d.bd >> 5 + if major == cborMajorBytes { + return valueTypeBytes + } else if major == cborMajorString { + return valueTypeString + } else if major == cborMajorArray { + return valueTypeArray + } else if major == cborMajorMap { + return valueTypeMap + } + return valueTypeUnset +} + 
+func (d *cborDecDriverBytes) CheckBreak() (v bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdBreak { + d.bdRead = false + v = true + } + return +} + +func (d *cborDecDriverBytes) decUint() (ui uint64) { + v := d.bd & 0x1f + if v <= 0x17 { + ui = uint64(v) + } else if v == 0x18 { + ui = uint64(d.r.readn1()) + } else if v == 0x19 { + ui = uint64(bigen.Uint16(d.r.readn2())) + } else if v == 0x1a { + ui = uint64(bigen.Uint32(d.r.readn4())) + } else if v == 0x1b { + ui = uint64(bigen.Uint64(d.r.readn8())) + } else { + halt.errorf("invalid descriptor decoding uint: %x/%s (%x)", d.bd, cbordesc(d.bd), v) + } + return +} + +func (d *cborDecDriverBytes) decLen() int { + return int(d.decUint()) +} + +func (d *cborDecDriverBytes) decFloat() (f float64, ok bool) { + ok = true + switch d.bd { + case cborBdFloat16: + f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readn2())))) + case cborBdFloat32: + f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4()))) + case cborBdFloat64: + f = math.Float64frombits(bigen.Uint64(d.r.readn8())) + default: + if d.bd>>5 == cborMajorTag { + + switch d.bd & 0x1f { + case 2: + f = d.decTagBigIntAsFloat(false) + case 3: + f = d.decTagBigIntAsFloat(true) + case 4: + f = d.decTagBigFloatAsFloat(true) + case 5: + f = d.decTagBigFloatAsFloat(false) + default: + ok = false + } + } else { + ok = false + } + } + return +} + +func (d *cborDecDriverBytes) decInteger() (ui uint64, neg, ok bool) { + ok = true + switch d.bd >> 5 { + case cborMajorUint: + ui = d.decUint() + case cborMajorNegInt: + ui = d.decUint() + neg = true + default: + ok = false + } + return +} + +func (d *cborDecDriverBytes) DecodeInt64() (i int64) { + if d.advanceNil() { + return + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + v1, v2, v3 := d.decInteger() + i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, true) + d.bdRead = false + return +} + +func (d *cborDecDriverBytes) DecodeUint64() (ui uint64) { + if d.advanceNil() { + return + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger()) + d.bdRead = false + return +} + +func (d *cborDecDriverBytes) DecodeFloat64() (f float64) { + if d.advanceNil() { + return + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + v1, v2 := d.decFloat() + f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, true) + d.bdRead = false + return +} + +func (d *cborDecDriverBytes) DecodeBool() (b bool) { + if d.advanceNil() { + return + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + if d.bd == cborBdTrue { + b = true + } else if d.bd == cborBdFalse { + } else { + halt.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd)) + } + d.bdRead = false + return +} + +func (d *cborDecDriverBytes) ReadMapStart() (length int) { + if d.advanceNil() { + return containerLenNil + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + d.bdRead = false + if d.bd == cborBdIndefiniteMap { + return containerLenUnknown + } + if d.bd>>5 != cborMajorMap { + halt.errorf("error reading map; got major type: %x, expected %x/%s", d.bd>>5, cborMajorMap, cbordesc(d.bd)) + } + return d.decLen() +} + +func (d *cborDecDriverBytes) ReadArrayStart() (length int) { + if d.advanceNil() { + return containerLenNil + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + d.bdRead = false + if d.bd == cborBdIndefiniteArray { + return containerLenUnknown + } + if d.bd>>5 != cborMajorArray { + halt.errorf("invalid array; got major type: %x, expect: %x/%s", d.bd>>5, cborMajorArray, 
cbordesc(d.bd)) + } + return d.decLen() +} + +func (d *cborDecDriverBytes) DecodeBytes() (bs []byte, state dBytesAttachState) { + if d.advanceNil() { + return + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + fnEnsureNonNilBytes := func() { + + if bs == nil { + bs = zeroByteSlice + state = dBytesDetach + } + } + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + major := d.bd >> 5 + val4str := d.h.ValidateUnicode && major == cborMajorString + bs = d.d.buf[:0] + d.bdRead = false + for !d.CheckBreak() { + if d.bd>>5 != major { + const msg = "malformed indefinite string/bytes %x (%s); " + + "contains chunk with major type %v, expected %v" + halt.errorf(msg, d.bd, cbordesc(d.bd), d.bd>>5, major) + } + n := uint(d.decLen()) + bs = append(bs, d.r.readx(n)...) + d.bdRead = false + if val4str && !utf8.Valid(bs[len(bs)-int(n):]) { + const msg = "indefinite-length text string contains chunk " + + "that is not a valid utf-8 sequence: 0x%x" + halt.errorf(msg, bs[len(bs)-int(n):]) + } + } + d.bdRead = false + d.d.buf = bs + state = dBytesAttachBuffer + fnEnsureNonNilBytes() + return + } + if d.bd == cborBdIndefiniteArray { + d.bdRead = false + bs = d.d.buf[:0] + for !d.CheckBreak() { + bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) + } + d.d.buf = bs + state = dBytesAttachBuffer + fnEnsureNonNilBytes() + return + } + var cond bool + if d.bd>>5 == cborMajorArray { + d.bdRead = false + slen := d.decLen() + bs, cond = usableByteSlice(d.d.buf, slen) + for i := 0; i < len(bs); i++ { + bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8)) + } + for i := len(bs); i < slen; i++ { + bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) + } + if cond { + d.d.buf = bs + } + state = dBytesAttachBuffer + fnEnsureNonNilBytes() + return + } + clen := d.decLen() + d.bdRead = false + bs, cond = d.r.readxb(uint(clen)) + state = d.d.attachState(cond) + return +} + +func (d *cborDecDriverBytes) DecodeStringAsBytes() (out []byte, state dBytesAttachState) { + out, state = d.DecodeBytes() + if d.h.ValidateUnicode && !utf8.Valid(out) { + halt.errorf("DecodeStringAsBytes: invalid UTF-8: %s", out) + } + return +} + +func (d *cborDecDriverBytes) DecodeTime() (t time.Time) { + if d.advanceNil() { + return + } + if d.bd>>5 != cborMajorTag { + halt.errorf("error reading tag; expected major type: %x, got: %x", cborMajorTag, d.bd>>5) + } + xtag := d.decUint() + d.bdRead = false + return d.decodeTime(xtag) +} + +func (d *cborDecDriverBytes) decodeTime(xtag uint64) (t time.Time) { + switch xtag { + case 0: + var err error + t, err = time.Parse(time.RFC3339, stringView(bytesOKs(d.DecodeStringAsBytes()))) + halt.onerror(err) + case 1: + f1, f2 := math.Modf(d.DecodeFloat64()) + t = time.Unix(int64(f1), int64(f2*1e9)) + default: + halt.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag) + } + t = t.UTC().Round(time.Microsecond) + return +} + +func (d *cborDecDriverBytes) preDecodeExt(checkTag bool, xtag uint64) (realxtag uint64, ok bool) { + if d.advanceNil() { + return + } + if d.bd>>5 != cborMajorTag { + halt.errorf("error reading tag; expected major type: %x, got: %x", cborMajorTag, d.bd>>5) + } + realxtag = d.decUint() + d.bdRead = false + if checkTag && xtag != realxtag { + halt.errorf("Wrong extension tag. Got %b. 
Expecting: %v", realxtag, xtag) + } + ok = true + return +} + +func (d *cborDecDriverBytes) DecodeRawExt(re *RawExt) { + if realxtag, ok := d.preDecodeExt(false, 0); ok { + re.Tag = realxtag + d.dec.decode(&re.Value) + d.bdRead = false + } +} + +func (d *cborDecDriverBytes) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + if _, ok := d.preDecodeExt(true, xtag); ok { + if ext == SelfExt { + d.dec.decodeAs(rv, basetype, false) + } else { + d.dec.interfaceExtConvertAndDecode(rv, ext) + } + d.bdRead = false + } +} + +func (d *cborDecDriverBytes) decTagBigIntAsFloat(neg bool) (f float64) { + bs, _ := d.DecodeBytes() + bi := new(big.Int).SetBytes(bs) + if neg { + bi0 := bi + bi = new(big.Int).Sub(big.NewInt(-1), bi0) + } + f, _ = bi.Float64() + return +} + +func (d *cborDecDriverBytes) decTagBigFloatAsFloat(decimal bool) (f float64) { + if nn := d.r.readn1(); nn != 82 { + halt.errorf("(%d) decoding decimal/big.Float: expected 2 numbers", nn) + } + exp := d.DecodeInt64() + mant := d.DecodeInt64() + if decimal { + + rf := readFloatResult{exp: int8(exp)} + if mant >= 0 { + rf.mantissa = uint64(mant) + } else { + rf.neg = true + rf.mantissa = uint64(-mant) + } + f, _ = parseFloat64_reader(rf) + + } else { + + bfm := new(big.Float).SetPrec(64).SetInt64(mant) + bf := new(big.Float).SetPrec(64).SetMantExp(bfm, int(exp)) + f, _ = bf.Float64() + } + return +} + +func (d *cborDecDriverBytes) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.naked() + var decodeFurther bool + switch d.bd >> 5 { + case cborMajorUint: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt64() + } else { + n.v = valueTypeUint + n.u = d.DecodeUint64() + } + case cborMajorNegInt: + n.v = valueTypeInt + n.i = d.DecodeInt64() + case cborMajorBytes: + d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) + case cborMajorString: + n.v = valueTypeString + n.s = d.d.detach2Str(d.DecodeStringAsBytes()) + case cborMajorArray: + n.v = valueTypeArray + decodeFurther = true + case cborMajorMap: + n.v = valueTypeMap + decodeFurther = true + case cborMajorTag: + n.v = valueTypeExt + n.u = d.decUint() + d.bdRead = false + n.l = nil + xx := d.h.getExtForTag(n.u) + if xx == nil { + switch n.u { + case 0, 1: + n.v = valueTypeTime + n.t = d.decodeTime(n.u) + case 2: + n.f = d.decTagBigIntAsFloat(false) + n.v = valueTypeFloat + case 3: + n.f = d.decTagBigIntAsFloat(true) + n.v = valueTypeFloat + case 4: + n.f = d.decTagBigFloatAsFloat(true) + n.v = valueTypeFloat + case 5: + n.f = d.decTagBigFloatAsFloat(false) + n.v = valueTypeFloat + case 55799: + d.DecodeNaked() + default: + if d.h.SkipUnexpectedTags { + d.DecodeNaked() + } + + } + return + } + + case cborMajorSimpleOrFloat: + switch d.bd { + case cborBdNil, cborBdUndefined: + n.v = valueTypeNil + case cborBdFalse: + n.v = valueTypeBool + n.b = false + case cborBdTrue: + n.v = valueTypeBool + n.b = true + case cborBdFloat16, cborBdFloat32, cborBdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat64() + default: + halt.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + } + default: + halt.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + } + if !decodeFurther { + d.bdRead = false + } +} + +func (d *cborDecDriverBytes) uintBytes() (v []byte, ui uint64) { + + switch vv := d.bd & 0x1f; vv { + case 0x18: + v = d.r.readx(1) + ui = uint64(v[0]) + case 0x19: + v = d.r.readx(2) + ui = uint64(bigenstd.Uint16(v)) + case 0x1a: + v = d.r.readx(4) + ui = uint64(bigenstd.Uint32(v)) + case 0x1b: + v = d.r.readx(8) + ui = 
uint64(bigenstd.Uint64(v)) + default: + if vv > 0x1b { + halt.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd)) + } + ui = uint64(vv) + } + return +} + +func (d *cborDecDriverBytes) nextValueBytes() (v []byte) { + if !d.bdRead { + d.readNextBd() + } + d.r.startRecording() + d.nextValueBytesBdReadR() + v = d.r.stopRecording() + d.bdRead = false + return +} + +func (d *cborDecDriverBytes) nextValueBytesBdReadR() { + + var ui uint64 + + switch d.bd >> 5 { + case cborMajorUint, cborMajorNegInt: + d.uintBytes() + case cborMajorString, cborMajorBytes: + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + for { + d.readNextBd() + if d.bd == cborBdBreak { + break + } + _, ui = d.uintBytes() + d.r.skip(uint(ui)) + } + } else { + _, ui = d.uintBytes() + d.r.skip(uint(ui)) + } + case cborMajorArray: + if d.bd == cborBdIndefiniteArray { + for { + d.readNextBd() + if d.bd == cborBdBreak { + break + } + d.nextValueBytesBdReadR() + } + } else { + _, ui = d.uintBytes() + for i := uint64(0); i < ui; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + } + } + case cborMajorMap: + if d.bd == cborBdIndefiniteMap { + for { + d.readNextBd() + if d.bd == cborBdBreak { + break + } + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + } else { + _, ui = d.uintBytes() + for i := uint64(0); i < ui; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + } + case cborMajorTag: + d.uintBytes() + d.readNextBd() + d.nextValueBytesBdReadR() + case cborMajorSimpleOrFloat: + switch d.bd { + case cborBdNil, cborBdUndefined, cborBdFalse, cborBdTrue: + case cborBdFloat16: + d.r.skip(2) + case cborBdFloat32: + d.r.skip(4) + case cborBdFloat64: + d.r.skip(8) + default: + halt.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd) + } + default: + halt.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd) + } + return +} + +func (d *cborDecDriverBytes) reset() { + d.bdAndBdread.reset() + +} + +func (d *cborEncDriverBytes) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*CborHandle) + d.e = shared + if shared.bytes { + fp = cborFpEncBytes + } else { + fp = cborFpEncIO + } + + d.init2(enc) + return +} + +func (e *cborEncDriverBytes) writeBytesAsis(b []byte) { e.w.writeb(b) } + +func (e *cborEncDriverBytes) writerEnd() { e.w.end() } + +func (e *cborEncDriverBytes) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *cborEncDriverBytes) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +func (d *cborDecDriverBytes) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*CborHandle) + d.d = shared + if shared.bytes { + fp = cborFpDecBytes + } else { + fp = cborFpDecIO + } + + d.init2(dec) + return +} + +func (d *cborDecDriverBytes) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *cborDecDriverBytes) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *cborDecDriverBytes) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +func (d *cborDecDriverBytes) descBd() string { + return sprintf("%v (%s)", d.bd, cbordesc(d.bd)) +} + +func (d *cborDecDriverBytes) DecodeFloat32() (f float32) { + return float32(chkOvf.Float32V(d.DecodeFloat64())) +} + +func (d *cborEncDriverBytes) init2(enc encoderI) { + d.enc = enc +} + +func (d *cborDecDriverBytes) init2(dec decoderI) { + d.dec = dec + +} + +type 
helperEncDriverCborIO struct{} +type encFnCborIO struct { + i encFnInfo + fe func(*encoderCborIO, *encFnInfo, reflect.Value) +} +type encRtidFnCborIO struct { + rtid uintptr + fn *encFnCborIO +} +type encoderCborIO struct { + dh helperEncDriverCborIO + fp *fastpathEsCborIO + e cborEncDriverIO + encoderBase +} +type helperDecDriverCborIO struct{} +type decFnCborIO struct { + i decFnInfo + fd func(*decoderCborIO, *decFnInfo, reflect.Value) +} +type decRtidFnCborIO struct { + rtid uintptr + fn *decFnCborIO +} +type decoderCborIO struct { + dh helperDecDriverCborIO + fp *fastpathDsCborIO + d cborDecDriverIO + decoderBase +} +type cborEncDriverIO struct { + noBuiltInTypes + encDriverNoState + encDriverNoopContainerWriter + encDriverContainerNoTrackerT + + h *CborHandle + e *encoderBase + w bufioEncWriter + enc encoderI + + b [40]byte +} +type cborDecDriverIO struct { + decDriverNoopContainerReader + + noBuiltInTypes + + h *CborHandle + d *decoderBase + r ioDecReader + dec decoderI + bdAndBdread +} + +func (e *encoderCborIO) rawExt(_ *encFnInfo, rv reflect.Value) { + if re := rv2i(rv).(*RawExt); re == nil { + e.e.EncodeNil() + } else { + e.e.EncodeRawExt(re) + } +} + +func (e *encoderCborIO) ext(f *encFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (e *encoderCborIO) selferMarshal(_ *encFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(&Encoder{e}) +} + +func (e *encoderCborIO) binaryMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) +} + +func (e *encoderCborIO) textMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) +} + +func (e *encoderCborIO) jsonMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) +} + +func (e *encoderCborIO) raw(_ *encFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *encoderCborIO) encodeComplex64(v complex64) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat32(real(v)) +} + +func (e *encoderCborIO) encodeComplex128(v complex128) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat64(real(v)) +} + +func (e *encoderCborIO) kBool(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeBool(rvGetBool(rv)) +} + +func (e *encoderCborIO) kTime(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeTime(rvGetTime(rv)) +} + +func (e *encoderCborIO) kString(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeString(rvGetString(rv)) +} + +func (e *encoderCborIO) kFloat32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(rvGetFloat32(rv)) +} + +func (e *encoderCborIO) kFloat64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rvGetFloat64(rv)) +} + +func (e *encoderCborIO) kComplex64(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex64(rvGetComplex64(rv)) +} + +func (e *encoderCborIO) kComplex128(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex128(rvGetComplex128(rv)) +} + +func (e *encoderCborIO) kInt(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt(rv))) +} + +func (e *encoderCborIO) kInt8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt8(rv))) +} + +func (e *encoderCborIO) kInt16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt16(rv))) +} + +func (e 
*encoderCborIO) kInt32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt32(rv))) +} + +func (e *encoderCborIO) kInt64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt64(rv))) +} + +func (e *encoderCborIO) kUint(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint(rv))) +} + +func (e *encoderCborIO) kUint8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint8(rv))) +} + +func (e *encoderCborIO) kUint16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint16(rv))) +} + +func (e *encoderCborIO) kUint32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint32(rv))) +} + +func (e *encoderCborIO) kUint64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint64(rv))) +} + +func (e *encoderCborIO) kUintptr(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUintptr(rv))) +} + +func (e *encoderCborIO) kSeqFn(rt reflect.Type) (fn *encFnCborIO) { + + if rt = baseRT(rt); rt.Kind() != reflect.Interface { + fn = e.fn(rt) + } + return +} + +func (e *encoderCborIO) kArrayWMbs(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(l) + e.mapStart(l >> 1) + + var fn *encFnCborIO + builtin := ti.tielem.flagEncBuiltin + if !builtin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerMapKey + e.e.WriteMapElemKey(true) + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(false) + } else { + e.mapElemValue() + } + } + e.c = 0 + e.e.WriteMapEnd() + +} + +func (e *encoderCborIO) kArrayW(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l <= 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(l) + + var fn *encFnCborIO + if !ti.tielem.flagEncBuiltin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerArrayElem + e.e.WriteArrayElem(true) + builtin := ti.tielem.flagEncBuiltin + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + e.c = containerArrayElem + e.e.WriteArrayElem(false) + } + + e.c = 0 + e.e.WriteArrayEnd() +} + +func (e *encoderCborIO) kChan(f *encFnInfo, rv reflect.Value) { + if f.ti.chandir&uint8(reflect.RecvDir) == 0 { + halt.errorStr("send-only channel cannot be encoded") + } + if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) { + e.kSliceBytesChan(rv) + return + } + rtslice := reflect.SliceOf(f.ti.elem) + rv = chanToSlice(rv, rtslice, e.h.ChanRecvTimeout) + ti := e.h.getTypeInfo(rt2id(rtslice), rtslice) + if f.ti.mbs { + e.kArrayWMbs(rv, ti, true) + } else { + e.kArrayW(rv, ti, true) + } +} + +func (e *encoderCborIO) kSlice(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, true) + } else if f.ti.rtid == uint8SliceTypId || uint8TypId == rt2id(f.ti.elem) { + + e.e.EncodeBytes(rvGetBytes(rv)) + } else { + e.kArrayW(rv, f.ti, true) + } +} + +func (e *encoderCborIO) kArray(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, false) + } else if handleBytesWithinKArray && uint8TypId == rt2id(f.ti.elem) { + e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, nil)) + } else { + e.kArrayW(rv, f.ti, false) + } +} + +func (e 
*encoderCborIO) kSliceBytesChan(rv reflect.Value) { + + bs0 := e.blist.peek(32, true) + bs := bs0 + + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + +L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + + break L1 + } + } + default: + for b := range ch { + bs = append(bs, b) + } + } + + e.e.EncodeBytes(bs) + e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.blist.put(bs0) + } +} + +func (e *encoderCborIO) kStructFieldKey(keyType valueType, encName string) { + + if keyType == valueTypeString { + e.e.EncodeString(encName) + } else if keyType == valueTypeInt { + e.e.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + e.e.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + e.e.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64))) + } else { + halt.errorStr2("invalid struct key type: ", keyType.String()) + } + +} + +func (e *encoderCborIO) kStructSimple(f *encFnInfo, rv reflect.Value) { + _ = e.e + tisfi := f.ti.sfi.source() + + chkCirRef := e.h.CheckCircularRef + var si *structFieldInfo + var j int + + if f.ti.toArray || e.h.StructToArray { + if len(tisfi) == 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } else { + if len(tisfi) == 0 { + e.e.WriteMapEmpty() + return + } + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + e.mapStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.EncodeStringNoEscape4Json(si.encName) + e.mapElemValue() + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteMapEnd() + } +} + +func (e *encoderCborIO) kStruct(f *encFnInfo, rv reflect.Value) { + _ = e.e + ti := f.ti + toMap := !(ti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if ti.flagMissingFielder { + toMap = true + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + } else if ti.flagMissingFielderPtr { + toMap = true + if rv.CanAddr() { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder).CodecMissingFields() + } else { + mf = rv2i(e.addrRV(rv, ti.rt, ti.ptr)).(MissingFielder).CodecMissingFields() + } + } + newlen := len(mf) + tisfi := ti.sfi.source() + newlen += len(tisfi) + + var fkvs = e.slist.get(newlen)[:newlen] + + recur := e.h.RecursiveEmptyCheck + chkCirRef := e.h.CheckCircularRef + + var xlen int + + var kv sfiRv + var j int + var sf encStructFieldObj + if toMap { + newlen = 0 + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + for _, si := range tisfi { + + if si.omitEmpty { + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyValue(kv.r, e.h.TypeInfos, recur) { + continue + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[newlen] = kv + newlen++ + } + + var mf2s []stringIntf + if len(mf) != 0 { + mf2s = make([]stringIntf, 0, len(mf)) + for k, v := range mf { + if k == "" { + continue + } + if ti.infoFieldOmitempty && 
isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur) { + continue + } + mf2s = append(mf2s, stringIntf{k, v}) + } + } + + xlen = newlen + len(mf2s) + if xlen == 0 { + e.e.WriteMapEmpty() + goto END + } + + e.mapStart(xlen) + + if len(mf2s) != 0 && e.h.Canonical { + mf2w := make([]encStructFieldObj, newlen+len(mf2s)) + for j = 0; j < newlen; j++ { + kv = fkvs[j] + mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, true, + !kv.v.encNameEscape4Json, kv.v.encBuiltin} + } + for _, v := range mf2s { + mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false, false} + j++ + } + sort.Sort((encStructFieldObjSlice)(mf2w)) + for j, sf = range mf2w { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && sf.noEsc4json { + e.e.EncodeStringNoEscape4Json(sf.key) + } else { + e.kStructFieldKey(ti.keyType, sf.key) + } + e.mapElemValue() + if sf.isRv { + if sf.builtin { + e.encodeIB(rv2i(baseRVRV(sf.rv))) + } else { + e.encodeValue(sf.rv, nil) + } + } else { + if !e.encodeBuiltin(sf.intf) { + e.encodeR(reflect.ValueOf(sf.intf)) + } + + } + } + } else { + keytyp := ti.keyType + for j = 0; j < newlen; j++ { + kv = fkvs[j] + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && !kv.v.encNameEscape4Json { + e.e.EncodeStringNoEscape4Json(kv.v.encName) + } else { + e.kStructFieldKey(keytyp, kv.v.encName) + } + e.mapElemValue() + if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + for _, v := range mf2s { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.kStructFieldKey(keytyp, v.v) + e.mapElemValue() + if !e.encodeBuiltin(v.i) { + e.encodeR(reflect.ValueOf(v.i)) + } + + j++ + } + } + + e.c = 0 + e.e.WriteMapEnd() + } else { + newlen = len(tisfi) + for i, si := range tisfi { + + if si.omitEmpty { + + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyContainerValue(kv.r, e.h.TypeInfos, recur) { + kv.r = reflect.Value{} + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[i] = kv + } + + if newlen == 0 { + e.e.WriteArrayEmpty() + goto END + } + + e.arrayStart(newlen) + for j = 0; j < newlen; j++ { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + kv = fkvs[j] + if !kv.r.IsValid() { + e.e.EncodeNil() + } else if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } + +END: + + e.slist.put(fkvs) +} + +func (e *encoderCborIO) kMap(f *encFnInfo, rv reflect.Value) { + _ = e.e + l := rvLenMap(rv) + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.mapStart(l) + + var keyFn, valFn *encFnCborIO + + ktypeKind := reflect.Kind(f.ti.keykind) + vtypeKind := reflect.Kind(f.ti.elemkind) + + rtval := f.ti.elem + rtvalkind := vtypeKind + for rtvalkind == reflect.Ptr { + rtval = rtval.Elem() + rtvalkind = rtval.Kind() + } + if rtvalkind != reflect.Interface { + valFn = e.fn(rtval) + } + + var rvv = mapAddrLoopvarRV(f.ti.elem, vtypeKind) + + rtkey := f.ti.key + var keyTypeIsString = stringTypId == rt2id(rtkey) + if keyTypeIsString { + keyFn = e.fn(rtkey) + } else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + keyFn = e.fn(rtkey) + } + } + + if e.h.Canonical { + e.kMapCanonical(f.ti, rv, rvv, keyFn, valFn) + e.c = 0 + e.e.WriteMapEnd() + return + } + + var rvk = mapAddrLoopvarRV(f.ti.key, ktypeKind) + + var it mapIter + mapRange(&it, rv, rvk, rvv, true) + + kbuiltin := f.ti.tikey.flagEncBuiltin + 
vbuiltin := f.ti.tielem.flagEncBuiltin + for j := 0; it.Next(); j++ { + rv = it.Key() + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if keyTypeIsString { + e.e.EncodeString(rvGetString(rv)) + } else if kbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(rv, keyFn) + } + e.mapElemValue() + rv = it.Value() + if vbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(it.Value(), valFn) + } + } + it.Done() + + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderCborIO) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valFn *encFnCborIO) { + _ = e.e + + rtkey := ti.key + rtkeydecl := rtkey.PkgPath() == "" && rtkey.Name() != "" + + mks := rv.MapKeys() + rtkeyKind := rtkey.Kind() + mparams := getMapReqParams(ti) + + switch rtkeyKind { + case reflect.Bool: + + if len(mks) == 2 && mks[0].Bool() { + mks[0], mks[1] = mks[1], mks[0] + } + for i := range mks { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeBool(mks[i].Bool()) + } else { + e.encodeValueNonNil(mks[i], keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mks[i], rvv, mparams), valFn) + } + case reflect.String: + mksv := make([]orderedRv[string], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rvGetString(k) + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeString(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]orderedRv[uint64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeUint(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]orderedRv[int64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeInt(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float32: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat32(float32(mksv[i].v)) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float64: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat64(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + default: + if rtkey == timeTyp { + mksv := 
make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + slices.SortFunc(mksv, cmpTimeRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeTime(mksv[i].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + break + } + + bs0 := e.blist.get(len(mks) * 16) + mksv := bs0 + mksbv := make([]bytesRv, len(mks)) + + sideEncode(e.hh, &e.h.sideEncPool, func(se encoderI) { + se.ResetBytes(&mksv) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + se.setContainerState(containerMapKey) + se.encodeR(baseRVRV(k)) + se.atEndOfEncode() + se.writerEnd() + v.r = k + v.v = mksv[l:] + } + }) + + slices.SortFunc(mksbv, cmpBytesRv) + for j := range mksbv { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.writeBytesAsis(mksbv[j].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksbv[j].r, rvv, mparams), valFn) + } + e.blist.put(mksv) + if !byteSliceSameData(bs0, mksv) { + e.blist.put(bs0) + } + } +} + +func (e *encoderCborIO) init(h Handle) { + initHandle(h) + callMake(&e.e) + e.hh = h + e.h = h.getBasicHandle() + + e.err = errEncoderNotInitialized + + e.fp = e.e.init(h, &e.encoderBase, e).(*fastpathEsCborIO) + + if e.bytes { + e.rtidFn = &e.h.rtidFnsEncBytes + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtBytes + } else { + e.rtidFn = &e.h.rtidFnsEncIO + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtIO + } + + e.reset() +} + +func (e *encoderCborIO) reset() { + e.e.reset() + if e.ci != nil { + e.ci = e.ci[:0] + } + e.c = 0 + e.calls = 0 + e.seq = 0 + e.err = nil +} + +func (e *encoderCborIO) Encode(v interface{}) (err error) { + + defer panicValToErr(e, callRecoverSentinel, &e.err, &err, debugging) + e.mustEncode(v) + return +} + +func (e *encoderCborIO) MustEncode(v interface{}) { + defer panicValToErr(e, callRecoverSentinel, &e.err, nil, true) + e.mustEncode(v) + return +} + +func (e *encoderCborIO) mustEncode(v interface{}) { + halt.onerror(e.err) + if e.hh == nil { + halt.onerror(errNoFormatHandle) + } + + e.calls++ + if !e.encodeBuiltin(v) { + e.encodeR(reflect.ValueOf(v)) + } + + e.calls-- + if e.calls == 0 { + e.e.atEndOfEncode() + e.e.writerEnd() + } +} + +func (e *encoderCborIO) encodeI(iv interface{}) { + if !e.encodeBuiltin(iv) { + e.encodeR(reflect.ValueOf(iv)) + } +} + +func (e *encoderCborIO) encodeIB(iv interface{}) { + if !e.encodeBuiltin(iv) { + + halt.errorStr("[should not happen] invalid type passed to encodeBuiltin") + } +} + +func (e *encoderCborIO) encodeR(base reflect.Value) { + e.encodeValue(base, nil) +} + +func (e *encoderCborIO) encodeBuiltin(iv interface{}) (ok bool) { + ok = true + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + + case Raw: + e.rawBytes(v) + case string: + e.e.EncodeString(v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case complex64: + e.encodeComplex64(v) + case complex128: + e.encodeComplex128(v) + case time.Time: + e.e.EncodeTime(v) + case []byte: + e.e.EncodeBytes(v) + default: + + ok = !skipFastpathTypeSwitchInDirectCall && 
e.dh.fastpathEncodeTypeSwitch(iv, e) + } + return +} + +func (e *encoderCborIO) encodeValue(rv reflect.Value, fn *encFnCborIO) { + + var ciPushes int + + var rvp reflect.Value + var rvpValid bool + +RV: + switch rv.Kind() { + case reflect.Ptr: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = true + rvp = rv + rv = rv.Elem() + + if e.h.CheckCircularRef && e.ci.canPushElemKind(rv.Kind()) { + e.ci.push(rv2i(rvp)) + ciPushes++ + } + goto RV + case reflect.Interface: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = false + rvp = reflect.Value{} + rv = rv.Elem() + fn = nil + goto RV + case reflect.Map: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Slice, reflect.Chan: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + goto END + } + + if fn == nil { + fn = e.fn(rv.Type()) + } + + if !fn.i.addrE { + + } else if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + fn.fe(e, &fn.i, rv) + +END: + if ciPushes > 0 { + e.ci.pop(ciPushes) + } +} + +func (e *encoderCborIO) encodeValueNonNil(rv reflect.Value, fn *encFnCborIO) { + + if fn.i.addrE { + if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + } + fn.fe(e, &fn.i, rv) +} + +func (e *encoderCborIO) encodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + e.encodeValue(baseRV(v), e.fn(t)) + } else { + e.encodeValue(baseRV(v), e.fnNoExt(t)) + } +} + +func (e *encoderCborIO) marshalUtf8(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeString(stringView(bs)) + } +} + +func (e *encoderCborIO) marshalAsis(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.writeBytesAsis(bs) + } +} + +func (e *encoderCborIO) marshalRaw(bs []byte, fnerr error) { + halt.onerror(fnerr) + e.e.EncodeBytes(bs) +} + +func (e *encoderCborIO) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + halt.errorBytes("Raw values cannot be encoded: ", v) + } + e.e.writeBytesAsis(v) +} + +func (e *encoderCborIO) fn(t reflect.Type) *encFnCborIO { + return e.dh.encFnViaBH(t, e.rtidFn, e.h, e.fp, false) +} + +func (e *encoderCborIO) fnNoExt(t reflect.Type) *encFnCborIO { + return e.dh.encFnViaBH(t, e.rtidFnNoExt, e.h, e.fp, true) +} + +func (e *encoderCborIO) mapStart(length int) { + e.e.WriteMapStart(length) + e.c = containerMapStart +} + +func (e *encoderCborIO) mapElemValue() { + e.e.WriteMapElemValue() + e.c = containerMapValue +} + +func (e *encoderCborIO) arrayStart(length int) { + e.e.WriteArrayStart(length) + e.c = containerArrayStart +} + +func (e *encoderCborIO) writerEnd() { + e.e.writerEnd() +} + +func (e *encoderCborIO) atEndOfEncode() { + e.e.atEndOfEncode() +} + +func (e *encoderCborIO) Reset(w io.Writer) { + if e.bytes { + halt.onerror(errEncNoResetBytesWithWriter) + } + e.reset() + if w == nil { + w = io.Discard + } + e.e.resetOutIO(w) +} + +func (e *encoderCborIO) ResetBytes(out *[]byte) { + if !e.bytes { + halt.onerror(errEncNoResetWriterWithBytes) + } + e.resetBytes(out) +} + +func (e *encoderCborIO) resetBytes(out *[]byte) { + e.reset() + if out == nil { + out = &bytesEncAppenderDefOut + } + e.e.resetOutBytes(out) +} + +func (helperEncDriverCborIO) 
newEncoderBytes(out *[]byte, h Handle) *encoderCborIO { + var c1 encoderCborIO + c1.bytes = true + c1.init(h) + c1.ResetBytes(out) + return &c1 +} + +func (helperEncDriverCborIO) newEncoderIO(out io.Writer, h Handle) *encoderCborIO { + var c1 encoderCborIO + c1.bytes = false + c1.init(h) + c1.Reset(out) + return &c1 +} + +func (helperEncDriverCborIO) encFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathEsCborIO) (f *fastpathECborIO, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperEncDriverCborIO) encFindRtidFn(s []encRtidFnCborIO, rtid uintptr) (i uint, fn *encFnCborIO) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperEncDriverCborIO) encFromRtidFnSlice(fns *atomicRtidFnSlice) (s []encRtidFnCborIO) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]encRtidFnCborIO](v)) + } + return +} + +func (dh helperEncDriverCborIO) encFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, + x *BasicHandle, fp *fastpathEsCborIO, checkExt bool) (fn *encFnCborIO) { + return dh.encFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperEncDriverCborIO) encFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsCborIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnCborIO) { + rtid := rt2id(rt) + var sp []encRtidFnCborIO = dh.encFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.encFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.encFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperEncDriverCborIO) encFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsCborIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnCborIO) { + + fn = dh.encFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []encRtidFnCborIO + mu.Lock() + sp = dh.encFromRtidFnSlice(fns) + + if sp == nil { + sp = []encRtidFnCborIO{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.encFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]encRtidFnCborIO, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = encRtidFnCborIO{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperEncDriverCborIO) encFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathEsCborIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnCborIO) { + fn = new(encFnCborIO) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + if rtid == timeTypId && timeBuiltin { + fn.fe = (*encoderCborIO).kTime + } else if rtid == rawTypId { + fn.fe = (*encoderCborIO).raw + } else if rtid == rawExtTypId { + fn.fe = (*encoderCborIO).rawExt + fi.addrE = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + 
fn.fe = (*encoderCborIO).ext + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fe = (*encoderCborIO).selferMarshal + fi.addrE = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fe = (*encoderCborIO).binaryMarshal + fi.addrE = ti.flagBinaryMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fe = (*encoderCborIO).jsonMarshal + fi.addrE = ti.flagJsonMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fe = (*encoderCborIO).textMarshal + fi.addrE = ti.flagTextMarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fe = fp[idx].encfn + } + } else { + + xfe, xrt := dh.encFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf := xfe.encfn + fn.fe = func(e *encoderCborIO, xf *encFnInfo, xrv reflect.Value) { + xfnf(e, xf, rvConvert(xrv, xrt)) + } + } + } + } + if fn.fe == nil { + switch rk { + case reflect.Bool: + fn.fe = (*encoderCborIO).kBool + case reflect.String: + + fn.fe = (*encoderCborIO).kString + case reflect.Int: + fn.fe = (*encoderCborIO).kInt + case reflect.Int8: + fn.fe = (*encoderCborIO).kInt8 + case reflect.Int16: + fn.fe = (*encoderCborIO).kInt16 + case reflect.Int32: + fn.fe = (*encoderCborIO).kInt32 + case reflect.Int64: + fn.fe = (*encoderCborIO).kInt64 + case reflect.Uint: + fn.fe = (*encoderCborIO).kUint + case reflect.Uint8: + fn.fe = (*encoderCborIO).kUint8 + case reflect.Uint16: + fn.fe = (*encoderCborIO).kUint16 + case reflect.Uint32: + fn.fe = (*encoderCborIO).kUint32 + case reflect.Uint64: + fn.fe = (*encoderCborIO).kUint64 + case reflect.Uintptr: + fn.fe = (*encoderCborIO).kUintptr + case reflect.Float32: + fn.fe = (*encoderCborIO).kFloat32 + case reflect.Float64: + fn.fe = (*encoderCborIO).kFloat64 + case reflect.Complex64: + fn.fe = (*encoderCborIO).kComplex64 + case reflect.Complex128: + fn.fe = (*encoderCborIO).kComplex128 + case reflect.Chan: + fn.fe = (*encoderCborIO).kChan + case reflect.Slice: + fn.fe = (*encoderCborIO).kSlice + case reflect.Array: + fn.fe = (*encoderCborIO).kArray + case reflect.Struct: + if ti.simple { + fn.fe = (*encoderCborIO).kStructSimple + } else { + fn.fe = (*encoderCborIO).kStruct + } + case reflect.Map: + fn.fe = (*encoderCborIO).kMap + case reflect.Interface: + + fn.fe = (*encoderCborIO).kErr + default: + + fn.fe = (*encoderCborIO).kErr + } + } + } + return +} +func (d *decoderCborIO) rawExt(f *decFnInfo, rv reflect.Value) { + d.d.DecodeRawExt(rv2i(rv).(*RawExt)) +} + +func (d *decoderCborIO) ext(f *decFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (d *decoderCborIO) selferUnmarshal(_ *decFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(&Decoder{d}) +} + +func (d *decoderCborIO) binaryUnmarshal(_ *decFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs, _ := d.d.DecodeBytes() + fnerr := bm.UnmarshalBinary(xbs) + 
halt.onerror(fnerr) +} + +func (d *decoderCborIO) textUnmarshal(_ *decFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(bytesOKs(d.d.DecodeStringAsBytes())) + halt.onerror(fnerr) +} + +func (d *decoderCborIO) jsonUnmarshal(_ *decFnInfo, rv reflect.Value) { + d.jsonUnmarshalV(rv2i(rv).(jsonUnmarshaler)) +} + +func (d *decoderCborIO) jsonUnmarshalV(tm jsonUnmarshaler) { + + halt.onerror(tm.UnmarshalJSON(d.d.nextValueBytes())) +} + +func (d *decoderCborIO) kErr(_ *decFnInfo, rv reflect.Value) { + halt.errorf("unsupported decoding kind: %s, for %#v", rv.Kind(), rv) + +} + +func (d *decoderCborIO) raw(_ *decFnInfo, rv reflect.Value) { + rvSetBytes(rv, d.rawBytes()) +} + +func (d *decoderCborIO) kString(_ *decFnInfo, rv reflect.Value) { + rvSetString(rv, d.detach2Str(d.d.DecodeStringAsBytes())) +} + +func (d *decoderCborIO) kBool(_ *decFnInfo, rv reflect.Value) { + rvSetBool(rv, d.d.DecodeBool()) +} + +func (d *decoderCborIO) kTime(_ *decFnInfo, rv reflect.Value) { + rvSetTime(rv, d.d.DecodeTime()) +} + +func (d *decoderCborIO) kFloat32(_ *decFnInfo, rv reflect.Value) { + rvSetFloat32(rv, d.d.DecodeFloat32()) +} + +func (d *decoderCborIO) kFloat64(_ *decFnInfo, rv reflect.Value) { + rvSetFloat64(rv, d.d.DecodeFloat64()) +} + +func (d *decoderCborIO) kComplex64(_ *decFnInfo, rv reflect.Value) { + rvSetComplex64(rv, complex(d.d.DecodeFloat32(), 0)) +} + +func (d *decoderCborIO) kComplex128(_ *decFnInfo, rv reflect.Value) { + rvSetComplex128(rv, complex(d.d.DecodeFloat64(), 0)) +} + +func (d *decoderCborIO) kInt(_ *decFnInfo, rv reflect.Value) { + rvSetInt(rv, int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))) +} + +func (d *decoderCborIO) kInt8(_ *decFnInfo, rv reflect.Value) { + rvSetInt8(rv, int8(chkOvf.IntV(d.d.DecodeInt64(), 8))) +} + +func (d *decoderCborIO) kInt16(_ *decFnInfo, rv reflect.Value) { + rvSetInt16(rv, int16(chkOvf.IntV(d.d.DecodeInt64(), 16))) +} + +func (d *decoderCborIO) kInt32(_ *decFnInfo, rv reflect.Value) { + rvSetInt32(rv, int32(chkOvf.IntV(d.d.DecodeInt64(), 32))) +} + +func (d *decoderCborIO) kInt64(_ *decFnInfo, rv reflect.Value) { + rvSetInt64(rv, d.d.DecodeInt64()) +} + +func (d *decoderCborIO) kUint(_ *decFnInfo, rv reflect.Value) { + rvSetUint(rv, uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderCborIO) kUintptr(_ *decFnInfo, rv reflect.Value) { + rvSetUintptr(rv, uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderCborIO) kUint8(_ *decFnInfo, rv reflect.Value) { + rvSetUint8(rv, uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))) +} + +func (d *decoderCborIO) kUint16(_ *decFnInfo, rv reflect.Value) { + rvSetUint16(rv, uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))) +} + +func (d *decoderCborIO) kUint32(_ *decFnInfo, rv reflect.Value) { + rvSetUint32(rv, uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))) +} + +func (d *decoderCborIO) kUint64(_ *decFnInfo, rv reflect.Value) { + rvSetUint64(rv, d.d.DecodeUint64()) +} + +func (d *decoderCborIO) kInterfaceNaked(f *decFnInfo) (rvn reflect.Value) { + + n := d.naked() + d.d.DecodeNaked() + + if decFailNonEmptyIntf && f.ti.numMeth > 0 { + halt.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + } + + switch n.v { + case valueTypeMap: + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapStrIntfTypId { + var v2 map[string]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if mtid == mapIntfIntfTypId { + var 
v2 map[interface{}]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + + rvn = rvZeroAddrK(d.h.MapType, reflect.Map) + d.decodeValue(rvn, nil) + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvZeroAddrK(d.h.SliceType, reflect.Slice) + d.decodeValue(rvn, nil) + } + if d.h.PreferArrayOverSlice { + rvn = rvGetArray4Slice(rvn) + } + case valueTypeExt: + tag, bytes := n.u, n.l + bfn := d.h.getExtForTag(tag) + var re = RawExt{Tag: tag} + if bytes == nil { + + if bfn == nil { + d.decode(&re.Value) + rvn = rv4iptr(&re).Elem() + } else if bfn.ext == SelfExt { + rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind()) + d.decodeValue(rvn, d.fnNoExt(bfn.rt)) + } else { + rvn = reflect.New(bfn.rt) + d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext) + rvn = rvn.Elem() + } + } else { + + if bfn == nil { + re.setData(bytes, false) + rvn = rv4iptr(&re).Elem() + } else { + rvn = reflect.New(bfn.rt) + if bfn.ext == SelfExt { + sideDecode(d.hh, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv2i(rvn), bytes, bfn.rt, true) }) + } else { + bfn.ext.ReadExt(rv2i(rvn), bytes) + } + rvn = rvn.Elem() + } + } + + if d.h.PreferPointerForStructOrArray && rvn.CanAddr() { + if rk := rvn.Kind(); rk == reflect.Array || rk == reflect.Struct { + rvn = rvn.Addr() + } + } + case valueTypeNil: + + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + halt.errorStr2("kInterfaceNaked: unexpected valueType: ", n.v.String()) + } + return +} + +func (d *decoderCborIO) kInterface(f *decFnInfo, rv reflect.Value) { + + isnilrv := rvIsNil(rv) + + var rvn reflect.Value + + if d.h.InterfaceReset { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } else if !isnilrv { + decSetNonNilRV2Zero4Intf(rv) + } + return + } + } else if isnilrv { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } + return + } + } else { + + rvn = rv.Elem() + } + + canDecode, _ := isDecodeable(rvn) + + if !canDecode { + rvn2 := d.oneShotAddrRV(rvn.Type(), rvn.Kind()) + rvSetDirect(rvn2, rvn) + rvn = rvn2 + } + + d.decodeValue(rvn, nil) + rvSetIntf(rv, rvn) +} + +func (d *decoderCborIO) kStructField(si *structFieldInfo, rv reflect.Value) { + if d.d.TryNil() { + rv = si.fieldNoAlloc(rv, true) + if rv.IsValid() { + decSetNonNilRV2Zero(rv) + } + } else if si.decBuiltin { + rv = rvAddr(si.fieldAlloc(rv), si.ptrTyp) + d.decode(rv2i(rv)) + } else { + fn := d.fn(si.baseTyp) + rv = si.fieldAlloc(rv) + if fn.i.addrD { + rv = rvAddr(rv, si.ptrTyp) + } + fn.fd(d, &fn.i, rv) + } +} + +func (d *decoderCborIO) kStructSimple(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var rvkencname []byte + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + sab, att := 
d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderCborIO) kStruct(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + var mf MissingFielder + if ti.flagMissingFielder { + mf = rv2i(rv).(MissingFielder) + } else if ti.flagMissingFielderPtr { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var name2 []byte + var rvkencname []byte + tkt := ti.keyType + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + + if tkt == valueTypeString { + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + } else if tkt == valueTypeInt { + rvkencname = strconv.AppendInt(d.b[:0], d.d.DecodeInt64(), 10) + } else if tkt == valueTypeUint { + rvkencname = strconv.AppendUint(d.b[:0], d.d.DecodeUint64(), 10) + } else if tkt == valueTypeFloat { + rvkencname = strconv.AppendFloat(d.b[:0], d.d.DecodeFloat64(), 'f', -1, 64) + } else { + halt.errorStr2("invalid struct key type: ", ti.keyType.String()) + } + + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else if mf != nil { + + name2 = append(name2[:0], rvkencname...) 
+ var f interface{} + d.decode(&f) + if !mf.CodecMissingField(name2, f) && d.h.ErrorIfNoField { + halt.errorStr2("no matching struct field when decoding stream map with key: ", stringView(name2)) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderCborIO) kSlice(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + rvCanset := rv.CanSet() + + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + rvbs := rvGetBytes(rv) + if rvCanset { + bs2, bst := d.decodeBytesInto(rvbs, false) + if bst != dBytesIntoParamOut { + rvSetBytes(rv, bs2) + } + } else { + + d.decodeBytesInto(rvbs[:len(rvbs):len(rvbs)], true) + } + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset { + if rvIsNil(rv) { + rvSetDirect(rv, rvSliceZeroCap(ti.rt)) + } else { + rvSetSliceLen(rv, 0) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem0Mut := !scalarBitset.isset(ti.elemkind) + rtelem := ti.elem + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnCborIO + + var rvChanged bool + + var rv0 = rv + var rv9 reflect.Value + + rvlen := rvLenSlice(rv) + rvcap := rvCapSlice(rv) + maxInitLen := d.maxInitLen() + hasLen := containerLenS >= 0 + if hasLen { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen1 := int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + if rvlen1 == rvlen { + } else if rvlen1 <= rvcap { + if rvCanset { + rvlen = rvlen1 + rvSetSliceLen(rv, rvlen) + } + } else if rvCanset { + rvlen = rvlen1 + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && rtelem0Mut { + rvCopySlice(rv, rv0, rtelem) + } + } else if containerLenS != rvlen { + if rvCanset { + rvlen = containerLenS + rvSetSliceLen(rv, rvlen) + } + } + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } + + var j int + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if rvCanset { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into 
non-settable slice") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + + if rvlen < rvcap { + rvlen = rvcap + if rvCanset { + rvSetSliceLen(rv, rvlen) + } else if rvChanged { + rv = rvSlice(rv, rvlen) + } else { + halt.onerror(errExpandSliceCannotChange) + } + } else { + if !(rvCanset || rvChanged) { + halt.onerror(errExpandSliceCannotChange) + } + rv, rvcap, rvCanset = rvGrowSlice(rv, f.ti, rvcap, 1) + + rvlen = rvcap + rvChanged = !rvCanset + } + } + + rv9 = rvArrayIndex(rv, j, f.ti, true) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if j < rvlen { + if rvCanset { + rvSetSliceLen(rv, j) + } else if rvChanged { + rv = rvSlice(rv, j) + } + + } else if j == 0 && rvIsNil(rv) { + if rvCanset { + rv = rvSliceZeroCap(ti.rt) + rvCanset = false + rvChanged = true + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } +} + +func (d *decoderCborIO) kArray(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + ctyp := d.d.ContainerType() + if handleBytesWithinKArray && (ctyp == valueTypeBytes || ctyp == valueTypeString) { + + if ti.elemkind != uint8(reflect.Uint8) { + halt.errorf("bytes/string in stream can decode into array of bytes, but not %v", ti.rt) + } + rvbs := rvGetArrayBytes(rv, nil) + d.decodeBytesInto(rvbs, true) + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var rv9 reflect.Value + + rvlen := rv.Len() + hasLen := containerLenS >= 0 + if hasLen && containerLenS > rvlen { + halt.errorf("cannot decode into array with length: %v, less than container length: %v", any(rvlen), any(containerLenS)) + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + var fn *decFnCborIO + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } else { + fn = d.fn(rtelem) + } + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + d.arrayCannotExpand(rvlen, j+1) + d.swallow() + continue + } + + rv9 = rvArrayIndex(rv, j, f.ti, false) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func 
(d *decoderCborIO) kChan(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + if ti.chandir&uint8(reflect.SendDir) == 0 { + halt.errorStr("receive-only channel cannot be decoded") + } + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + bs2, _ := d.d.DecodeBytes() + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + return + } + + var rvCanset = rv.CanSet() + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset && rvIsNil(rv) { + rvSetDirect(rv, reflect.MakeChan(ti.rt, 0)) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + useTransient := decUseTransient && ti.elemkind != byte(reflect.Ptr) && ti.tielem.flagCanTransient + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnCborIO + + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + var rvlen int + hasLen := containerLenS >= 0 + maxInitLen := d.maxInitLen() + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if hasLen { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + } else { + rvlen = decDefChanCap + } + if rvCanset { + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } else { + halt.errorStr("cannot decode into non-settable chan") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if rv9.IsValid() { + rvSetZero(rv9) + } else if useTransient { + rv9 = d.perType.TransientAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } else { + rv9 = rvZeroAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } + if !d.d.TryNil() { + d.decodeValueNoCheckNil(rv9, fn) + } + rv.Send(rv9) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } + +} + +func (d *decoderCborIO) kMap(f *decFnInfo, rv reflect.Value) { + _ = d.d + containerLen := d.mapStart(d.d.ReadMapStart()) + ti := f.ti + if rvIsNil(rv) { + rvlen := int(decInferLen(containerLen, d.maxInitLen(), uint(ti.keysize+ti.elemsize))) + rvSetDirect(rv, makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + d.mapEnd() + return + } + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := reflect.Kind(ti.elemkind) + ktypeKind := reflect.Kind(ti.keykind) + mparams := getMapReqParams(ti) + + vtypePtr := vtypeKind == reflect.Ptr + ktypePtr := ktypeKind == reflect.Ptr + + vTransient := decUseTransient && !vtypePtr && ti.tielem.flagCanTransient + + kTransient := vTransient && !ktypePtr && ti.tikey.flagCanTransient + + var vtypeElem reflect.Type + + var keyFn, valFn *decFnCborIO + var ktypeLo, vtypeLo = ktype, vtype + + if ktypeKind == reflect.Ptr { + for ktypeLo = ktype.Elem(); ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + } + + if vtypePtr { + vtypeElem = vtype.Elem() + for vtypeLo = vtypeElem; 
vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + } + + rvkMut := !scalarBitset.isset(ti.keykind) + rvvMut := !scalarBitset.isset(ti.elemkind) + rvvCanNil := isnilBitset.isset(ti.elemkind) + + var rvk, rvkn, rvv, rvvn, rvva, rvvz reflect.Value + + var doMapGet, doMapSet bool + + if !d.h.MapValueReset { + if rvvMut && (vtypeKind != reflect.Interface || !d.h.InterfaceReset) { + doMapGet = true + rvva = mapAddrLoopvarRV(vtype, vtypeKind) + } + } + + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen >= 0 + + var kstr2bs []byte + var kstr string + + var mapKeyStringSharesBytesBuf bool + var att dBytesAttachState + + var vElem, kElem reflect.Type + kbuiltin := ti.tikey.flagDecBuiltin && ti.keykind != uint8(reflect.Slice) + vbuiltin := ti.tielem.flagDecBuiltin + if kbuiltin && ktypePtr { + kElem = ti.key.Elem() + } + if vbuiltin && vtypePtr { + vElem = ti.elem.Elem() + } + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + mapKeyStringSharesBytesBuf = false + kstr = "" + if j == 0 { + + if kTransient { + rvk = d.perType.TransientAddr2K(ktype, ktypeKind) + } else { + rvk = rvZeroAddrK(ktype, ktypeKind) + } + if !rvkMut { + rvkn = rvk + } + if !rvvMut { + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + } + if !ktypeIsString && keyFn == nil { + keyFn = d.fn(ktypeLo) + } + if valFn == nil { + valFn = d.fn(vtypeLo) + } + } else if rvkMut { + rvSetZero(rvk) + } else { + rvk = rvkn + } + + d.mapElemKey(j == 0) + + if d.d.TryNil() { + rvSetZero(rvk) + } else if ktypeIsString { + kstr2bs, att = d.d.DecodeStringAsBytes() + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, att) + rvSetString(rvk, kstr) + } else { + if kbuiltin { + if ktypePtr { + if rvIsNil(rvk) { + rvSetDirect(rvk, reflect.New(kElem)) + } + d.decode(rv2i(rvk)) + } else { + d.decode(rv2i(rvAddr(rvk, ti.tikey.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvk, keyFn) + } + + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() && rvk2.Type() == uint8SliceTyp { + kstr2bs = rvGetBytes(rvk2) + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, dBytesAttachView) + rvSetIntf(rvk, rv4istr(kstr)) + } + + } + } + + if mapKeyStringSharesBytesBuf && d.bufio { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + mapKeyStringSharesBytesBuf = false + } + + d.mapElemValue() + + if d.d.TryNil() { + if mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + + if !rvvz.IsValid() { + rvvz = rvZeroK(vtype, vtypeKind) + } + mapSet(rv, rvk, rvvz, mparams) + continue + } + + doMapSet = true + + if !rvvMut { + rvv = rvvn + } else if !doMapGet { + goto NEW_RVV + } else { + rvv = mapGet(rv, rvk, rvva, mparams) + if !rvv.IsValid() || (rvvCanNil && rvIsNil(rvv)) { + goto NEW_RVV + } + switch vtypeKind { + case reflect.Ptr, reflect.Map: + doMapSet = false + case reflect.Interface: + + rvvn = rvv.Elem() + if k := rvvn.Kind(); (k == reflect.Ptr || k == reflect.Map) && !rvIsNil(rvvn) { + d.decodeValueNoCheckNil(rvvn, nil) + continue + } + + rvvn = rvZeroAddrK(vtype, vtypeKind) + rvSetIntf(rvvn, rvv) + rvv = rvvn + default: + + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + rvSetDirect(rvvn, rvv) + rvv = rvvn + } + } + goto 
DECODE_VALUE_NO_CHECK_NIL + + NEW_RVV: + if vtypePtr { + rvv = reflect.New(vtypeElem) + } else if vTransient { + rvv = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvv = rvZeroAddrK(vtype, vtypeKind) + } + + DECODE_VALUE_NO_CHECK_NIL: + if doMapSet && mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + if vbuiltin { + if vtypePtr { + if rvIsNil(rvv) { + rvSetDirect(rvv, reflect.New(vElem)) + } + d.decode(rv2i(rvv)) + } else { + d.decode(rv2i(rvAddr(rvv, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvv, valFn) + } + if doMapSet { + mapSet(rv, rvk, rvv, mparams) + } + } + + d.mapEnd() +} + +func (d *decoderCborIO) init(h Handle) { + initHandle(h) + callMake(&d.d) + d.hh = h + d.h = h.getBasicHandle() + + d.err = errDecoderNotInitialized + + if d.h.InternString && d.is == nil { + d.is.init() + } + + d.fp = d.d.init(h, &d.decoderBase, d).(*fastpathDsCborIO) + + if d.bytes { + d.rtidFn = &d.h.rtidFnsDecBytes + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtBytes + } else { + d.bufio = d.h.ReaderBufferSize > 0 + d.rtidFn = &d.h.rtidFnsDecIO + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtIO + } + + d.reset() + +} + +func (d *decoderCborIO) reset() { + d.d.reset() + d.err = nil + d.c = 0 + d.depth = 0 + d.calls = 0 + + d.maxdepth = decDefMaxDepth + if d.h.MaxDepth > 0 { + d.maxdepth = d.h.MaxDepth + } + d.mtid = 0 + d.stid = 0 + d.mtr = false + d.str = false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + _, d.mtr = fastpathAvIndex(d.mtid) + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + _, d.str = fastpathAvIndex(d.stid) + } +} + +func (d *decoderCborIO) Reset(r io.Reader) { + if d.bytes { + halt.onerror(errDecNoResetBytesWithReader) + } + d.reset() + if r == nil { + r = &eofReader + } + d.d.resetInIO(r) +} + +func (d *decoderCborIO) ResetBytes(in []byte) { + if !d.bytes { + halt.onerror(errDecNoResetReaderWithBytes) + } + d.resetBytes(in) +} + +func (d *decoderCborIO) resetBytes(in []byte) { + d.reset() + if in == nil { + in = zeroByteSlice + } + d.d.resetInBytes(in) +} + +func (d *decoderCborIO) ResetString(s string) { + d.ResetBytes(bytesView(s)) +} + +func (d *decoderCborIO) Decode(v interface{}) (err error) { + + defer panicValToErr(d, callRecoverSentinel, &d.err, &err, debugging) + d.mustDecode(v) + return +} + +func (d *decoderCborIO) MustDecode(v interface{}) { + defer panicValToErr(d, callRecoverSentinel, &d.err, nil, true) + d.mustDecode(v) + return +} + +func (d *decoderCborIO) mustDecode(v interface{}) { + halt.onerror(d.err) + if d.hh == nil { + halt.onerror(errNoFormatHandle) + } + + d.calls++ + d.decode(v) + d.calls-- +} + +func (d *decoderCborIO) Release() {} + +func (d *decoderCborIO) swallow() { + d.d.nextValueBytes() +} + +func (d *decoderCborIO) nextValueBytes() []byte { + return d.d.nextValueBytes() +} + +func (d *decoderCborIO) decode(iv interface{}) { + _ = d.d + + rv, ok := isNil(iv, true) + if ok { + halt.onerror(errCannotDecodeIntoNil) + } + + switch v := iv.(type) { + + case *string: + *v = d.detach2Str(d.d.DecodeStringAsBytes()) + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + *v = uint(chkOvf.UintV(d.d.DecodeUint64(), 
uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *uintptr: + *v = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *float32: + *v = d.d.DecodeFloat32() + case *float64: + *v = d.d.DecodeFloat64() + case *complex64: + *v = complex(d.d.DecodeFloat32(), 0) + case *complex128: + *v = complex(d.d.DecodeFloat64(), 0) + case *[]byte: + *v, _ = d.decodeBytesInto(*v, false) + case []byte: + + d.decodeBytesInto(v[:len(v):len(v)], true) + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(rv4iptr(v), nil) + + case reflect.Value: + if ok, _ = isDecodeable(v); !ok { + d.haltAsNotDecodeable(v) + } + d.decodeValue(v, nil) + + default: + + if skipFastpathTypeSwitchInDirectCall || !d.dh.fastpathDecodeTypeSwitch(iv, d) { + if !rv.IsValid() { + rv = reflect.ValueOf(iv) + } + if ok, _ = isDecodeable(rv); !ok { + d.haltAsNotDecodeable(rv) + } + d.decodeValue(rv, nil) + } + } +} + +func (d *decoderCborIO) decodeValue(rv reflect.Value, fn *decFnCborIO) { + if d.d.TryNil() { + decSetNonNilRV2Zero(rv) + } else { + d.decodeValueNoCheckNil(rv, fn) + } +} + +func (d *decoderCborIO) decodeValueNoCheckNil(rv reflect.Value, fn *decFnCborIO) { + + var rvp reflect.Value + var rvpValid bool +PTR: + if rv.Kind() == reflect.Ptr { + rvpValid = true + if rvIsNil(rv) { + rvSetDirect(rv, reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + goto PTR + } + + if fn == nil { + fn = d.fn(rv.Type()) + } + if fn.i.addrD { + if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else if fn.i.addrDf { + halt.errorStr("cannot decode into a non-pointer value") + } + } + fn.fd(d, &fn.i, rv) +} + +func (d *decoderCborIO) decodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + d.decodeValue(baseRV(v), d.fn(t)) + } else { + d.decodeValue(baseRV(v), d.fnNoExt(t)) + } +} + +func (d *decoderCborIO) structFieldNotFound(index int, rvkencname string) { + + if d.h.ErrorIfNoField { + if index >= 0 { + halt.errorInt("no matching struct field found when decoding stream array at index ", int64(index)) + } else if rvkencname != "" { + halt.errorStr2("no matching struct field found when decoding stream map with key ", rvkencname) + } + } + d.swallow() +} + +func (d *decoderCborIO) decodeBytesInto(out []byte, mustFit bool) (v []byte, state dBytesIntoState) { + v, att := d.d.DecodeBytes() + if cap(v) == 0 || (att >= dBytesAttachViewZerocopy && !mustFit) { + + return + } + if len(v) == 0 { + v = zeroByteSlice + return + } + if len(out) == len(v) { + state = dBytesIntoParamOut + } else if cap(out) >= len(v) { + out = out[:len(v)] + state = dBytesIntoParamOutSlice + } else if mustFit { + halt.errorf("bytes capacity insufficient for decoded bytes: got/expected: %d/%d", len(v), len(out)) + } else { + out = make([]byte, len(v)) + state = dBytesIntoNew + } + copy(out, v) + v = out + return +} + +func (d *decoderCborIO) rawBytes() (v []byte) { + + v = d.d.nextValueBytes() + if d.bytes && !d.h.ZeroCopy { + vv := make([]byte, len(v)) + copy(vv, v) + v = vv + } + return +} + +func (d *decoderCborIO) wrapErr(v error, err *error) { + *err = wrapCodecErr(v, d.hh.Name(), d.d.NumBytesRead(), false) +} + +func (d *decoderCborIO) NumBytesRead() int { + return d.d.NumBytesRead() +} + +func (d *decoderCborIO) containerNext(j, containerLen 
int, hasLen bool) bool { + + if hasLen { + return j < containerLen + } + return !d.d.CheckBreak() +} + +func (d *decoderCborIO) mapElemKey(firstTime bool) { + d.d.ReadMapElemKey(firstTime) + d.c = containerMapKey +} + +func (d *decoderCborIO) mapElemValue() { + d.d.ReadMapElemValue() + d.c = containerMapValue +} + +func (d *decoderCborIO) mapEnd() { + d.d.ReadMapEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderCborIO) arrayElem(firstTime bool) { + d.d.ReadArrayElem(firstTime) + d.c = containerArrayElem +} + +func (d *decoderCborIO) arrayEnd() { + d.d.ReadArrayEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderCborIO) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) { + + var vv interface{} + d.decode(&vv) + ext.UpdateExt(v, vv) + +} + +func (d *decoderCborIO) fn(t reflect.Type) *decFnCborIO { + return d.dh.decFnViaBH(t, d.rtidFn, d.h, d.fp, false) +} + +func (d *decoderCborIO) fnNoExt(t reflect.Type) *decFnCborIO { + return d.dh.decFnViaBH(t, d.rtidFnNoExt, d.h, d.fp, true) +} + +func (helperDecDriverCborIO) newDecoderBytes(in []byte, h Handle) *decoderCborIO { + var c1 decoderCborIO + c1.bytes = true + c1.init(h) + c1.ResetBytes(in) + return &c1 +} + +func (helperDecDriverCborIO) newDecoderIO(in io.Reader, h Handle) *decoderCborIO { + var c1 decoderCborIO + c1.init(h) + c1.Reset(in) + return &c1 +} + +func (helperDecDriverCborIO) decFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathDsCborIO) (f *fastpathDCborIO, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperDecDriverCborIO) decFindRtidFn(s []decRtidFnCborIO, rtid uintptr) (i uint, fn *decFnCborIO) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperDecDriverCborIO) decFromRtidFnSlice(fns *atomicRtidFnSlice) (s []decRtidFnCborIO) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]decRtidFnCborIO](v)) + } + return +} + +func (dh helperDecDriverCborIO) decFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, x *BasicHandle, fp *fastpathDsCborIO, + checkExt bool) (fn *decFnCborIO) { + return dh.decFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperDecDriverCborIO) decFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsCborIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnCborIO) { + rtid := rt2id(rt) + var sp []decRtidFnCborIO = dh.decFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.decFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.decFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperDecDriverCborIO) decFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsCborIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnCborIO) { + + fn = dh.decFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []decRtidFnCborIO + mu.Lock() + sp = dh.decFromRtidFnSlice(fns) + + 
if sp == nil { + sp = []decRtidFnCborIO{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.decFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]decRtidFnCborIO, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = decRtidFnCborIO{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperDecDriverCborIO) decFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathDsCborIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnCborIO) { + fn = new(decFnCborIO) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + fi.addrDf = true + + if rtid == timeTypId && timeBuiltin { + fn.fd = (*decoderCborIO).kTime + } else if rtid == rawTypId { + fn.fd = (*decoderCborIO).raw + } else if rtid == rawExtTypId { + fn.fd = (*decoderCborIO).rawExt + fi.addrD = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fd = (*decoderCborIO).ext + fi.addrD = true + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fd = (*decoderCborIO).selferUnmarshal + fi.addrD = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fd = (*decoderCborIO).binaryUnmarshal + fi.addrD = ti.flagBinaryUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fd = (*decoderCborIO).jsonUnmarshal + fi.addrD = ti.flagJsonUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fd = (*decoderCborIO).textUnmarshal + fi.addrD = ti.flagTextUnmarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fd = fp[idx].decfn + fi.addrD = true + fi.addrDf = false + if rk == reflect.Array { + fi.addrD = false + } + } + } else { + + xfe, xrt := dh.decFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf2 := xfe.decfn + if rk == reflect.Array { + fi.addrD = false + fn.fd = func(d *decoderCborIO, xf *decFnInfo, xrv reflect.Value) { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } else { + fi.addrD = true + fi.addrDf = false + xptr2rt := reflect.PointerTo(xrt) + fn.fd = func(d *decoderCborIO, xf *decFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, rvConvert(xrv, xptr2rt)) + } else { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } + } + } + } + } + if fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fd = (*decoderCborIO).kBool + case reflect.String: + fn.fd = (*decoderCborIO).kString + case reflect.Int: + fn.fd = (*decoderCborIO).kInt + case reflect.Int8: + fn.fd = (*decoderCborIO).kInt8 + case reflect.Int16: + fn.fd = (*decoderCborIO).kInt16 + case reflect.Int32: + fn.fd = (*decoderCborIO).kInt32 + case reflect.Int64: + fn.fd = (*decoderCborIO).kInt64 + case reflect.Uint: + fn.fd = (*decoderCborIO).kUint + case reflect.Uint8: + fn.fd = (*decoderCborIO).kUint8 + case reflect.Uint16: + fn.fd = (*decoderCborIO).kUint16 + case reflect.Uint32: + fn.fd = 
(*decoderCborIO).kUint32 + case reflect.Uint64: + fn.fd = (*decoderCborIO).kUint64 + case reflect.Uintptr: + fn.fd = (*decoderCborIO).kUintptr + case reflect.Float32: + fn.fd = (*decoderCborIO).kFloat32 + case reflect.Float64: + fn.fd = (*decoderCborIO).kFloat64 + case reflect.Complex64: + fn.fd = (*decoderCborIO).kComplex64 + case reflect.Complex128: + fn.fd = (*decoderCborIO).kComplex128 + case reflect.Chan: + fn.fd = (*decoderCborIO).kChan + case reflect.Slice: + fn.fd = (*decoderCborIO).kSlice + case reflect.Array: + fi.addrD = false + fn.fd = (*decoderCborIO).kArray + case reflect.Struct: + if ti.simple { + fn.fd = (*decoderCborIO).kStructSimple + } else { + fn.fd = (*decoderCborIO).kStruct + } + case reflect.Map: + fn.fd = (*decoderCborIO).kMap + case reflect.Interface: + + fn.fd = (*decoderCborIO).kInterface + default: + + fn.fd = (*decoderCborIO).kErr + } + } + } + return +} +func (e *cborEncDriverIO) EncodeNil() { + e.w.writen1(cborBdNil) +} + +func (e *cborEncDriverIO) EncodeBool(b bool) { + if b { + e.w.writen1(cborBdTrue) + } else { + e.w.writen1(cborBdFalse) + } +} + +func (e *cborEncDriverIO) EncodeFloat32(f float32) { + b := math.Float32bits(f) + if e.h.OptimumSize { + if h := floatToHalfFloatBits(b); halfFloatToFloatBits(h) == b { + e.w.writen1(cborBdFloat16) + e.w.writen2(bigen.PutUint16(h)) + return + } + } + e.w.writen1(cborBdFloat32) + e.w.writen4(bigen.PutUint32(b)) +} + +func (e *cborEncDriverIO) EncodeFloat64(f float64) { + if e.h.OptimumSize { + if f32 := float32(f); float64(f32) == f { + e.EncodeFloat32(f32) + return + } + } + e.w.writen1(cborBdFloat64) + e.w.writen8(bigen.PutUint64(math.Float64bits(f))) +} + +func (e *cborEncDriverIO) encUint(v uint64, bd byte) { + if v <= 0x17 { + e.w.writen1(byte(v) + bd) + } else if v <= math.MaxUint8 { + e.w.writen2(bd+0x18, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 0x19) + e.w.writen2(bigen.PutUint16(uint16(v))) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 0x1a) + e.w.writen4(bigen.PutUint32(uint32(v))) + } else { + e.w.writen1(bd + 0x1b) + e.w.writen8(bigen.PutUint64(v)) + } +} + +func (e *cborEncDriverIO) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-1-v), cborBaseNegInt) + } else { + e.encUint(uint64(v), cborBaseUint) + } +} + +func (e *cborEncDriverIO) EncodeUint(v uint64) { + e.encUint(v, cborBaseUint) +} + +func (e *cborEncDriverIO) encLen(bd byte, length int) { + e.encUint(uint64(length), bd) +} + +func (e *cborEncDriverIO) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + } else if e.h.TimeRFC3339 { + e.encUint(0, cborBaseTag) + e.encStringBytesS(cborBaseString, stringView(t.AppendFormat(e.b[:0], time.RFC3339Nano))) + } else { + e.encUint(1, cborBaseTag) + t = t.UTC().Round(time.Microsecond) + sec, nsec := t.Unix(), uint64(t.Nanosecond()) + if nsec == 0 { + e.EncodeInt(sec) + } else { + e.EncodeFloat64(float64(sec) + float64(nsec)/1e9) + } + } +} + +func (e *cborEncDriverIO) EncodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + e.encUint(uint64(xtag), cborBaseTag) + if ext == SelfExt { + e.enc.encodeAs(rv, basetype, false) + } else if v := ext.ConvertExt(rv); v == nil { + e.writeNilBytes() + } else { + e.enc.encodeI(v) + } +} + +func (e *cborEncDriverIO) EncodeRawExt(re *RawExt) { + e.encUint(uint64(re.Tag), cborBaseTag) + if re.Data != nil { + e.w.writeb(re.Data) + } else if re.Value != nil { + e.enc.encodeI(re.Value) + } else { + e.EncodeNil() + } +} + +func (e *cborEncDriverIO) WriteArrayEmpty() { + if e.h.IndefiniteLength { + 
e.w.writen2(cborBdIndefiniteArray, cborBdBreak) + } else { + e.w.writen1(cborBaseArray) + + } +} + +func (e *cborEncDriverIO) WriteMapEmpty() { + if e.h.IndefiniteLength { + e.w.writen2(cborBdIndefiniteMap, cborBdBreak) + } else { + e.w.writen1(cborBaseMap) + + } +} + +func (e *cborEncDriverIO) WriteArrayStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteArray) + } else { + e.encLen(cborBaseArray, length) + } +} + +func (e *cborEncDriverIO) WriteMapStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteMap) + } else { + e.encLen(cborBaseMap, length) + } +} + +func (e *cborEncDriverIO) WriteMapEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriverIO) WriteArrayEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriverIO) EncodeString(v string) { + bb := cborBaseString + if e.h.StringToRaw { + bb = cborBaseBytes + } + e.encStringBytesS(bb, v) +} + +func (e *cborEncDriverIO) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) } + +func (e *cborEncDriverIO) EncodeStringBytesRaw(v []byte) { + e.encStringBytesS(cborBaseBytes, stringView(v)) +} + +func (e *cborEncDriverIO) encStringBytesS(bb byte, v string) { + if e.h.IndefiniteLength { + if bb == cborBaseBytes { + e.w.writen1(cborBdIndefiniteBytes) + } else { + e.w.writen1(cborBdIndefiniteString) + } + vlen := uint(len(v)) + n := max(4, min(vlen/4, 1024)) + for i := uint(0); i < vlen; { + i2 := i + n + if i2 >= vlen { + i2 = vlen + } + v2 := v[i:i2] + e.encLen(bb, len(v2)) + e.w.writestr(v2) + i = i2 + } + e.w.writen1(cborBdBreak) + } else { + e.encLen(bb, len(v)) + e.w.writestr(v) + } +} + +func (e *cborEncDriverIO) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *cborEncDriverIO) writeNilOr(v byte) { + if !e.h.NilCollectionToZeroLength { + v = cborBdNil + } + e.w.writen1(v) +} + +func (e *cborEncDriverIO) writeNilArray() { + e.writeNilOr(cborBaseArray) +} + +func (e *cborEncDriverIO) writeNilMap() { + e.writeNilOr(cborBaseMap) +} + +func (e *cborEncDriverIO) writeNilBytes() { + e.writeNilOr(cborBaseBytes) +} + +func (d *cborDecDriverIO) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *cborDecDriverIO) advanceNil() (null bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return true + } + return +} + +func (d *cborDecDriverIO) TryNil() bool { + return d.advanceNil() +} + +func (d *cborDecDriverIO) skipTags() { + for d.bd>>5 == cborMajorTag { + d.decUint() + d.bd = d.r.readn1() + } +} + +func (d *cborDecDriverIO) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + if d.bd == cborBdNil { + d.bdRead = false + return valueTypeNil + } + major := d.bd >> 5 + if major == cborMajorBytes { + return valueTypeBytes + } else if major == cborMajorString { + return valueTypeString + } else if major == cborMajorArray { + return valueTypeArray + } else if major == cborMajorMap { + return valueTypeMap + } + return valueTypeUnset +} + +func (d *cborDecDriverIO) CheckBreak() (v bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdBreak { + d.bdRead = false + v = true + } + return +} + +func (d *cborDecDriverIO) decUint() (ui uint64) { + v := d.bd & 0x1f + if v <= 0x17 { + ui = uint64(v) + } else if v == 0x18 { + ui = uint64(d.r.readn1()) + } else if v == 0x19 { + ui = 
uint64(bigen.Uint16(d.r.readn2())) + } else if v == 0x1a { + ui = uint64(bigen.Uint32(d.r.readn4())) + } else if v == 0x1b { + ui = uint64(bigen.Uint64(d.r.readn8())) + } else { + halt.errorf("invalid descriptor decoding uint: %x/%s (%x)", d.bd, cbordesc(d.bd), v) + } + return +} + +func (d *cborDecDriverIO) decLen() int { + return int(d.decUint()) +} + +func (d *cborDecDriverIO) decFloat() (f float64, ok bool) { + ok = true + switch d.bd { + case cborBdFloat16: + f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readn2())))) + case cborBdFloat32: + f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4()))) + case cborBdFloat64: + f = math.Float64frombits(bigen.Uint64(d.r.readn8())) + default: + if d.bd>>5 == cborMajorTag { + + switch d.bd & 0x1f { + case 2: + f = d.decTagBigIntAsFloat(false) + case 3: + f = d.decTagBigIntAsFloat(true) + case 4: + f = d.decTagBigFloatAsFloat(true) + case 5: + f = d.decTagBigFloatAsFloat(false) + default: + ok = false + } + } else { + ok = false + } + } + return +} + +func (d *cborDecDriverIO) decInteger() (ui uint64, neg, ok bool) { + ok = true + switch d.bd >> 5 { + case cborMajorUint: + ui = d.decUint() + case cborMajorNegInt: + ui = d.decUint() + neg = true + default: + ok = false + } + return +} + +func (d *cborDecDriverIO) DecodeInt64() (i int64) { + if d.advanceNil() { + return + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + v1, v2, v3 := d.decInteger() + i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, true) + d.bdRead = false + return +} + +func (d *cborDecDriverIO) DecodeUint64() (ui uint64) { + if d.advanceNil() { + return + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger()) + d.bdRead = false + return +} + +func (d *cborDecDriverIO) DecodeFloat64() (f float64) { + if d.advanceNil() { + return + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + v1, v2 := d.decFloat() + f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, true) + d.bdRead = false + return +} + +func (d *cborDecDriverIO) DecodeBool() (b bool) { + if d.advanceNil() { + return + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + if d.bd == cborBdTrue { + b = true + } else if d.bd == cborBdFalse { + } else { + halt.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd)) + } + d.bdRead = false + return +} + +func (d *cborDecDriverIO) ReadMapStart() (length int) { + if d.advanceNil() { + return containerLenNil + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + d.bdRead = false + if d.bd == cborBdIndefiniteMap { + return containerLenUnknown + } + if d.bd>>5 != cborMajorMap { + halt.errorf("error reading map; got major type: %x, expected %x/%s", d.bd>>5, cborMajorMap, cbordesc(d.bd)) + } + return d.decLen() +} + +func (d *cborDecDriverIO) ReadArrayStart() (length int) { + if d.advanceNil() { + return containerLenNil + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + d.bdRead = false + if d.bd == cborBdIndefiniteArray { + return containerLenUnknown + } + if d.bd>>5 != cborMajorArray { + halt.errorf("invalid array; got major type: %x, expect: %x/%s", d.bd>>5, cborMajorArray, cbordesc(d.bd)) + } + return d.decLen() +} + +func (d *cborDecDriverIO) DecodeBytes() (bs []byte, state dBytesAttachState) { + if d.advanceNil() { + return + } + if d.h.SkipUnexpectedTags { + d.skipTags() + } + fnEnsureNonNilBytes := func() { + + if bs == nil { + bs = zeroByteSlice + state = dBytesDetach + } + } + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + major := 
d.bd >> 5 + val4str := d.h.ValidateUnicode && major == cborMajorString + bs = d.d.buf[:0] + d.bdRead = false + for !d.CheckBreak() { + if d.bd>>5 != major { + const msg = "malformed indefinite string/bytes %x (%s); " + + "contains chunk with major type %v, expected %v" + halt.errorf(msg, d.bd, cbordesc(d.bd), d.bd>>5, major) + } + n := uint(d.decLen()) + bs = append(bs, d.r.readx(n)...) + d.bdRead = false + if val4str && !utf8.Valid(bs[len(bs)-int(n):]) { + const msg = "indefinite-length text string contains chunk " + + "that is not a valid utf-8 sequence: 0x%x" + halt.errorf(msg, bs[len(bs)-int(n):]) + } + } + d.bdRead = false + d.d.buf = bs + state = dBytesAttachBuffer + fnEnsureNonNilBytes() + return + } + if d.bd == cborBdIndefiniteArray { + d.bdRead = false + bs = d.d.buf[:0] + for !d.CheckBreak() { + bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) + } + d.d.buf = bs + state = dBytesAttachBuffer + fnEnsureNonNilBytes() + return + } + var cond bool + if d.bd>>5 == cborMajorArray { + d.bdRead = false + slen := d.decLen() + bs, cond = usableByteSlice(d.d.buf, slen) + for i := 0; i < len(bs); i++ { + bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8)) + } + for i := len(bs); i < slen; i++ { + bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) + } + if cond { + d.d.buf = bs + } + state = dBytesAttachBuffer + fnEnsureNonNilBytes() + return + } + clen := d.decLen() + d.bdRead = false + bs, cond = d.r.readxb(uint(clen)) + state = d.d.attachState(cond) + return +} + +func (d *cborDecDriverIO) DecodeStringAsBytes() (out []byte, state dBytesAttachState) { + out, state = d.DecodeBytes() + if d.h.ValidateUnicode && !utf8.Valid(out) { + halt.errorf("DecodeStringAsBytes: invalid UTF-8: %s", out) + } + return +} + +func (d *cborDecDriverIO) DecodeTime() (t time.Time) { + if d.advanceNil() { + return + } + if d.bd>>5 != cborMajorTag { + halt.errorf("error reading tag; expected major type: %x, got: %x", cborMajorTag, d.bd>>5) + } + xtag := d.decUint() + d.bdRead = false + return d.decodeTime(xtag) +} + +func (d *cborDecDriverIO) decodeTime(xtag uint64) (t time.Time) { + switch xtag { + case 0: + var err error + t, err = time.Parse(time.RFC3339, stringView(bytesOKs(d.DecodeStringAsBytes()))) + halt.onerror(err) + case 1: + f1, f2 := math.Modf(d.DecodeFloat64()) + t = time.Unix(int64(f1), int64(f2*1e9)) + default: + halt.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag) + } + t = t.UTC().Round(time.Microsecond) + return +} + +func (d *cborDecDriverIO) preDecodeExt(checkTag bool, xtag uint64) (realxtag uint64, ok bool) { + if d.advanceNil() { + return + } + if d.bd>>5 != cborMajorTag { + halt.errorf("error reading tag; expected major type: %x, got: %x", cborMajorTag, d.bd>>5) + } + realxtag = d.decUint() + d.bdRead = false + if checkTag && xtag != realxtag { + halt.errorf("Wrong extension tag. Got %b. 
Expecting: %v", realxtag, xtag) + } + ok = true + return +} + +func (d *cborDecDriverIO) DecodeRawExt(re *RawExt) { + if realxtag, ok := d.preDecodeExt(false, 0); ok { + re.Tag = realxtag + d.dec.decode(&re.Value) + d.bdRead = false + } +} + +func (d *cborDecDriverIO) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + if _, ok := d.preDecodeExt(true, xtag); ok { + if ext == SelfExt { + d.dec.decodeAs(rv, basetype, false) + } else { + d.dec.interfaceExtConvertAndDecode(rv, ext) + } + d.bdRead = false + } +} + +func (d *cborDecDriverIO) decTagBigIntAsFloat(neg bool) (f float64) { + bs, _ := d.DecodeBytes() + bi := new(big.Int).SetBytes(bs) + if neg { + bi0 := bi + bi = new(big.Int).Sub(big.NewInt(-1), bi0) + } + f, _ = bi.Float64() + return +} + +func (d *cborDecDriverIO) decTagBigFloatAsFloat(decimal bool) (f float64) { + if nn := d.r.readn1(); nn != 82 { + halt.errorf("(%d) decoding decimal/big.Float: expected 2 numbers", nn) + } + exp := d.DecodeInt64() + mant := d.DecodeInt64() + if decimal { + + rf := readFloatResult{exp: int8(exp)} + if mant >= 0 { + rf.mantissa = uint64(mant) + } else { + rf.neg = true + rf.mantissa = uint64(-mant) + } + f, _ = parseFloat64_reader(rf) + + } else { + + bfm := new(big.Float).SetPrec(64).SetInt64(mant) + bf := new(big.Float).SetPrec(64).SetMantExp(bfm, int(exp)) + f, _ = bf.Float64() + } + return +} + +func (d *cborDecDriverIO) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.naked() + var decodeFurther bool + switch d.bd >> 5 { + case cborMajorUint: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt64() + } else { + n.v = valueTypeUint + n.u = d.DecodeUint64() + } + case cborMajorNegInt: + n.v = valueTypeInt + n.i = d.DecodeInt64() + case cborMajorBytes: + d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) + case cborMajorString: + n.v = valueTypeString + n.s = d.d.detach2Str(d.DecodeStringAsBytes()) + case cborMajorArray: + n.v = valueTypeArray + decodeFurther = true + case cborMajorMap: + n.v = valueTypeMap + decodeFurther = true + case cborMajorTag: + n.v = valueTypeExt + n.u = d.decUint() + d.bdRead = false + n.l = nil + xx := d.h.getExtForTag(n.u) + if xx == nil { + switch n.u { + case 0, 1: + n.v = valueTypeTime + n.t = d.decodeTime(n.u) + case 2: + n.f = d.decTagBigIntAsFloat(false) + n.v = valueTypeFloat + case 3: + n.f = d.decTagBigIntAsFloat(true) + n.v = valueTypeFloat + case 4: + n.f = d.decTagBigFloatAsFloat(true) + n.v = valueTypeFloat + case 5: + n.f = d.decTagBigFloatAsFloat(false) + n.v = valueTypeFloat + case 55799: + d.DecodeNaked() + default: + if d.h.SkipUnexpectedTags { + d.DecodeNaked() + } + + } + return + } + + case cborMajorSimpleOrFloat: + switch d.bd { + case cborBdNil, cborBdUndefined: + n.v = valueTypeNil + case cborBdFalse: + n.v = valueTypeBool + n.b = false + case cborBdTrue: + n.v = valueTypeBool + n.b = true + case cborBdFloat16, cborBdFloat32, cborBdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat64() + default: + halt.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + } + default: + halt.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + } + if !decodeFurther { + d.bdRead = false + } +} + +func (d *cborDecDriverIO) uintBytes() (v []byte, ui uint64) { + + switch vv := d.bd & 0x1f; vv { + case 0x18: + v = d.r.readx(1) + ui = uint64(v[0]) + case 0x19: + v = d.r.readx(2) + ui = uint64(bigenstd.Uint16(v)) + case 0x1a: + v = d.r.readx(4) + ui = uint64(bigenstd.Uint32(v)) + case 0x1b: + v = d.r.readx(8) + ui = uint64(bigenstd.Uint64(v)) + default: + if 
vv > 0x1b { + halt.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd)) + } + ui = uint64(vv) + } + return +} + +func (d *cborDecDriverIO) nextValueBytes() (v []byte) { + if !d.bdRead { + d.readNextBd() + } + d.r.startRecording() + d.nextValueBytesBdReadR() + v = d.r.stopRecording() + d.bdRead = false + return +} + +func (d *cborDecDriverIO) nextValueBytesBdReadR() { + + var ui uint64 + + switch d.bd >> 5 { + case cborMajorUint, cborMajorNegInt: + d.uintBytes() + case cborMajorString, cborMajorBytes: + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + for { + d.readNextBd() + if d.bd == cborBdBreak { + break + } + _, ui = d.uintBytes() + d.r.skip(uint(ui)) + } + } else { + _, ui = d.uintBytes() + d.r.skip(uint(ui)) + } + case cborMajorArray: + if d.bd == cborBdIndefiniteArray { + for { + d.readNextBd() + if d.bd == cborBdBreak { + break + } + d.nextValueBytesBdReadR() + } + } else { + _, ui = d.uintBytes() + for i := uint64(0); i < ui; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + } + } + case cborMajorMap: + if d.bd == cborBdIndefiniteMap { + for { + d.readNextBd() + if d.bd == cborBdBreak { + break + } + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + } else { + _, ui = d.uintBytes() + for i := uint64(0); i < ui; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + } + case cborMajorTag: + d.uintBytes() + d.readNextBd() + d.nextValueBytesBdReadR() + case cborMajorSimpleOrFloat: + switch d.bd { + case cborBdNil, cborBdUndefined, cborBdFalse, cborBdTrue: + case cborBdFloat16: + d.r.skip(2) + case cborBdFloat32: + d.r.skip(4) + case cborBdFloat64: + d.r.skip(8) + default: + halt.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd) + } + default: + halt.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd) + } + return +} + +func (d *cborDecDriverIO) reset() { + d.bdAndBdread.reset() + +} + +func (d *cborEncDriverIO) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*CborHandle) + d.e = shared + if shared.bytes { + fp = cborFpEncBytes + } else { + fp = cborFpEncIO + } + + d.init2(enc) + return +} + +func (e *cborEncDriverIO) writeBytesAsis(b []byte) { e.w.writeb(b) } + +func (e *cborEncDriverIO) writerEnd() { e.w.end() } + +func (e *cborEncDriverIO) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *cborEncDriverIO) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +func (d *cborDecDriverIO) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*CborHandle) + d.d = shared + if shared.bytes { + fp = cborFpDecBytes + } else { + fp = cborFpDecIO + } + + d.init2(dec) + return +} + +func (d *cborDecDriverIO) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *cborDecDriverIO) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *cborDecDriverIO) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +func (d *cborDecDriverIO) descBd() string { + return sprintf("%v (%s)", d.bd, cbordesc(d.bd)) +} + +func (d *cborDecDriverIO) DecodeFloat32() (f float32) { + return float32(chkOvf.Float32V(d.DecodeFloat64())) +} + +func (d *cborEncDriverIO) init2(enc encoderI) { + d.enc = enc +} + +func (d *cborDecDriverIO) init2(dec decoderI) { + d.dec = dec + +} diff --git a/vendor/github.com/ugorji/go/codec/cbor.notfastpath.mono.generated.go 
b/vendor/github.com/ugorji/go/codec/cbor.notfastpath.mono.generated.go new file mode 100644 index 000000000..9ae1be245 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/cbor.notfastpath.mono.generated.go @@ -0,0 +1,52 @@ +//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath) + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" +) + +type fastpathECborBytes struct { + rt reflect.Type + encfn func(*encoderCborBytes, *encFnInfo, reflect.Value) +} +type fastpathDCborBytes struct { + rt reflect.Type + decfn func(*decoderCborBytes, *decFnInfo, reflect.Value) +} +type fastpathEsCborBytes [0]fastpathECborBytes +type fastpathDsCborBytes [0]fastpathDCborBytes + +func (helperEncDriverCborBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderCborBytes) bool { + return false +} +func (helperDecDriverCborBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderCborBytes) bool { + return false +} + +func (helperEncDriverCborBytes) fastpathEList() (v *fastpathEsCborBytes) { return } +func (helperDecDriverCborBytes) fastpathDList() (v *fastpathDsCborBytes) { return } + +type fastpathECborIO struct { + rt reflect.Type + encfn func(*encoderCborIO, *encFnInfo, reflect.Value) +} +type fastpathDCborIO struct { + rt reflect.Type + decfn func(*decoderCborIO, *decFnInfo, reflect.Value) +} +type fastpathEsCborIO [0]fastpathECborIO +type fastpathDsCborIO [0]fastpathDCborIO + +func (helperEncDriverCborIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderCborIO) bool { + return false +} +func (helperDecDriverCborIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderCborIO) bool { + return false +} + +func (helperEncDriverCborIO) fastpathEList() (v *fastpathEsCborIO) { return } +func (helperDecDriverCborIO) fastpathDList() (v *fastpathDsCborIO) { return } diff --git a/vendor/github.com/ugorji/go/codec/codecgen.go b/vendor/github.com/ugorji/go/codec/codecgen.go deleted file mode 100644 index 49fb8e515..000000000 --- a/vendor/github.com/ugorji/go/codec/codecgen.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build codecgen || generated -// +build codecgen generated - -package codec - -// this file sets the codecgen variable to true -// when the build tag codecgen is set. -// -// some tests depend on knowing whether in the context of codecgen or not. -// For example, some tests should be skipped during codecgen e.g. missing fields tests. - -func init() { - codecgen = true -} diff --git a/vendor/github.com/ugorji/go/codec/custom_time.go b/vendor/github.com/ugorji/go/codec/custom_time.go new file mode 100644 index 000000000..c6d9e9676 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/custom_time.go @@ -0,0 +1,191 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "time" +) + +// EncodeTime encodes a time.Time as a []byte, including +// information on the instant in time and UTC offset. 
+// +// Format Description +// +// A timestamp is composed of 3 components: +// +// - secs: signed integer representing seconds since unix epoch +// - nsces: unsigned integer representing fractional seconds as a +// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 +// - tz: signed integer representing timezone offset in minutes east of UTC, +// and a dst (daylight savings time) flag +// +// When encoding a timestamp, the first byte is the descriptor, which +// defines which components are encoded and how many bytes are used to +// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it +// is not encoded in the byte array explicitly*. +// +// Descriptor 8 bits are of the form `A B C DDD EE`: +// A: Is secs component encoded? 1 = true +// B: Is nsecs component encoded? 1 = true +// C: Is tz component encoded? 1 = true +// DDD: Number of extra bytes for secs (range 0-7). +// If A = 1, secs encoded in DDD+1 bytes. +// If A = 0, secs is not encoded, and is assumed to be 0. +// If A = 1, then we need at least 1 byte to encode secs. +// DDD says the number of extra bytes beyond that 1. +// E.g. if DDD=0, then secs is represented in 1 byte. +// if DDD=2, then secs is represented in 3 bytes. +// EE: Number of extra bytes for nsecs (range 0-3). +// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) +// +// Following the descriptor bytes, subsequent bytes are: +// +// secs component encoded in `DDD + 1` bytes (if A == 1) +// nsecs component encoded in `EE + 1` bytes (if B == 1) +// tz component encoded in 2 bytes (if C == 1) +// +// secs and nsecs components are integers encoded in a BigEndian +// 2-complement encoding format. +// +// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to +// Least significant bit 0 are described below: +// +// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). +// Bit 15 = have\_dst: set to 1 if we set the dst flag. +// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. +// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. +func customEncodeTime(t time.Time) []byte { + // t := rv2i(rv).(time.Time) + tsecs, tnsecs := t.Unix(), t.Nanosecond() + var ( + bd byte + bs [16]byte + i int = 1 + ) + l := t.Location() + if l == time.UTC { + l = nil + } + if tsecs != 0 { + bd = bd | 0x80 + btmp := bigen.PutUint64(uint64(tsecs)) + f := pruneSignExt(btmp[:], tsecs >= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + btmp := bigen.PutUint32(uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + // zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + btmp0, btmp1 := bigen.PutUint16(z) + // clear dst flags + bs[i] = btmp0 & 0x3f + bs[i+1] = btmp1 + i = i + 2 + } + bs[0] = bd + return bs[0:i] +} + +// customDecodeTime decodes a []byte into a time.Time. 
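For illustration, a minimal sketch of reading the descriptor byte laid out above as `A B C DDD EE`; the helper name and the sample byte are assumptions for this sketch, not part of the codec package.

package main

import "fmt"

// splitTimeDescriptor unpacks the custom-time descriptor byte: bits A/B/C say
// whether the secs, nsecs and tz components are present, DDD+1 is the number
// of bytes used for secs, and EE+1 the number of bytes used for nsecs.
func splitTimeDescriptor(bd byte) (hasSecs, hasNsecs, hasTZ bool, secsBytes, nsecsBytes int) {
	hasSecs = bd&(1<<7) != 0
	hasNsecs = bd&(1<<6) != 0
	hasTZ = bd&(1<<5) != 0
	if hasSecs {
		secsBytes = int((bd>>2)&0x7) + 1
	}
	if hasNsecs {
		nsecsBytes = int(bd&0x3) + 1
	}
	return
}

func main() {
	// 0b1_1_0_010_01: secs present in 3 bytes, nsecs present in 2 bytes, no timezone.
	fmt.Println(splitTimeDescriptor(0b11001001)) // true true false 3 2
}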
+func customDecodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + // if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + } + i = i2 + tsec = int64(bigen.Uint64(btmp)) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need name here, so it can be shown when time is printf.d. + // Zone name is in form: UTC-08:00. + // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + tz = bigen.Uint16([2]byte{bs[i], bs[i+1]}) + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return +} + +// customEncodeTimeAsNum encodes time.Time exactly as cbor does. +func customEncodeTimeAsNum(t time.Time) (r interface{}) { + t = t.UTC().Round(time.Microsecond) + sec, nsec := t.Unix(), uint64(t.Nanosecond()) + if nsec == 0 { + r = sec + } else { + r = float64(sec) + float64(nsec)/1e9 + } + return r +} + +// customDecodeTimeAsNum decodes time.Time exactly as cbor does. +func customDecodeTimeAsNum(v interface{}) (t time.Time) { + switch vv := v.(type) { + case int64: + t = time.Unix(vv, 0) + case uint64: + t = time.Unix((int64)(vv), 0) + case float64: + f1, f2 := math.Modf(vv) + t = time.Unix(int64(f1), int64(f2*1e9)) + default: + halt.errorf("expect int64/float64 for time.Time ext: got %T", v) + } + t = t.UTC().Round(time.Microsecond) + return +} diff --git a/vendor/github.com/ugorji/go/codec/decimal.go b/vendor/github.com/ugorji/go/codec/decimal.go index dbb338049..0c74726b3 100644 --- a/vendor/github.com/ugorji/go/codec/decimal.go +++ b/vendor/github.com/ugorji/go/codec/decimal.go @@ -8,6 +8,19 @@ import ( "strconv" ) +type readFloatResult struct { + mantissa uint64 + exp int8 + neg bool + trunc bool + bad bool // bad decimal string + hardexp bool // exponent is hard to handle (> 2 digits, etc) + ok bool + // sawdot bool + // sawexp bool + //_ [2]bool // padding +} + // Per go spec, floats are represented in memory as // IEEE single or double precision floating point values. 
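The leading-zero guard added to parseUint64_simple and readFloat above comes down to: a literal starting with '0' is acceptable only when it is exactly "0" or the zero is immediately followed by '.', 'e' or 'E'. A minimal sketch of that rule (the function name is illustrative, not from the package):

package main

import "fmt"

// leadingZeroOK roughly mirrors the guard above: "0", "0.5" and "0e2" pass,
// while "0123" is rejected as a number with a leading zero.
func leadingZeroOK(s []byte) bool {
	if len(s) > 1 && s[0] == '0' {
		switch s[1] {
		case '.', 'e', 'E':
			return true
		default:
			return false
		}
	}
	return true
}

func main() {
	for _, s := range []string{"0", "0.5", "0e2", "0123", "42"} {
		fmt.Printf("%q -> %v\n", s, leadingZeroOK([]byte(s)))
	}
}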
// @@ -234,6 +247,10 @@ func parseFloat64_custom(b []byte) (f float64, err error) { } func parseUint64_simple(b []byte) (n uint64, ok bool) { + if len(b) > 1 && b[0] == '0' { // punt on numbers with leading zeros + return + } + var i int var n1 uint64 var c uint8 @@ -356,19 +373,6 @@ func parseNumber(b []byte, z *fauxUnion, preferSignedInt bool) (err error) { return } -type readFloatResult struct { - mantissa uint64 - exp int8 - neg bool - trunc bool - bad bool // bad decimal string - hardexp bool // exponent is hard to handle (> 2 digits, etc) - ok bool - // sawdot bool - // sawexp bool - //_ [2]bool // padding -} - func readFloat(s []byte, y floatinfo) (r readFloatResult) { var i uint // uint, so that we eliminate bounds checking var slen = uint(len(s)) @@ -384,13 +388,23 @@ func readFloat(s []byte, y floatinfo) (r readFloatResult) { i++ } - // we considered punting early if string has length > maxMantDigits, but this doesn't account + // considered punting early if string has length > maxMantDigits, but doesn't account // for trailing 0's e.g. 700000000000000000000 can be encoded exactly as it is 7e20 var nd, ndMant, dp int8 var sawdot, sawexp bool var xu uint64 + if i+1 < slen && s[i] == '0' { + switch s[i+1] { + case '.', 'e', 'E': + // ok + default: + r.bad = true + return + } + } + LOOP: for ; i < slen; i++ { switch s[i] { diff --git a/vendor/github.com/ugorji/go/codec/decode.base.go b/vendor/github.com/ugorji/go/codec/decode.base.go new file mode 100644 index 000000000..e82f40252 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/decode.base.go @@ -0,0 +1,944 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "errors" + "io" + "math" + "reflect" + "slices" + "sync" + "time" +) + +func init() { + for _, v := range []interface{}{ + (*string)(nil), + (*bool)(nil), + (*int)(nil), + (*int8)(nil), + (*int16)(nil), + (*int32)(nil), + (*int64)(nil), + (*uint)(nil), + (*uint8)(nil), + (*uint16)(nil), + (*uint32)(nil), + (*uint64)(nil), + (*uintptr)(nil), + (*float32)(nil), + (*float64)(nil), + (*complex64)(nil), + (*complex128)(nil), + (*[]byte)(nil), + ([]byte)(nil), + (*time.Time)(nil), + (*Raw)(nil), + (*interface{})(nil), + } { + decBuiltinRtids = append(decBuiltinRtids, i2rtid(v)) + } + slices.Sort(decBuiltinRtids) +} + +const msgBadDesc = "unrecognized descriptor byte" + +var decBuiltinRtids []uintptr + +// decDriver calls (DecodeBytes and DecodeStringAsBytes) return a state +// of the view they return, allowing consumers to handle appropriately. 
+// +// sequencing of this is intentional: +// - mutable if <= dBytesAttachBuffer (buf | view | invalid) +// - noCopy if >= dBytesAttachViewZerocopy +type dBytesAttachState uint8 + +const ( + dBytesAttachInvalid dBytesAttachState = iota + dBytesAttachView // (bytes && !zerocopy && !buf) + dBytesAttachBuffer // (buf) + dBytesAttachViewZerocopy // (bytes && zerocopy && !buf) + dBytesDetach // (!bytes && !buf) +) + +type dBytesIntoState uint8 + +const ( + dBytesIntoNoChange dBytesIntoState = iota + dBytesIntoParamOut + dBytesIntoParamOutSlice + dBytesIntoNew +) + +func (x dBytesAttachState) String() string { + switch x { + case dBytesAttachInvalid: + return "invalid" + case dBytesAttachView: + return "view" + case dBytesAttachBuffer: + return "buffer" + case dBytesAttachViewZerocopy: + return "view-zerocopy" + case dBytesDetach: + return "detach" + } + return "unknown" +} + +const ( + decDefMaxDepth = 1024 // maximum depth + decDefChanCap = 64 // should be large, as cap cannot be expanded + decScratchByteArrayLen = (4 + 3) * 8 // around cacheLineSize ie ~64, depending on Decoder size + + // MARKER: massage decScratchByteArrayLen to ensure xxxDecDriver structs fit within cacheLine*N + + // decFailNonEmptyIntf configures whether we error + // when decoding naked into a non-empty interface. + // + // Typically, we cannot decode non-nil stream value into + // nil interface with methods (e.g. io.Reader). + // However, in some scenarios, this should be allowed: + // - MapType + // - SliceType + // - Extensions + // + // Consequently, we should relax this. Put it behind a const flag for now. + decFailNonEmptyIntf = false + + // decUseTransient says whether we should use the transient optimization. + // + // There's potential for GC corruption or memory overwrites if transient isn't + // used carefully, so this flag helps turn it off quickly if needed. + // + // Use it everywhere needed so we can completely remove unused code blocks. + decUseTransient = true +) + +var ( + errNeedMapOrArrayDecodeToStruct = errors.New("only encoded map or array can decode into struct") + errCannotDecodeIntoNil = errors.New("cannot decode into nil") + + errExpandSliceCannotChange = errors.New("expand slice: cannot change") + + errDecoderNotInitialized = errors.New("Decoder not initialized") + + errDecUnreadByteNothingToRead = errors.New("cannot unread - nothing has been read") + errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read") + errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown") + errMaxDepthExceeded = errors.New("maximum decoding depth exceeded") +) + +type decNotDecodeableReason uint8 + +const ( + decNotDecodeableReasonUnknown decNotDecodeableReason = iota + decNotDecodeableReasonBadKind + decNotDecodeableReasonNonAddrValue + decNotDecodeableReasonNilReference +) + +type decDriverI interface { + + // this will check if the next token is a break. + CheckBreak() bool + + // TryNil tries to decode as nil. + // If a nil is in the stream, it consumes it and returns true. + // + // Note: if TryNil returns true, that must be handled. + TryNil() bool + + // ContainerType returns one of: Bytes, String, Nil, Slice or Map. + // + // Return unSet if not known. + // + // Note: Implementations MUST fully consume sentinel container types, specifically Nil. + ContainerType() (vt valueType) + + // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. 
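A minimal sketch of how a consumer can act on the attach-state ordering described above: anything at or below the buffer state may be overwritten by the next read and must be copied before being retained, while the zero-copy view and detached states can be aliased safely. The enum values and helper below are local to the sketch.

package main

import "fmt"

// Local copy of the attach states, in the documented order: anything at or
// below "buffer" may be overwritten by the next read; "view-zerocopy" and
// "detach" may be kept as-is.
type attachState uint8

const (
	attachInvalid attachState = iota
	attachView
	attachBuffer
	attachViewZerocopy
	attachDetach
)

// retain copies b only when the state says the bytes are still owned by the
// decoder's buffers, loosely mirroring the detach2Bytes helper in this file.
func retain(b []byte, st attachState) []byte {
	if st >= attachViewZerocopy {
		return b
	}
	out := make([]byte, len(b))
	copy(out, b)
	return out
}

func main() {
	buf := []byte("transient")
	kept := retain(buf, attachBuffer)
	buf[0] = 'X' // mutating the decoder buffer does not affect the retained copy
	fmt.Println(string(kept), string(retain([]byte("aliasable"), attachDetach)))
}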
+ // For maps and arrays, it will not do the decoding in-band, but will signal + // the decoder, so that is done later, by setting the fauxUnion.valueType field. + // + // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + // for extensions, DecodeNaked must read the tag and the []byte if it exists. + // if the []byte is not read, then kInterfaceNaked will treat it as a Handle + // that stores the subsequent value in-band, and complete reading the RawExt. + // + // extensions should also use readx to decode them, for efficiency. + // kInterface will extract the detached byte slice if it has to pass it outside its realm. + DecodeNaked() + + DecodeInt64() (i int64) + DecodeUint64() (ui uint64) + + DecodeFloat32() (f float32) + DecodeFloat64() (f float64) + + DecodeBool() (b bool) + + // DecodeStringAsBytes returns the bytes representing a string. + // It will return a view into scratch buffer or input []byte (if applicable). + // + // Note: This can also decode symbols, if supported. + // + // Users should consume it right away and not store it for later use. + DecodeStringAsBytes() (v []byte, state dBytesAttachState) + + // DecodeBytes returns the bytes representing a binary value. + // It will return a view into scratch buffer or input []byte (if applicable). + DecodeBytes() (out []byte, state dBytesAttachState) + // DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) + + // DecodeExt will decode into an extension. + // ext is never nil. + DecodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) + // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + + // DecodeRawExt will decode into a *RawExt + DecodeRawExt(re *RawExt) + + DecodeTime() (t time.Time) + + // ReadArrayStart will return the length of the array. + // If the format doesn't prefix the length, it returns containerLenUnknown. + // If the expected array was a nil in the stream, it returns containerLenNil. + ReadArrayStart() int + + // ReadMapStart will return the length of the array. + // If the format doesn't prefix the length, it returns containerLenUnknown. + // If the expected array was a nil in the stream, it returns containerLenNil. + ReadMapStart() int + + decDriverContainerTracker + + reset() + + // atEndOfDecode() + + // nextValueBytes will return the bytes representing the next value in the stream. + // It generally will include the last byte read, as that is a part of the next value + // in the stream. 
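To make the ReadArrayStart/ReadMapStart contract concrete, here is a small sketch of the caller-side control flow; the sentinel values are stand-ins chosen for the sketch, not the package's actual containerLenUnknown/containerLenNil constants.

package main

import "fmt"

// Sentinel values local to this sketch.
const (
	lenNil     = -2 // the array/map was nil in the stream
	lenUnknown = -1 // length not prefixed; read until a break marker
)

// drain shows the control flow a caller of a ReadArrayStart-style API needs:
// nil means nothing to read, an unknown length means loop until the source
// reports a break, and a non-negative length means read exactly that many
// elements.
func drain(length int, next func() (string, bool)) []string {
	var out []string
	switch {
	case length == lenNil:
		return nil
	case length == lenUnknown:
		for {
			v, more := next()
			if !more {
				return out
			}
			out = append(out, v)
		}
	default:
		for i := 0; i < length; i++ {
			v, _ := next()
			out = append(out, v)
		}
		return out
	}
}

func main() {
	i, items := 0, []string{"a", "b", "c"}
	next := func() (string, bool) {
		if i == len(items) {
			return "", false
		}
		i++
		return items[i-1], true
	}
	fmt.Println(drain(lenUnknown, next)) // [a b c]
}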
+ nextValueBytes() []byte + + // descBd will describe the token descriptor that signifies what type was decoded + descBd() string + + // isBytes() bool + + resetInBytes(in []byte) + resetInIO(r io.Reader) + + NumBytesRead() int + + init(h Handle, shared *decoderBase, dec decoderI) (fp interface{}) + + // driverStateManager + decNegintPosintFloatNumber +} + +type decInit2er struct{} + +func (decInit2er) init2(dec decoderI) {} + +type decDriverContainerTracker interface { + ReadArrayElem(firstTime bool) + ReadMapElemKey(firstTime bool) + ReadMapElemValue() + ReadArrayEnd() + ReadMapEnd() +} + +type decNegintPosintFloatNumber interface { + decInteger() (ui uint64, neg, ok bool) + decFloat() (f float64, ok bool) +} + +type decDriverNoopNumberHelper struct{} + +func (x decDriverNoopNumberHelper) decInteger() (ui uint64, neg, ok bool) { + panic("decInteger unsupported") +} +func (x decDriverNoopNumberHelper) decFloat() (f float64, ok bool) { panic("decFloat unsupported") } + +type decDriverNoopContainerReader struct{} + +func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { panic("ReadArrayStart unsupported") } +func (x decDriverNoopContainerReader) ReadMapStart() (v int) { panic("ReadMapStart unsupported") } +func (x decDriverNoopContainerReader) ReadArrayEnd() {} +func (x decDriverNoopContainerReader) ReadMapEnd() {} +func (x decDriverNoopContainerReader) ReadArrayElem(firstTime bool) {} +func (x decDriverNoopContainerReader) ReadMapElemKey(firstTime bool) {} +func (x decDriverNoopContainerReader) ReadMapElemValue() {} +func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return } + +// ---- + +type decFnInfo struct { + ti *typeInfo + xfFn Ext + xfTag uint64 + addrD bool // decoding into a pointer is preferred + addrDf bool // force: if addrD, then decode function MUST take a ptr +} + +// DecodeOptions captures configuration options during decode. +type DecodeOptions struct { + // MapType specifies type to use during schema-less decoding of a map in the stream. + // If nil (unset), we default to map[string]interface{} iff json handle and MapKeyAsString=true, + // else map[interface{}]interface{}. + MapType reflect.Type + + // SliceType specifies type to use during schema-less decoding of an array in the stream. + // If nil (unset), we default to []interface{} for all formats. + SliceType reflect.Type + + // MaxInitLen defines the maxinum initial length that we "make" a collection + // (string, slice, map, chan). If 0 or negative, we default to a sensible value + // based on the size of an element in the collection. + // + // For example, when decoding, a stream may say that it has 2^64 elements. + // We should not auto-matically provision a slice of that size, to prevent Out-Of-Memory crash. + // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. + MaxInitLen int + + // ReaderBufferSize is the size of the buffer used when reading. + // + // if > 0, we use a smart buffer internally for performance purposes. + ReaderBufferSize int + + // MaxDepth defines the maximum depth when decoding nested + // maps and slices. If 0 or negative, we default to a suitably large number (currently 1024). + MaxDepth int16 + + // If ErrorIfNoField, return an error when decoding a map + // from a codec stream into a struct, and no matching struct field is found. + ErrorIfNoField bool + + // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. 
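The MaxInitLen rationale above boils down to: never size the initial allocation from a stream-declared length alone. A minimal generic sketch of that capping pattern (names are illustrative):

package main

import "fmt"

// makeCapped allocates at most maxInit elements up front and lets append grow
// the slice if the stream really does carry more, instead of trusting the
// declared length for the initial allocation.
func makeCapped[T any](declared, maxInit int) []T {
	if declared < 0 {
		declared = 0
	}
	return make([]T, 0, min(declared, maxInit))
}

func main() {
	s := makeCapped[int](1<<20, 1024) // stream claims 2^20 elements
	fmt.Println(len(s), cap(s))       // 0 1024: anything beyond this grows by appending
}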
+ // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, + // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). + ErrorIfNoArrayExpand bool + + // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). + SignedInteger bool + + // MapValueReset controls how we decode into a map value. + // + // By default, we MAY retrieve the mapping for a key, and then decode into that. + // However, especially with big maps, that retrieval may be expensive and unnecessary + // if the stream already contains all that is necessary to recreate the value. + // + // If true, we will never retrieve the previous mapping, + // but rather decode into a new value and set that in the map. + // + // If false, we will retrieve the previous mapping if necessary e.g. + // the previous mapping is a pointer, or is a struct or array with pre-set state, + // or is an interface. + MapValueReset bool + + // SliceElementReset: on decoding a slice, reset the element to a zero value first. + // + // concern: if the slice already contained some garbage, we will decode into that garbage. + SliceElementReset bool + + // InterfaceReset controls how we decode into an interface. + // + // By default, when we see a field that is an interface{...}, + // or a map with interface{...} value, we will attempt decoding into the + // "contained" value. + // + // However, this prevents us from reading a string into an interface{} + // that formerly contained a number. + // + // If true, we will decode into a new "blank" value, and set that in the interface. + // If false, we will decode into whatever is contained in the interface. + InterfaceReset bool + + // InternString controls interning of strings during decoding. + // + // Some handles, e.g. json, typically will read map keys as strings. + // If the set of keys are finite, it may help reduce allocation to + // look them up from a map (than to allocate them afresh). + // + // Note: Handles will be smart when using the intern functionality. + // Every string should not be interned. + // An excellent use-case for interning is struct field names, + // or map keys where key type is string. + InternString bool + + // PreferArrayOverSlice controls whether to decode to an array or a slice. + // + // This only impacts decoding into a nil interface{}. + // + // Consequently, it has no effect on codecgen. + // + // *Note*: This only applies if using go1.5 and above, + // as it requires reflect.ArrayOf support which was absent before go1.5. + PreferArrayOverSlice bool + + // DeleteOnNilMapValue controls how to decode a nil value in the stream. + // + // If true, we will delete the mapping of the key. + // Else, just set the mapping to the zero value of the type. + // + // Deprecated: This does NOTHING and is left behind for compiling compatibility. + // This change is necessitated because 'nil' in a stream now consistently + // means the zero value (ie reset the value to its zero state). + DeleteOnNilMapValue bool + + // RawToString controls how raw bytes in a stream are decoded into a nil interface{}. + // By default, they are decoded as []byte, but can be decoded as string (if configured). + RawToString bool + + // ZeroCopy controls whether decoded values of []byte or string type + // point into the input []byte parameter passed to a NewDecoderBytes/ResetBytes(...) call. 
+ // + // To illustrate, if ZeroCopy and decoding from a []byte (not io.Writer), + // then a []byte or string in the output result may just be a slice of (point into) + // the input bytes. + // + // This optimization prevents unnecessary copying. + // + // However, it is made optional, as the caller MUST ensure that the input parameter []byte is + // not modified after the Decode() happens, as any changes are mirrored in the decoded result. + ZeroCopy bool + + // PreferPointerForStructOrArray controls whether a struct or array + // is stored in a nil interface{}, or a pointer to it. + // + // This mostly impacts when we decode registered extensions. + PreferPointerForStructOrArray bool + + // ValidateUnicode controls will cause decoding to fail if an expected unicode + // string is well-formed but include invalid codepoints. + // + // This could have a performance impact. + ValidateUnicode bool +} + +// ---------------------------------------- + +type decoderBase struct { + perType decPerType + + h *BasicHandle + + rtidFn, rtidFnNoExt *atomicRtidFnSlice + + buf []byte + + // used for interning strings + is internerMap + + err error + + // sd decoderI + + blist bytesFreeList + + mtr bool // is maptype a known type? + str bool // is slicetype a known type? + jsms bool // is json handle, and MapKeyAsString + + bytes bool // uses a bytes reader + bufio bool // uses a ioDecReader with buffer size > 0 + + // ---- cpu cache line boundary? + // ---- writable fields during execution --- *try* to keep in sep cache line + maxdepth int16 + depth int16 + + // Extensions can call Decode() within a current Decode() call. + // We need to know when the top level Decode() call returns, + // so we can decide whether to Release() or not. + calls uint16 // what depth in mustDecode are we in now. + + c containerState + + // decByteState + + n fauxUnion + + // b is an always-available scratch buffer used by Decoder and decDrivers. + // By being always-available, it can be used for one-off things without + // having to get from freelist, use, and return back to freelist. + // + // Use it for a narrow set of things e.g. + // - binc uses it for parsing numbers, represented at 8 or less bytes + // - uses as potential buffer for struct field names + b [decScratchByteArrayLen]byte + + hh Handle + // cache the mapTypeId and sliceTypeId for faster comparisons + mtid uintptr + stid uintptr +} + +func (d *decoderBase) maxInitLen() uint { + return uint(max(1024, d.h.MaxInitLen)) +} + +func (d *decoderBase) naked() *fauxUnion { + return &d.n +} + +func (d *decoderBase) fauxUnionReadRawBytes(dr decDriverI, asString, rawToString bool) { //, handleZeroCopy bool) { + // fauxUnion is only used within DecodeNaked calls; consequently, we should try to intern. + d.n.l, d.n.a = dr.DecodeBytes() + if asString || rawToString { + d.n.v = valueTypeString + d.n.s = d.detach2Str(d.n.l, d.n.a) + } else { + d.n.v = valueTypeBytes + d.n.l = d.detach2Bytes(d.n.l, d.n.a) + } +} + +// Return a fixed (detached) string representation of a []byte. +// +// Possibly get an interned version of a string, +// iff InternString=true and decoding a map key. +// +// This should mostly be used for map keys, struct field names, etc +// where the key type is string. This is because keys of a map/struct are +// typically reused across many objects. 
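A short usage sketch showing how these options are typically set, assuming the CBOR handle promotes the DecodeOptions fields (as the d.h accesses in the driver code suggest); the chosen values are arbitrary.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.CborHandle
	// DecodeOptions fields are promoted through the handle (see the struct above).
	h.MaxDepth = 64      // bound nesting of maps/slices
	h.MaxInitLen = 4096  // cap initial allocations for stream-declared lengths
	h.ZeroCopy = true    // returned []byte/string may alias the input buffer
	h.RawToString = true // raw bytes decode into string for interface{} targets

	in := []byte{0x83, 0x01, 0x02, 0x03} // CBOR for the array [1, 2, 3]
	var v interface{}
	if err := codec.NewDecoderBytes(in, &h).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println(v) // [1 2 3]
}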
+func (d *decoderBase) detach2Str(v []byte, state dBytesAttachState) (s string) { + // note: string([]byte) checks - and optimizes - for len 0 and len 1 + if len(v) <= 1 { + s = string(v) + } else if state >= dBytesAttachViewZerocopy { // !scratchBuf && d.bytes && d.h.ZeroCopy + s = stringView(v) + } else if d.is == nil || d.c != containerMapKey || len(v) > internMaxStrLen { + s = string(v) + } else { + s = d.is.string(v) + } + return +} + +func (d *decoderBase) usableStructFieldNameBytes(buf, v []byte, state dBytesAttachState) (out []byte) { + // In JSON, mapElemValue reads a colon and spaces. + // In bufio mode of ioDecReader, fillbuf could overwrite the read buffer + // which readXXX() calls return sub-slices from. + // + // Consequently, we detach the bytes in this special case. + // + // Note: ioDecReader (non-bufio) and bytesDecReader do not have + // this issue (as no fillbuf exists where bytes might be returned from). + if d.bufio && d.h.jsonHandle && state < dBytesAttachViewZerocopy { + if cap(buf) > len(v) { + out = buf[:len(v)] + } else if len(d.b) > len(v) { + out = d.b[:len(v)] + } else { + out = make([]byte, len(v), max(64, len(v))) + } + copy(out, v) + return + } + return v +} + +func (d *decoderBase) detach2Bytes(in []byte, state dBytesAttachState) (out []byte) { + if cap(in) == 0 || state >= dBytesAttachViewZerocopy { + return in + } + if len(in) == 0 { + return zeroByteSlice + } + out = make([]byte, len(in)) + copy(out, in) + return out +} + +func (d *decoderBase) attachState(usingBufFromReader bool) (r dBytesAttachState) { + if usingBufFromReader { + r = dBytesAttachBuffer + } else if !d.bytes { + r = dBytesDetach + } else if d.h.ZeroCopy { + r = dBytesAttachViewZerocopy + } else { + r = dBytesAttachView + } + return +} + +func (d *decoderBase) mapStart(v int) int { + if v != containerLenNil { + d.depthIncr() + d.c = containerMapStart + } + return v +} + +func (d *decoderBase) HandleName() string { + return d.hh.Name() +} + +func (d *decoderBase) isBytes() bool { + return d.bytes +} + +type decoderI interface { + Decode(v interface{}) (err error) + HandleName() string + MustDecode(v interface{}) + NumBytesRead() int + Release() // deprecated + Reset(r io.Reader) + ResetBytes(in []byte) + ResetString(s string) + + isBytes() bool + wrapErr(v error, err *error) + swallow() + + nextValueBytes() []byte // wrapper method, for use in tests + // getDecDriver() decDriverI + + decode(v interface{}) + decodeAs(v interface{}, t reflect.Type, ext bool) + + interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) +} + +var errDecNoResetBytesWithReader = errors.New("cannot reset an Decoder reading from []byte with a io.Reader") +var errDecNoResetReaderWithBytes = errors.New("cannot reset an Decoder reading from io.Reader with a []byte") + +func setZero(iv interface{}) { + rv, isnil := isNil(iv, false) + if isnil { + return + } + if !rv.IsValid() { + rv = reflect.ValueOf(iv) + } + if isnilBitset.isset(byte(rv.Kind())) && rvIsNil(rv) { + return + } + // var canDecode bool + switch v := iv.(type) { + case *string: + *v = "" + case *bool: + *v = false + case *int: + *v = 0 + case *int8: + *v = 0 + case *int16: + *v = 0 + case *int32: + *v = 0 + case *int64: + *v = 0 + case *uint: + *v = 0 + case *uint8: + *v = 0 + case *uint16: + *v = 0 + case *uint32: + *v = 0 + case *uint64: + *v = 0 + case *float32: + *v = 0 + case *float64: + *v = 0 + case *complex64: + *v = 0 + case *complex128: + *v = 0 + case *[]byte: + *v = nil + case *Raw: + *v = nil + case *time.Time: + *v = time.Time{} + 
case reflect.Value: + decSetNonNilRV2Zero(v) + default: + if !fastpathDecodeSetZeroTypeSwitch(iv) { + decSetNonNilRV2Zero(rv) + } + } +} + +// decSetNonNilRV2Zero will set the non-nil value to its zero value. +func decSetNonNilRV2Zero(v reflect.Value) { + // If not decodeable (settable), we do not touch it. + // We considered empty'ing it if not decodeable e.g. + // - if chan, drain it + // - if map, clear it + // - if slice or array, zero all elements up to len + // + // However, we decided instead that we either will set the + // whole value to the zero value, or leave AS IS. + + k := v.Kind() + if k == reflect.Interface { + decSetNonNilRV2Zero4Intf(v) + } else if k == reflect.Ptr { + decSetNonNilRV2Zero4Ptr(v) + } else if v.CanSet() { + rvSetDirectZero(v) + } +} + +func decSetNonNilRV2Zero4Ptr(v reflect.Value) { + ve := v.Elem() + if ve.CanSet() { + rvSetZero(ve) // we can have a pointer to an interface + } else if v.CanSet() { + rvSetZero(v) + } +} + +func decSetNonNilRV2Zero4Intf(v reflect.Value) { + ve := v.Elem() + if ve.CanSet() { + rvSetDirectZero(ve) // interfaces always have element as a non-interface + } else if v.CanSet() { + rvSetZero(v) + } +} + +func (d *decoderBase) arrayCannotExpand(sliceLen, streamLen int) { + if d.h.ErrorIfNoArrayExpand { + halt.errorf("cannot expand array len during decode from %v to %v", any(sliceLen), any(streamLen)) + } +} + +//go:noinline +func (d *decoderBase) haltAsNotDecodeable(rv reflect.Value) { + if !rv.IsValid() { + halt.onerror(errCannotDecodeIntoNil) + } + // check if an interface can be retrieved, before grabbing an interface + if !rv.CanInterface() { + halt.errorf("cannot decode into a value without an interface: %v", rv) + } + halt.errorf("cannot decode into value of kind: %v, %#v", rv.Kind(), rv2i(rv)) +} + +func (d *decoderBase) depthIncr() { + d.depth++ + if d.depth >= d.maxdepth { + halt.onerror(errMaxDepthExceeded) + } +} + +func (d *decoderBase) depthDecr() { + d.depth-- +} + +func (d *decoderBase) arrayStart(v int) int { + if v != containerLenNil { + d.depthIncr() + d.c = containerArrayStart + } + return v +} + +func (d *decoderBase) oneShotAddrRV(rvt reflect.Type, rvk reflect.Kind) reflect.Value { + // MARKER 2025: is this slow for calling oneShot? + if decUseTransient && d.h.getTypeInfo4RT(baseRT(rvt)).flagCanTransient { + return d.perType.TransientAddrK(rvt, rvk) + } + return rvZeroAddrK(rvt, rvk) +} + +// decNegintPosintFloatNumberHelper is used for formats that are binary +// and have distinct ways of storing positive integers vs negative integers +// vs floats, which are uniquely identified by the byte descriptor. +// +// Currently, these formats are binc, cbor and simple. 
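For cbor in particular, a negative integer n is stored as the unsigned value -1-n under its own major type, so decoding maps a stored ui back to -(ui+1); that is what the incrIfNeg path in the helper below implements. A minimal sketch of the round trip, ignoring overflow checks:

package main

import "fmt"

// encNeg/decNeg show the cbor negative-integer mapping performed by the
// driver's EncodeInt and by the int64 helper in this file.
func encNeg(n int64) uint64  { return uint64(-1 - n) }
func decNeg(ui uint64) int64 { return -int64(ui + 1) }

func main() {
	for _, n := range []int64{-1, -42, -500} {
		ui := encNeg(n)
		fmt.Printf("%d -> stored %d -> %d\n", n, ui, decNeg(ui))
	}
}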
+type decNegintPosintFloatNumberHelper struct { + d decDriverI +} + +func (x decNegintPosintFloatNumberHelper) uint64(ui uint64, neg, ok bool) uint64 { + if ok && !neg { + return ui + } + return x.uint64TryFloat(ok) +} + +func (x decNegintPosintFloatNumberHelper) uint64TryFloat(neg bool) (ui uint64) { + if neg { // neg = true + halt.errorStr("assigning negative signed value to unsigned type") + } + f, ok := x.d.decFloat() + if !(ok && f >= 0 && noFrac64(math.Float64bits(f))) { + halt.errorStr2("invalid number loading uint64, with descriptor: ", x.d.descBd()) + } + return uint64(f) +} + +func (x decNegintPosintFloatNumberHelper) int64(ui uint64, neg, ok, cbor bool) (i int64) { + if ok { + return decNegintPosintFloatNumberHelperInt64v(ui, neg, cbor) + } + // return x.int64TryFloat() + // } + // func (x decNegintPosintFloatNumberHelper) int64TryFloat() (i int64) { + f, ok := x.d.decFloat() + if !(ok && noFrac64(math.Float64bits(f))) { + halt.errorf("invalid number loading uint64 (%v), with descriptor: %s", f, x.d.descBd()) + } + return int64(f) +} + +func (x decNegintPosintFloatNumberHelper) float64(f float64, ok, cbor bool) float64 { + if ok { + return f + } + return x.float64TryInteger(cbor) +} + +func (x decNegintPosintFloatNumberHelper) float64TryInteger(cbor bool) float64 { + ui, neg, ok := x.d.decInteger() + if !ok { + halt.errorStr2("invalid descriptor for float: ", x.d.descBd()) + } + return float64(decNegintPosintFloatNumberHelperInt64v(ui, neg, cbor)) +} + +func decNegintPosintFloatNumberHelperInt64v(ui uint64, neg, incrIfNeg bool) (i int64) { + if neg && incrIfNeg { + ui++ + } + i = chkOvf.SignedIntV(ui) + if neg { + i = -i + } + return +} + +// isDecodeable checks if value can be decoded into +// +// decode can take any reflect.Value that is a inherently addressable i.e. +// - non-nil chan (we will SEND to it) +// - non-nil slice (we will set its elements) +// - non-nil map (we will put into it) +// - non-nil pointer (we can "update" it) +// - func: no +// - interface: no +// - array: if canAddr=true +// - any other value pointer: if canAddr=true +func isDecodeable(rv reflect.Value) (canDecode bool, reason decNotDecodeableReason) { + switch rv.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Chan, reflect.Map: + canDecode = !rvIsNil(rv) + reason = decNotDecodeableReasonNilReference + case reflect.Func, reflect.Interface, reflect.Invalid, reflect.UnsafePointer: + reason = decNotDecodeableReasonBadKind + default: + canDecode = rv.CanAddr() + reason = decNotDecodeableReasonNonAddrValue + } + return +} + +// decInferLen will infer a sensible length, given the following: +// - clen: length wanted. +// - maxlen: max length to be returned. +// if <= 0, it is unset, and we infer it based on the unit size +// - unit: number of bytes for each element of the collection +func decInferLen(clen int, maxlen, unit uint) (n uint) { + // anecdotal testing showed increase in allocation with map length of 16. + // We saw same typical alloc from 0-8, then a 20% increase at 16. + // Thus, we set it to 8. + + const ( + minLenIfUnset = 8 + maxMem = 1024 * 1024 // 1 MB Memory + ) + + // handle when maxlen is not set i.e. <= 0 + + // clen==0: use 0 + // maxlen<=0, clen<0: use default + // maxlen> 0, clen<0: use default + // maxlen<=0, clen>0: infer maxlen, and cap on it + // maxlen> 0, clen>0: cap at maxlen + + if clen == 0 || clen == containerLenNil { + return 0 + } + if clen < 0 { + // if unspecified, return 64 for bytes, ... 8 for uint64, ... 
and everything else + return max(64/unit, minLenIfUnset) + } + if unit == 0 { + return uint(clen) + } + if maxlen == 0 { + maxlen = maxMem / unit + } + return min(uint(clen), maxlen) +} + +type Decoder struct { + decoderI +} + +// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. +// +// For efficiency, Users are encouraged to configure ReaderBufferSize on the handle +// OR pass in a memory buffered reader (eg bufio.Reader, bytes.Buffer). +func NewDecoder(r io.Reader, h Handle) *Decoder { + return &Decoder{h.newDecoder(r)} +} + +// NewDecoderBytes returns a Decoder which efficiently decodes directly +// from a byte slice with zero copying. +func NewDecoderBytes(in []byte, h Handle) *Decoder { + return &Decoder{h.newDecoderBytes(in)} +} + +// NewDecoderString returns a Decoder which efficiently decodes directly +// from a string with zero copying. +// +// It is a convenience function that calls NewDecoderBytes with a +// []byte view into the string. +// +// This can be an efficient zero-copy if using default mode i.e. without codec.safe tag. +func NewDecoderString(s string, h Handle) *Decoder { + return NewDecoderBytes(bytesView(s), h) +} + +// ---- + +func sideDecode(h Handle, p *sync.Pool, fn func(decoderI)) { + var s decoderI + if usePoolForSideDecode { + s = p.Get().(decoderI) + defer p.Put(s) + } else { + // initialization cycle error + // s = NewDecoderBytes(nil, h).decoderI + s = p.New().(decoderI) + } + fn(s) +} + +func oneOffDecode(sd decoderI, v interface{}, in []byte, basetype reflect.Type, ext bool) { + sd.ResetBytes(in) + sd.decodeAs(v, basetype, ext) + // d.sideDecoder(xbs) + // d.sideDecode(rv, basetype) +} + +func bytesOKdbi(v []byte, _ dBytesIntoState) []byte { + return v +} + +func bytesOKs(bs []byte, _ dBytesAttachState) []byte { + return bs +} diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go index f98c8ff2d..eedea89ee 100644 --- a/vendor/github.com/ugorji/go/codec/decode.go +++ b/vendor/github.com/ugorji/go/codec/decode.go @@ -1,3 +1,5 @@ +//go:build notmono || codec.notmono + // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. @@ -5,473 +7,163 @@ package codec import ( "encoding" - "errors" "io" - "math" "reflect" "strconv" + "sync" "time" ) -const msgBadDesc = "unrecognized descriptor byte" +type helperDecDriver[T decDriver] struct{} -const ( - decDefMaxDepth = 1024 // maximum depth - decDefChanCap = 64 // should be large, as cap cannot be expanded - decScratchByteArrayLen = (8 + 2 + 2 + 1) * 8 // around cacheLineSize ie ~64, depending on Decoder size - - // MARKER: massage decScratchByteArrayLen to ensure xxxDecDriver structs fit within cacheLine*N - - // decFailNonEmptyIntf configures whether we error - // when decoding naked into a non-empty interface. - // - // Typically, we cannot decode non-nil stream value into - // nil interface with methods (e.g. io.Reader). - // However, in some scenarios, this should be allowed: - // - MapType - // - SliceType - // - Extensions - // - // Consequently, we should relax this. Put it behind a const flag for now. - decFailNonEmptyIntf = false - - // decUseTransient says that we should not use the transient optimization. - // - // There's potential for GC corruption or memory overwrites if transient isn't - // used carefully, so this flag helps turn it off quickly if needed. - // - // Use it everywhere needed so we can completely remove unused code blocks. 
- decUseTransient = true -) - -var ( - errNeedMapOrArrayDecodeToStruct = errors.New("only encoded map or array can decode into struct") - errCannotDecodeIntoNil = errors.New("cannot decode into nil") - - errExpandSliceCannotChange = errors.New("expand slice: cannot change") - - errDecoderNotInitialized = errors.New("Decoder not initialized") - - errDecUnreadByteNothingToRead = errors.New("cannot unread - nothing has been read") - errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read") - errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown") - errMaxDepthExceeded = errors.New("maximum decoding depth exceeded") -) - -// decByteState tracks where the []byte returned by the last call -// to DecodeBytes or DecodeStringAsByte came from -type decByteState uint8 - -const ( - decByteStateNone decByteState = iota - decByteStateZerocopy // view into []byte that we are decoding from - decByteStateReuseBuf // view into transient buffer used internally by decDriver - // decByteStateNewAlloc -) - -type decNotDecodeableReason uint8 - -const ( - decNotDecodeableReasonUnknown decNotDecodeableReason = iota - decNotDecodeableReasonBadKind - decNotDecodeableReasonNonAddrValue - decNotDecodeableReasonNilReference -) - -type decDriver interface { - // this will check if the next token is a break. - CheckBreak() bool - - // TryNil tries to decode as nil. - // If a nil is in the stream, it consumes it and returns true. - // - // Note: if TryNil returns true, that must be handled. - TryNil() bool - - // ContainerType returns one of: Bytes, String, Nil, Slice or Map. - // - // Return unSet if not known. - // - // Note: Implementations MUST fully consume sentinel container types, specifically Nil. - ContainerType() (vt valueType) - - // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. - // For maps and arrays, it will not do the decoding in-band, but will signal - // the decoder, so that is done later, by setting the fauxUnion.valueType field. - // - // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). - // for extensions, DecodeNaked must read the tag and the []byte if it exists. - // if the []byte is not read, then kInterfaceNaked will treat it as a Handle - // that stores the subsequent value in-band, and complete reading the RawExt. - // - // extensions should also use readx to decode them, for efficiency. - // kInterface will extract the detached byte slice if it has to pass it outside its realm. - DecodeNaked() - - DecodeInt64() (i int64) - DecodeUint64() (ui uint64) - - DecodeFloat64() (f float64) - DecodeBool() (b bool) - - // DecodeStringAsBytes returns the bytes representing a string. - // It will return a view into scratch buffer or input []byte (if applicable). - // - // Note: This can also decode symbols, if supported. - // - // Users should consume it right away and not store it for later use. - DecodeStringAsBytes() (v []byte) - - // DecodeBytes returns the bytes representing a binary value. - // It will return a view into scratch buffer or input []byte (if applicable). 
- // - // All implementations must honor the contract below: - // if ZeroCopy and applicable, return a view into input []byte we are decoding from - // else if in == nil, return a view into scratch buffer - // else append decoded value to in[:0] and return that - // (this can be simulated by passing []byte{} as in parameter) - // - // Implementations must also update Decoder.decByteState on each call to - // DecodeBytes or DecodeStringAsBytes. Some callers may check that and work appropriately. - // - // Note: DecodeBytes may decode past the length of the passed byte slice, up to the cap. - // Consequently, it is ok to pass a zero-len slice to DecodeBytes, as the returned - // byte slice will have the appropriate length. - DecodeBytes(in []byte) (out []byte) - // DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) - - // DecodeExt will decode into a *RawExt or into an extension. - DecodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) - // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) - - DecodeTime() (t time.Time) - - // ReadArrayStart will return the length of the array. - // If the format doesn't prefix the length, it returns containerLenUnknown. - // If the expected array was a nil in the stream, it returns containerLenNil. - ReadArrayStart() int - - // ReadMapStart will return the length of the array. - // If the format doesn't prefix the length, it returns containerLenUnknown. - // If the expected array was a nil in the stream, it returns containerLenNil. - ReadMapStart() int - - reset() - - // atEndOfDecode() - - // nextValueBytes will return the bytes representing the next value in the stream. - // - // if start is nil, then treat it as a request to discard the next set of bytes, - // and the return response does not matter. - // Typically, this means that the returned []byte is nil/empty/undefined. - // - // Optimize for decoding from a []byte, where the nextValueBytes will just be a sub-slice - // of the input slice. Callers that need to use this to not be a view into the input bytes - // should handle it appropriately. - nextValueBytes(start []byte) []byte - - // descBd will describe the token descriptor that signifies what type was decoded - descBd() string - - decoder() *Decoder - - driverStateManager - decNegintPosintFloatNumber +// decFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type decFn[T decDriver] struct { + i decFnInfo + fd func(*decoder[T], *decFnInfo, reflect.Value) + // _ [1]uint64 // padding (cache-aligned) } -type decDriverContainerTracker interface { - ReadArrayElem() - ReadMapElemKey() - ReadMapElemValue() - ReadArrayEnd() - ReadMapEnd() +type decRtidFn[T decDriver] struct { + rtid uintptr + fn *decFn[T] } -type decNegintPosintFloatNumber interface { - decInteger() (ui uint64, neg, ok bool) - decFloat() (f float64, ok bool) +// ---- + +// Decoder reads and decodes an object from an input stream in a supported format. +// +// Decoder is NOT safe for concurrent use i.e. a Decoder cannot be used +// concurrently in multiple goroutines. +// +// However, as Decoder could be allocation heavy to initialize, a Reset method is provided +// so its state can be reused to decode new input streams repeatedly. +// This is the idiomatic way to use. 
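Editor's aside, not part of the vendored file: the doc comment above describes reusing one Decoder across inputs via Reset/ResetBytes. A minimal hedged sketch of that pattern using the package's public API (NewDecoderBytes, Decode, ResetBytes, all visible earlier in this diff); JsonHandle and the literal inputs are assumptions for illustration only.

import "github.com/ugorji/go/codec"

func decodeTwice() (first, second map[string]int, err error) {
	var h codec.JsonHandle
	// One Decoder, reused for two byte slices instead of allocating a new one each time.
	d := codec.NewDecoderBytes([]byte(`{"x":1}`), &h)
	if err = d.Decode(&first); err != nil {
		return
	}
	d.ResetBytes([]byte(`{"y":2}`)) // clear prior state and point the Decoder at the new input
	err = d.Decode(&second)
	return
}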
+type decoder[T decDriver] struct { + dh helperDecDriver[T] + fp *fastpathDs[T] + d T + decoderBase } -type decDriverNoopNumberHelper struct{} - -func (x decDriverNoopNumberHelper) decInteger() (ui uint64, neg, ok bool) { - panic("decInteger unsupported") -} -func (x decDriverNoopNumberHelper) decFloat() (f float64, ok bool) { panic("decFloat unsupported") } - -type decDriverNoopContainerReader struct{} - -// func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { panic("ReadArrayStart unsupported") } -// func (x decDriverNoopContainerReader) ReadMapStart() (v int) { panic("ReadMapStart unsupported") } -func (x decDriverNoopContainerReader) ReadArrayEnd() {} -func (x decDriverNoopContainerReader) ReadMapEnd() {} -func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return } - -// DecodeOptions captures configuration options during decode. -type DecodeOptions struct { - // MapType specifies type to use during schema-less decoding of a map in the stream. - // If nil (unset), we default to map[string]interface{} iff json handle and MapKeyAsString=true, - // else map[interface{}]interface{}. - MapType reflect.Type - - // SliceType specifies type to use during schema-less decoding of an array in the stream. - // If nil (unset), we default to []interface{} for all formats. - SliceType reflect.Type - - // MaxInitLen defines the maxinum initial length that we "make" a collection - // (string, slice, map, chan). If 0 or negative, we default to a sensible value - // based on the size of an element in the collection. - // - // For example, when decoding, a stream may say that it has 2^64 elements. - // We should not auto-matically provision a slice of that size, to prevent Out-Of-Memory crash. - // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. - MaxInitLen int - - // ReaderBufferSize is the size of the buffer used when reading. - // - // if > 0, we use a smart buffer internally for performance purposes. - ReaderBufferSize int - - // MaxDepth defines the maximum depth when decoding nested - // maps and slices. If 0 or negative, we default to a suitably large number (currently 1024). - MaxDepth int16 - - // If ErrorIfNoField, return an error when decoding a map - // from a codec stream into a struct, and no matching struct field is found. - ErrorIfNoField bool - - // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. - // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, - // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). - ErrorIfNoArrayExpand bool - - // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). - SignedInteger bool - - // MapValueReset controls how we decode into a map value. - // - // By default, we MAY retrieve the mapping for a key, and then decode into that. - // However, especially with big maps, that retrieval may be expensive and unnecessary - // if the stream already contains all that is necessary to recreate the value. - // - // If true, we will never retrieve the previous mapping, - // but rather decode into a new value and set that in the map. - // - // If false, we will retrieve the previous mapping if necessary e.g. - // the previous mapping is a pointer, or is a struct or array with pre-set state, - // or is an interface. - MapValueReset bool - - // SliceElementReset: on decoding a slice, reset the element to a zero value first. 
- // - // concern: if the slice already contained some garbage, we will decode into that garbage. - SliceElementReset bool - - // InterfaceReset controls how we decode into an interface. - // - // By default, when we see a field that is an interface{...}, - // or a map with interface{...} value, we will attempt decoding into the - // "contained" value. - // - // However, this prevents us from reading a string into an interface{} - // that formerly contained a number. - // - // If true, we will decode into a new "blank" value, and set that in the interface. - // If false, we will decode into whatever is contained in the interface. - InterfaceReset bool - - // InternString controls interning of strings during decoding. - // - // Some handles, e.g. json, typically will read map keys as strings. - // If the set of keys are finite, it may help reduce allocation to - // look them up from a map (than to allocate them afresh). - // - // Note: Handles will be smart when using the intern functionality. - // Every string should not be interned. - // An excellent use-case for interning is struct field names, - // or map keys where key type is string. - InternString bool - - // PreferArrayOverSlice controls whether to decode to an array or a slice. - // - // This only impacts decoding into a nil interface{}. - // - // Consequently, it has no effect on codecgen. - // - // *Note*: This only applies if using go1.5 and above, - // as it requires reflect.ArrayOf support which was absent before go1.5. - PreferArrayOverSlice bool - - // DeleteOnNilMapValue controls how to decode a nil value in the stream. - // - // If true, we will delete the mapping of the key. - // Else, just set the mapping to the zero value of the type. - // - // Deprecated: This does NOTHING and is left behind for compiling compatibility. - // This change is necessitated because 'nil' in a stream now consistently - // means the zero value (ie reset the value to its zero state). - DeleteOnNilMapValue bool - - // RawToString controls how raw bytes in a stream are decoded into a nil interface{}. - // By default, they are decoded as []byte, but can be decoded as string (if configured). - RawToString bool - - // ZeroCopy controls whether decoded values of []byte or string type - // point into the input []byte parameter passed to a NewDecoderBytes/ResetBytes(...) call. - // - // To illustrate, if ZeroCopy and decoding from a []byte (not io.Writer), - // then a []byte or string in the output result may just be a slice of (point into) - // the input bytes. - // - // This optimization prevents unnecessary copying. - // - // However, it is made optional, as the caller MUST ensure that the input parameter []byte is - // not modified after the Decode() happens, as any changes are mirrored in the decoded result. - ZeroCopy bool - - // PreferPointerForStructOrArray controls whether a struct or array - // is stored in a nil interface{}, or a pointer to it. - // - // This mostly impacts when we decode registered extensions. - PreferPointerForStructOrArray bool - - // ValidateUnicode controls will cause decoding to fail if an expected unicode - // string is well-formed but include invalid codepoints. - // - // This could have a performance impact. 
- ValidateUnicode bool +func (d *decoder[T]) rawExt(f *decFnInfo, rv reflect.Value) { + d.d.DecodeRawExt(rv2i(rv).(*RawExt)) } -// ---------------------------------------- - -func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) { - d.d.DecodeExt(rv2i(rv), f.ti.rt, 0, nil) -} - -func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) ext(f *decFnInfo, rv reflect.Value) { d.d.DecodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) } -func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) { - rv2i(rv).(Selfer).CodecDecodeSelf(d) +func (d *decoder[T]) selferUnmarshal(_ *decFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(&Decoder{d}) } -func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) binaryUnmarshal(_ *decFnInfo, rv reflect.Value) { bm := rv2i(rv).(encoding.BinaryUnmarshaler) - xbs := d.d.DecodeBytes(nil) + xbs, _ := d.d.DecodeBytes() fnerr := bm.UnmarshalBinary(xbs) - d.onerror(fnerr) + halt.onerror(fnerr) } -func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) textUnmarshal(_ *decFnInfo, rv reflect.Value) { tm := rv2i(rv).(encoding.TextUnmarshaler) - fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes()) - d.onerror(fnerr) + fnerr := tm.UnmarshalText(bytesOKs(d.d.DecodeStringAsBytes())) + halt.onerror(fnerr) } -func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) jsonUnmarshal(_ *decFnInfo, rv reflect.Value) { d.jsonUnmarshalV(rv2i(rv).(jsonUnmarshaler)) } -func (d *Decoder) jsonUnmarshalV(tm jsonUnmarshaler) { +func (d *decoder[T]) jsonUnmarshalV(tm jsonUnmarshaler) { // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - var bs0 = []byte{} - if !d.bytes { - bs0 = d.blist.get(256) - } - bs := d.d.nextValueBytes(bs0) - fnerr := tm.UnmarshalJSON(bs) - if !d.bytes { - d.blist.put(bs) - if !byteSliceSameData(bs0, bs) { - d.blist.put(bs0) - } - } - d.onerror(fnerr) + halt.onerror(tm.UnmarshalJSON(d.d.nextValueBytes())) } -func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) { - d.errorf("no decoding function defined for kind %v", rv.Kind()) +func (d *decoder[T]) kErr(_ *decFnInfo, rv reflect.Value) { + halt.errorf("unsupported decoding kind: %s, for %#v", rv.Kind(), rv) + // halt.errorStr2("no decoding function defined for kind: ", rv.Kind().String()) } -func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) raw(_ *decFnInfo, rv reflect.Value) { rvSetBytes(rv, d.rawBytes()) } -func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { - rvSetString(rv, d.stringZC(d.d.DecodeStringAsBytes())) +func (d *decoder[T]) kString(_ *decFnInfo, rv reflect.Value) { + rvSetString(rv, d.detach2Str(d.d.DecodeStringAsBytes())) } -func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kBool(_ *decFnInfo, rv reflect.Value) { rvSetBool(rv, d.d.DecodeBool()) } -func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kTime(_ *decFnInfo, rv reflect.Value) { rvSetTime(rv, d.d.DecodeTime()) } -func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { - rvSetFloat32(rv, d.decodeFloat32()) +func (d *decoder[T]) kFloat32(_ *decFnInfo, rv reflect.Value) { + rvSetFloat32(rv, d.d.DecodeFloat32()) } -func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kFloat64(_ *decFnInfo, rv reflect.Value) { rvSetFloat64(rv, d.d.DecodeFloat64()) } -func (d *Decoder) kComplex64(f *codecFnInfo, rv reflect.Value) { - 
rvSetComplex64(rv, complex(d.decodeFloat32(), 0)) +func (d *decoder[T]) kComplex64(_ *decFnInfo, rv reflect.Value) { + rvSetComplex64(rv, complex(d.d.DecodeFloat32(), 0)) } -func (d *Decoder) kComplex128(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kComplex128(_ *decFnInfo, rv reflect.Value) { rvSetComplex128(rv, complex(d.d.DecodeFloat64(), 0)) } -func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kInt(_ *decFnInfo, rv reflect.Value) { rvSetInt(rv, int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))) } -func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kInt8(_ *decFnInfo, rv reflect.Value) { rvSetInt8(rv, int8(chkOvf.IntV(d.d.DecodeInt64(), 8))) } -func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kInt16(_ *decFnInfo, rv reflect.Value) { rvSetInt16(rv, int16(chkOvf.IntV(d.d.DecodeInt64(), 16))) } -func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kInt32(_ *decFnInfo, rv reflect.Value) { rvSetInt32(rv, int32(chkOvf.IntV(d.d.DecodeInt64(), 32))) } -func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kInt64(_ *decFnInfo, rv reflect.Value) { rvSetInt64(rv, d.d.DecodeInt64()) } -func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kUint(_ *decFnInfo, rv reflect.Value) { rvSetUint(rv, uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) } -func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kUintptr(_ *decFnInfo, rv reflect.Value) { rvSetUintptr(rv, uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) } -func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kUint8(_ *decFnInfo, rv reflect.Value) { rvSetUint8(rv, uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))) } -func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kUint16(_ *decFnInfo, rv reflect.Value) { rvSetUint16(rv, uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))) } -func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kUint32(_ *decFnInfo, rv reflect.Value) { rvSetUint32(rv, uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))) } -func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kUint64(_ *decFnInfo, rv reflect.Value) { rvSetUint64(rv, d.d.DecodeUint64()) } -func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { +func (d *decoder[T]) kInterfaceNaked(f *decFnInfo) (rvn reflect.Value) { // nil interface: // use some hieristics to decode it appropriately // based on the detected next value in the stream. @@ -486,8 +178,12 @@ func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { // // Consequently, we should relax this. Put it behind a const flag for now. if decFailNonEmptyIntf && f.ti.numMeth > 0 { - d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + halt.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) } + + // We generally make a pointer to the container here, and pass along, + // so that they will be initialized later when we know the length of the collection. + switch n.v { case valueTypeMap: mtid := d.mtid @@ -511,6 +207,9 @@ func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { d.decode(rv2i(rvn)) rvn = rvn.Elem() } else { + // // made map is fully initialized for direct modification. + // // There's no need to make a pointer to it first. 
+ // rvn = makeMapReflect(d.h.MapType, 0) rvn = rvZeroAddrK(d.h.MapType, reflect.Map) d.decodeValue(rvn, nil) } @@ -527,7 +226,7 @@ func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { rvn = rvZeroAddrK(d.h.SliceType, reflect.Slice) d.decodeValue(rvn, nil) } - if reflectArrayOfSupported && d.h.PreferArrayOverSlice { + if d.h.PreferArrayOverSlice { rvn = rvGetArray4Slice(rvn) } case valueTypeExt: @@ -535,20 +234,18 @@ func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { bfn := d.h.getExtForTag(tag) var re = RawExt{Tag: tag} if bytes == nil { - // it is one of the InterfaceExt ones: json and cbor. - // most likely cbor, as json decoding never reveals valueTypeExt (no tagging support) + // one of the InterfaceExt ones: json and cbor. + // (likely cbor, as json has no tagging support and won't reveal valueTypeExt) if bfn == nil { d.decode(&re.Value) rvn = rv4iptr(&re).Elem() + } else if bfn.ext == SelfExt { + rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind()) + d.decodeValue(rvn, d.fnNoExt(bfn.rt)) } else { - if bfn.ext == SelfExt { - rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind()) - d.decodeValue(rvn, d.h.fnNoExt(bfn.rt)) - } else { - rvn = reflect.New(bfn.rt) - d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext) - rvn = rvn.Elem() - } + rvn = reflect.New(bfn.rt) + d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext) + rvn = rvn.Elem() } } else { // one of the BytesExt ones: binc, msgpack, simple @@ -558,7 +255,7 @@ func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { } else { rvn = reflect.New(bfn.rt) if bfn.ext == SelfExt { - d.sideDecode(rv2i(rvn), bfn.rt, bytes) + sideDecode(d.hh, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv2i(rvn), bytes, bfn.rt, true) }) } else { bfn.ext.ReadExt(rv2i(rvn), bytes) } @@ -589,12 +286,12 @@ func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { case valueTypeTime: rvn = n.rt() default: - halt.errorf("kInterfaceNaked: unexpected valueType: %d", n.v) + halt.errorStr2("kInterfaceNaked: unexpected valueType: ", n.v.String()) } return } -func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kInterface(f *decFnInfo, rv reflect.Value) { // Note: A consequence of how kInterface works, is that // if an interface already contains something, we try // to decode into what was there before. 
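Editor's aside, not part of the vendored file: the kInterface comment above says a non-nil interface is, by default, decoded into the value it already contains, and the InterfaceReset option (documented in the removed DecodeOptions block earlier in this hunk) switches to decoding into a fresh blank value instead. A hedged sketch under those assumptions; handle type and inputs are illustrative only.

import "github.com/ugorji/go/codec"

func decodeIntoNonEmptyInterface() (err error) {
	var h codec.JsonHandle
	// v already holds a map, so by default the decoder decodes into that contained map.
	var v interface{} = map[string]int{"keep": 1}
	if err = codec.NewDecoderBytes([]byte(`{"a":1}`), &h).Decode(&v); err != nil {
		return
	}
	// With InterfaceReset (a DecodeOptions field on the handle) the decoder instead
	// decodes into a new blank value and stores that in the interface, so a different
	// stream type (here a string) can replace the previously contained map.
	h.InterfaceReset = true
	err = codec.NewDecoderBytes([]byte(`"now a string"`), &h).Decode(&v)
	return
}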
@@ -653,30 +350,78 @@ func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) { rvSetIntf(rv, rvn) } -func decStructFieldKeyNotString(dd decDriver, keyType valueType, b *[decScratchByteArrayLen]byte) (rvkencname []byte) { - if keyType == valueTypeInt { - rvkencname = strconv.AppendInt(b[:0], dd.DecodeInt64(), 10) - } else if keyType == valueTypeUint { - rvkencname = strconv.AppendUint(b[:0], dd.DecodeUint64(), 10) - } else if keyType == valueTypeFloat { - rvkencname = strconv.AppendFloat(b[:0], dd.DecodeFloat64(), 'f', -1, 64) - } else { - halt.errorf("invalid struct key type: %v", keyType) - } - return -} - -func (d *Decoder) kStructField(si *structFieldInfo, rv reflect.Value) { +func (d *decoder[T]) kStructField(si *structFieldInfo, rv reflect.Value) { if d.d.TryNil() { - if rv = si.path.field(rv); rv.IsValid() { + rv = si.fieldNoAlloc(rv, true) + if rv.IsValid() { decSetNonNilRV2Zero(rv) } - return + } else if si.decBuiltin { + rv = rvAddr(si.fieldAlloc(rv), si.ptrTyp) + d.decode(rv2i(rv)) + } else { + fn := d.fn(si.baseTyp) + rv = si.fieldAlloc(rv) + if fn.i.addrD { + rv = rvAddr(rv, si.ptrTyp) + } + fn.fd(d, &fn.i, rv) } - d.decodeValueNoCheckNil(si.path.fieldAlloc(rv), nil) } -func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kStructSimple(f *decFnInfo, rv reflect.Value) { + _ = d.d // early asserts d, d.d are not nil once + ctyp := d.d.ContainerType() + ti := f.ti + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var rvkencname []byte + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + // Not much gain from doing it two ways for array (used less frequently than structs). + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + // iterate all the items in the stream. 
+ // - if mapped elem-wise to a field, handle it + // - if more stream items than can be mapped, error it + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoder[T]) kStruct(f *decFnInfo, rv reflect.Value) { + _ = d.d // early asserts d, d.d are not nil once ctyp := d.d.ContainerType() ti := f.ti var mf MissingFielder @@ -693,18 +438,24 @@ func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { } hasLen := containerLen >= 0 var name2 []byte - if mf != nil { - var namearr2 [16]byte - name2 = namearr2[:0] - } var rvkencname []byte + tkt := ti.keyType for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - if ti.keyType == valueTypeString { - rvkencname = d.d.DecodeStringAsBytes() + d.mapElemKey(j == 0) + // use if-else since <8 branches and we need good branch prediction for string + if tkt == valueTypeString { + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + } else if tkt == valueTypeInt { + rvkencname = strconv.AppendInt(d.b[:0], d.d.DecodeInt64(), 10) + } else if tkt == valueTypeUint { + rvkencname = strconv.AppendUint(d.b[:0], d.d.DecodeUint64(), 10) + } else if tkt == valueTypeFloat { + rvkencname = strconv.AppendFloat(d.b[:0], d.d.DecodeFloat64(), 'f', -1, 64) } else { - rvkencname = decStructFieldKeyNotString(d.d, ti.keyType, &d.b) + halt.errorStr2("invalid struct key type: ", ti.keyType.String()) } + d.mapElemValue() if si := ti.siForEncName(rvkencname); si != nil { d.kStructField(si, rv) @@ -714,7 +465,7 @@ func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { var f interface{} d.decode(&f) if !mf.CodecMissingField(name2, f) && d.h.ErrorIfNoField { - d.errorf("no matching struct field when decoding stream map with key: %s ", stringView(name2)) + halt.errorStr2("no matching struct field when decoding stream map with key: ", stringView(name2)) } } else { d.structFieldNotFound(-1, stringView(rvkencname)) @@ -736,7 +487,7 @@ func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { // if mapped elem-wise to a field, handle it // if more stream items than can be mapped, error it for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.arrayElem() + d.arrayElem(j == 0) if j < len(tisfi) { d.kStructField(tisfi[j], rv) } else { @@ -746,11 +497,12 @@ func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { d.arrayEnd() } else { - d.onerror(errNeedMapOrArrayDecodeToStruct) + halt.onerror(errNeedMapOrArrayDecodeToStruct) } } -func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kSlice(f *decFnInfo, rv reflect.Value) { + _ = d.d // early asserts d, d.d are not nil once // A slice can be set from a map or array in stream. // This way, the order can be kept (as order is lost with map). 
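Editor's aside, not part of the vendored file: the kSlice comment above notes that a map in the stream may be decoded into a slice so that ordering is preserved; the code that follows doubles the map length and alternates key/value elements. A hedged sketch of what that looks like through the public API; JsonHandle and the literal input are assumptions.

import "github.com/ugorji/go/codec"

func mapIntoSlice() (kvs []interface{}, err error) {
	var h codec.JsonHandle
	// The JSON object decodes into the slice as alternating key/value elements in
	// stream order, e.g. "a", 1, "b", 2 (the concrete number type depends on handle settings).
	err = codec.NewDecoderBytes([]byte(`{"a":1,"b":2}`), &h).Decode(&kvs)
	return
}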
@@ -763,26 +515,31 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { if ctyp == valueTypeBytes || ctyp == valueTypeString { // you can only decode bytes or string in the stream into a slice or array of bytes if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { - d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) } rvbs := rvGetBytes(rv) - if !rvCanset { - // not addressable byte slice, so do not decode into it past the length - rvbs = rvbs[:len(rvbs):len(rvbs)] - } - bs2 := d.decodeBytesInto(rvbs) - // if !(len(bs2) == len(rvbs) && byteSliceSameData(rvbs, bs2)) { - if !(len(bs2) > 0 && len(bs2) == len(rvbs) && &bs2[0] == &rvbs[0]) { - if rvCanset { + if rvCanset { + bs2, bst := d.decodeBytesInto(rvbs, false) + if bst != dBytesIntoParamOut { rvSetBytes(rv, bs2) - } else if len(rvbs) > 0 && len(bs2) > 0 { - copy(rvbs, bs2) } + } else { + // not addressable byte slice, so do not decode into it past the length + d.decodeBytesInto(rvbs[:len(rvbs):len(rvbs)], true) } return } - slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) - never Nil + // only expects valueType(Array|Map) - never Nil + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } // an array can never return a nil slice. so no need to check f.array here. if containerLenS == 0 { @@ -793,7 +550,11 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { rvSetSliceLen(rv, 0) } } - slh.End() + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } return } @@ -804,7 +565,7 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { rtelem = rtelem.Elem() } - var fn *codecFn + var fn *decFn[T] var rvChanged bool @@ -813,11 +574,12 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { rvlen := rvLenSlice(rv) rvcap := rvCapSlice(rv) - hasLen := containerLenS > 0 + maxInitLen := d.maxInitLen() + hasLen := containerLenS >= 0 if hasLen { if containerLenS > rvcap { oldRvlenGtZero := rvlen > 0 - rvlen1 := decInferLen(containerLenS, d.h.MaxInitLen, int(ti.elemsize)) + rvlen1 := int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) if rvlen1 == rvlen { } else if rvlen1 <= rvcap { if rvCanset { @@ -830,7 +592,7 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { rvcap = rvlen rvChanged = !rvCanset } else { // rvlen1 > rvcap && !canSet - d.errorf("cannot decode into non-settable slice") + halt.errorStr("cannot decode into non-settable slice") } if rvChanged && oldRvlenGtZero && rtelem0Mut { rvCopySlice(rv, rv0, rtelem) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) @@ -846,27 +608,48 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { // consider creating new element once, and just decoding into it. var elemReset = d.h.SliceElementReset - var j int + // when decoding into slices, there may be more values in the stream than the slice length. + // decodeValue handles this better when coming from an addressable value (known to reflect.Value). + // Consequently, builtin handling skips slices. 
+ var rtelemIsPtr bool + var rtelemElem reflect.Type + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } + + var j int for ; d.containerNext(j, containerLenS, hasLen); j++ { if j == 0 { if rvIsNil(rv) { // means hasLen = false if rvCanset { - rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(ti.elemsize)) + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) rvcap = rvlen rvChanged = !rvCanset } else { - d.errorf("cannot decode into non-settable slice") + halt.errorStr("cannot decode into non-settable slice") } } if fn == nil { - fn = d.h.fn(rtelem) + fn = d.fn(rtelem) } } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + // if indefinite, etc, then expand the slice if necessary if j >= rvlen { - slh.ElemContainerState(j) // expand the slice up to the cap. // Note that we did, so we have to reset it later. @@ -878,24 +661,41 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { } else if rvChanged { rv = rvSlice(rv, rvlen) } else { - d.onerror(errExpandSliceCannotChange) + halt.onerror(errExpandSliceCannotChange) } } else { if !(rvCanset || rvChanged) { - d.onerror(errExpandSliceCannotChange) + halt.onerror(errExpandSliceCannotChange) } rv, rvcap, rvCanset = rvGrowSlice(rv, f.ti, rvcap, 1) + // note: 1 requested is hint/minimum - new capacity with more space rvlen = rvcap rvChanged = !rvCanset } - } else { - slh.ElemContainerState(j) } - rv9 = rvSliceIndex(rv, j, f.ti) + + // we check if we can make this an addr, and do builtin + // e.g. if []ints, then fastpath should handle it? + // but if not, we should treat it as each element is *int, and decode into it. + + rv9 = rvArrayIndex(rv, j, f.ti, true) if elemReset { rvSetZero(rv9) } - d.decodeValue(rv9, fn) + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) // d.decode(rv2i(rv9.Addr())) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } } if j < rvlen { if rvCanset { @@ -911,92 +711,139 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { rvChanged = true } } - slh.End() + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } if rvChanged { // infers rvCanset=true, so it can be reset rvSetDirect(rv0, rv) } } -func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kArray(f *decFnInfo, rv reflect.Value) { + _ = d.d // early asserts d, d.d are not nil once // An array can be set from a map or array in stream. 
- + ti := f.ti ctyp := d.d.ContainerType() if handleBytesWithinKArray && (ctyp == valueTypeBytes || ctyp == valueTypeString) { // you can only decode bytes or string in the stream into a slice or array of bytes - if f.ti.elemkind != uint8(reflect.Uint8) { - d.errorf("bytes/string in stream can decode into array of bytes, but not %v", f.ti.rt) + if ti.elemkind != uint8(reflect.Uint8) { + halt.errorf("bytes/string in stream can decode into array of bytes, but not %v", ti.rt) } rvbs := rvGetArrayBytes(rv, nil) - bs2 := d.decodeBytesInto(rvbs) - if !byteSliceSameData(rvbs, bs2) && len(rvbs) > 0 && len(bs2) > 0 { - copy(rvbs, bs2) - } + d.decodeBytesInto(rvbs, true) return } - slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) - never Nil + // only expects valueType(Array|Map) - never Nil + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } // an array can never return a nil slice. so no need to check f.array here. if containerLenS == 0 { - slh.End() + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } return } - rtelem := f.ti.elem - for k := reflect.Kind(f.ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem := ti.elem + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { rtelem = rtelem.Elem() } - var fn *codecFn - var rv9 reflect.Value rvlen := rv.Len() // same as cap - hasLen := containerLenS > 0 + hasLen := containerLenS >= 0 if hasLen && containerLenS > rvlen { - d.errorf("cannot decode into array with length: %v, less than container length: %v", rvlen, containerLenS) + halt.errorf("cannot decode into array with length: %v, less than container length: %v", any(rvlen), any(containerLenS)) } // consider creating new element once, and just decoding into it. var elemReset = d.h.SliceElementReset + var rtelemIsPtr bool + var rtelemElem reflect.Type + var fn *decFn[T] + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } else { + fn = d.fn(rtelem) + } + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } // note that you cannot expand the array if indefinite and we go past array length if j >= rvlen { - slh.arrayCannotExpand(hasLen, rvlen, j, containerLenS) - return + d.arrayCannotExpand(rvlen, j+1) + d.swallow() + continue } - slh.ElemContainerState(j) - rv9 = rvArrayIndex(rv, j, f.ti) + rv9 = rvArrayIndex(rv, j, f.ti, false) if elemReset { rvSetZero(rv9) } - - if fn == nil { - fn = d.h.fn(rtelem) + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) // d.decode(rv2i(rv9.Addr())) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) } - d.decodeValue(rv9, fn) } - slh.End() + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } } -func (d *Decoder) kChan(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kChan(f *decFnInfo, rv reflect.Value) { + _ = d.d // early asserts d, d.d are not nil once // A slice can be set from a map or array in stream. 
// This way, the order can be kept (as order is lost with map). ti := f.ti if ti.chandir&uint8(reflect.SendDir) == 0 { - d.errorf("receive-only channel cannot be decoded") + halt.errorStr("receive-only channel cannot be decoded") } ctyp := d.d.ContainerType() if ctyp == valueTypeBytes || ctyp == valueTypeString { // you can only decode bytes or string in the stream into a slice or array of bytes if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { - d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) } - bs2 := d.d.DecodeBytes(nil) + bs2, _ := d.d.DecodeBytes() irv := rv2i(rv) ch, ok := irv.(chan<- byte) if !ok { @@ -1010,15 +857,27 @@ func (d *Decoder) kChan(f *codecFnInfo, rv reflect.Value) { var rvCanset = rv.CanSet() - // only expects valueType(Array|Map - nil handled above) - slh, containerLenS := d.decSliceHelperStart() + // only expects valueType(Array|Map) - never Nil + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } // an array can never return a nil slice. so no need to check f.array here. if containerLenS == 0 { if rvCanset && rvIsNil(rv) { rvSetDirect(rv, reflect.MakeChan(ti.rt, 0)) } - slh.End() + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } return } @@ -1029,20 +888,21 @@ func (d *Decoder) kChan(f *codecFnInfo, rv reflect.Value) { rtelem = rtelem.Elem() } - var fn *codecFn + var fn *decFn[T] var rvChanged bool var rv0 = rv var rv9 reflect.Value var rvlen int // = rv.Len() - hasLen := containerLenS > 0 + hasLen := containerLenS >= 0 + maxInitLen := d.maxInitLen() for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { if j == 0 { if rvIsNil(rv) { if hasLen { - rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(ti.elemsize)) + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) } else { rvlen = decDefChanCap } @@ -1050,17 +910,25 @@ func (d *Decoder) kChan(f *codecFnInfo, rv reflect.Value) { rv = reflect.MakeChan(ti.rt, rvlen) rvChanged = true } else { - d.errorf("cannot decode into non-settable chan") + halt.errorStr("cannot decode into non-settable chan") } } if fn == nil { - fn = d.h.fn(rtelem) + fn = d.fn(rtelem) } } - slh.ElemContainerState(j) + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if rv9.IsValid() { rvSetZero(rv9) - } else if decUseTransient && useTransient { + } else if useTransient { rv9 = d.perType.TransientAddrK(ti.elem, reflect.Kind(ti.elemkind)) } else { rv9 = rvZeroAddrK(ti.elem, reflect.Kind(ti.elemkind)) @@ -1070,7 +938,11 @@ func (d *Decoder) kChan(f *codecFnInfo, rv reflect.Value) { } rv.Send(rv9) } - slh.End() + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } if rvChanged { // infers rvCanset=true, so it can be reset rvSetDirect(rv0, rv) @@ -1078,11 +950,12 @@ func (d *Decoder) kChan(f *codecFnInfo, rv reflect.Value) { } -func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { +func (d *decoder[T]) kMap(f *decFnInfo, rv reflect.Value) { + _ = d.d // early asserts d, d.d are not nil once containerLen := d.mapStart(d.d.ReadMapStart()) ti := f.ti if rvIsNil(rv) { - rvlen := decInferLen(containerLen, 
d.h.MaxInitLen, int(ti.keysize+ti.elemsize)) + rvlen := int(decInferLen(containerLen, d.maxInitLen(), uint(ti.keysize+ti.elemsize))) rvSetDirect(rv, makeMapReflect(ti.rt, rvlen)) } @@ -1095,19 +968,21 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { ktypeId := rt2id(ktype) vtypeKind := reflect.Kind(ti.elemkind) ktypeKind := reflect.Kind(ti.keykind) - kfast := mapKeyFastKindFor(ktypeKind) - visindirect := mapStoresElemIndirect(uintptr(ti.elemsize)) - visref := refBitset.isset(ti.elemkind) + mparams := getMapReqParams(ti) + // kfast := mapKeyFastKindFor(ktypeKind) + // visindirect := mapStoresElemIndirect(uintptr(ti.elemsize)) + // visref := refBitset.isset(ti.elemkind) vtypePtr := vtypeKind == reflect.Ptr ktypePtr := ktypeKind == reflect.Ptr vTransient := decUseTransient && !vtypePtr && ti.tielem.flagCanTransient - kTransient := decUseTransient && !ktypePtr && ti.tikey.flagCanTransient + // keys are transient iff values are transient first + kTransient := vTransient && !ktypePtr && ti.tikey.flagCanTransient var vtypeElem reflect.Type - var keyFn, valFn *codecFn + var keyFn, valFn *decFn[T] var ktypeLo, vtypeLo = ktype, vtype if ktypeKind == reflect.Ptr { @@ -1146,39 +1021,37 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { ktypeIsString := ktypeId == stringTypId ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen >= 0 - hasLen := containerLen > 0 - - // kstrbs is used locally for the key bytes, so we can reduce allocation. - // When we read keys, we copy to this local bytes array, and use a stringView for lookup. - // We only convert it into a true string if we have to do a set on the map. - - // Since kstr2bs will usually escape to the heap, declaring a [64]byte array may be wasteful. - // It is only valuable if we are sure that it is declared on the stack. - // var kstrarr [64]byte // most keys are less than 32 bytes, and even more less than 64 - // var kstrbs = kstrarr[:0] - var kstrbs []byte var kstr2bs []byte - var s string + var kstr string - var callFnRvk bool - - fnRvk2 := func() (s string) { - callFnRvk = false - if len(kstr2bs) < 2 { - return string(kstr2bs) - } - return d.mapKeyString(&callFnRvk, &kstrbs, &kstr2bs) - } + var mapKeyStringSharesBytesBuf bool + var att dBytesAttachState // Use a possibly transient (map) value (and key), to reduce allocation + // when decoding into slices, there may be more values in the stream than the slice length. + // decodeValue handles this better when coming from an addressable value (known to reflect.Value). + // Consequently, builtin handling skips slices. + + var vElem, kElem reflect.Type + kbuiltin := ti.tikey.flagDecBuiltin && ti.keykind != uint8(reflect.Slice) + vbuiltin := ti.tielem.flagDecBuiltin // && ti.elemkind != uint8(reflect.Slice) + if kbuiltin && ktypePtr { + kElem = ti.key.Elem() + } + if vbuiltin && vtypePtr { + vElem = ti.elem.Elem() + } + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - callFnRvk = false + mapKeyStringSharesBytesBuf = false + kstr = "" if j == 0 { // if vtypekind is a scalar and thus value will be decoded using TransientAddrK, // then it is ok to use TransientAddr2K for the map key. 
- if decUseTransient && vTransient && kTransient { + if kTransient { rvk = d.perType.TransientAddr2K(ktype, ktypeKind) } else { rvk = rvZeroAddrK(ktype, ktypeKind) @@ -1187,17 +1060,17 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { rvkn = rvk } if !rvvMut { - if decUseTransient && vTransient { + if vTransient { rvvn = d.perType.TransientAddrK(vtype, vtypeKind) } else { rvvn = rvZeroAddrK(vtype, vtypeKind) } } if !ktypeIsString && keyFn == nil { - keyFn = d.h.fn(ktypeLo) + keyFn = d.fn(ktypeLo) } if valFn == nil { - valFn = d.h.fn(vtypeLo) + valFn = d.fn(vtypeLo) } } else if rvkMut { rvSetZero(rvk) @@ -1205,39 +1078,68 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { rvk = rvkn } - d.mapElemKey() - if ktypeIsString { - kstr2bs = d.d.DecodeStringAsBytes() - rvSetString(rvk, fnRvk2()) + d.mapElemKey(j == 0) + + if d.d.TryNil() { + rvSetZero(rvk) + } else if ktypeIsString { + kstr2bs, att = d.d.DecodeStringAsBytes() + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, att) + rvSetString(rvk, kstr) } else { - d.decByteState = decByteStateNone - d.decodeValue(rvk, keyFn) + if kbuiltin { + if ktypePtr { + if rvIsNil(rvk) { + rvSetDirect(rvk, reflect.New(kElem)) + } + d.decode(rv2i(rvk)) + } else { + d.decode(rv2i(rvAddr(rvk, ti.tikey.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvk, keyFn) + } // special case if interface wrapping a byte slice if ktypeIsIntf { if rvk2 := rvk.Elem(); rvk2.IsValid() && rvk2.Type() == uint8SliceTyp { kstr2bs = rvGetBytes(rvk2) - rvSetIntf(rvk, rv4istr(fnRvk2())) + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, dBytesAttachView) + rvSetIntf(rvk, rv4istr(kstr)) } // NOTE: consider failing early if map/slice/func } } + // TryNil will try to read from the stream and check if a nil marker. + // + // When using ioDecReader (specifically in bufio mode), this TryNil call could + // override part of the buffer used for the string key. + // + // To mitigate this, we do a special check for ioDecReader in bufio mode. 
+ if mapKeyStringSharesBytesBuf && d.bufio { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { // ktypeIsIntf + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + mapKeyStringSharesBytesBuf = false + } + d.mapElemValue() if d.d.TryNil() { + if mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { // ktypeIsIntf + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } // since a map, we have to set zero value if needed if !rvvz.IsValid() { rvvz = rvZeroK(vtype, vtypeKind) } - if callFnRvk { - s = d.string(kstr2bs) - if ktypeIsString { - rvSetString(rvk, s) - } else { // ktypeIsIntf - rvSetIntf(rvk, rv4istr(s)) - } - } - mapSet(rv, rvk, rvvz, kfast, visindirect, visref) + mapSet(rv, rvk, rvvz, mparams) continue } @@ -1252,7 +1154,7 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { } else if !doMapGet { goto NEW_RVV } else { - rvv = mapGet(rv, rvk, rvva, kfast, visindirect, visref) + rvv = mapGet(rv, rvk, rvva, mparams) if !rvv.IsValid() || (rvvCanNil && rvIsNil(rvv)) { goto NEW_RVV } @@ -1272,7 +1174,7 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { rvv = rvvn default: // make addressable (so you can set the slice/array elements, etc) - if decUseTransient && vTransient { + if vTransient { rvvn = d.perType.TransientAddrK(vtype, vtypeKind) } else { rvvn = rvZeroAddrK(vtype, vtypeKind) @@ -1286,146 +1188,75 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { NEW_RVV: if vtypePtr { rvv = reflect.New(vtypeElem) // non-nil in stream, so allocate value - } else if decUseTransient && vTransient { + } else if vTransient { rvv = d.perType.TransientAddrK(vtype, vtypeKind) } else { rvv = rvZeroAddrK(vtype, vtypeKind) } DECODE_VALUE_NO_CHECK_NIL: - d.decodeValueNoCheckNil(rvv, valFn) - - if doMapSet { - if callFnRvk { - s = d.string(kstr2bs) - if ktypeIsString { - rvSetString(rvk, s) - } else { // ktypeIsIntf - rvSetIntf(rvk, rv4istr(s)) - } + if doMapSet && mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { // ktypeIsIntf + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) } - mapSet(rv, rvk, rvv, kfast, visindirect, visref) + } + if vbuiltin { + if vtypePtr { + if rvIsNil(rvv) { + rvSetDirect(rvv, reflect.New(vElem)) + } + d.decode(rv2i(rvv)) + } else { + d.decode(rv2i(rvAddr(rvv, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvv, valFn) + } + if doMapSet { + mapSet(rv, rvk, rvv, mparams) } } d.mapEnd() } -// Decoder reads and decodes an object from an input stream in a supported format. -// -// Decoder is NOT safe for concurrent use i.e. a Decoder cannot be used -// concurrently in multiple goroutines. -// -// However, as Decoder could be allocation heavy to initialize, a Reset method is provided -// so its state can be reused to decode new input streams repeatedly. -// This is the idiomatic way to use. -type Decoder struct { - panicHdl - - d decDriver - - // cache the mapTypeId and sliceTypeId for faster comparisons - mtid uintptr - stid uintptr - - h *BasicHandle - - blist bytesFreelist - - // ---- cpu cache line boundary? - decRd - - // ---- cpu cache line boundary? - n fauxUnion - - hh Handle - err error - - perType decPerType - - // used for interning strings - is internerMap - - // ---- cpu cache line boundary? - // ---- writable fields during execution --- *try* to keep in sep cache line - maxdepth int16 - depth int16 - - // Extensions can call Decode() within a current Decode() call. 
- // We need to know when the top level Decode() call returns, - // so we can decide whether to Release() or not. - calls uint16 // what depth in mustDecode are we in now. - - c containerState - - decByteState - - // b is an always-available scratch buffer used by Decoder and decDrivers. - // By being always-available, it can be used for one-off things without - // having to get from freelist, use, and return back to freelist. - b [decScratchByteArrayLen]byte -} - -// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. -// -// For efficiency, Users are encouraged to configure ReaderBufferSize on the handle -// OR pass in a memory buffered reader (eg bufio.Reader, bytes.Buffer). -func NewDecoder(r io.Reader, h Handle) *Decoder { - d := h.newDecDriver().decoder() - if r != nil { - d.Reset(r) - } - return d -} - -// NewDecoderBytes returns a Decoder which efficiently decodes directly -// from a byte slice with zero copying. -func NewDecoderBytes(in []byte, h Handle) *Decoder { - d := h.newDecDriver().decoder() - if in != nil { - d.ResetBytes(in) - } - return d -} - -// NewDecoderString returns a Decoder which efficiently decodes directly -// from a string with zero copying. -// -// It is a convenience function that calls NewDecoderBytes with a -// []byte view into the string. -// -// This can be an efficient zero-copy if using default mode i.e. without codec.safe tag. -func NewDecoderString(s string, h Handle) *Decoder { - return NewDecoderBytes(bytesView(s), h) -} - -func (d *Decoder) HandleName() string { - return d.hh.Name() -} - -func (d *Decoder) r() *decRd { - return &d.decRd -} - -func (d *Decoder) init(h Handle) { +func (d *decoder[T]) init(h Handle) { initHandle(h) - d.cbreak = d.js || d.cbor - d.bytes = true - d.err = errDecoderNotInitialized - d.h = h.getBasicHandle() + callMake(&d.d) d.hh = h - d.be = h.isBinary() + d.h = h.getBasicHandle() + // d.zeroCopy = d.h.ZeroCopy + // d.be = h.isBinary() + d.err = errDecoderNotInitialized + if d.h.InternString && d.is == nil { d.is.init() } + + // d.fp = fastpathDList[T]() + d.fp = d.d.init(h, &d.decoderBase, d).(*fastpathDs[T]) // should set js, cbor, bytes, etc + + // d.cbreak = d.js || d.cbor + + if d.bytes { + d.rtidFn = &d.h.rtidFnsDecBytes + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtBytes + } else { + d.bufio = d.h.ReaderBufferSize > 0 + d.rtidFn = &d.h.rtidFnsDecIO + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtIO + } + + d.reset() // NOTE: do not initialize d.n here. It is lazily initialized in d.naked() } -func (d *Decoder) resetCommon() { +func (d *decoder[T]) reset() { d.d.reset() d.err = nil d.c = 0 - d.decByteState = decByteStateNone d.depth = 0 d.calls = 0 // reset all things which were cached from the Handle, but could change @@ -1439,39 +1270,42 @@ func (d *Decoder) resetCommon() { d.str = false if d.h.MapType != nil { d.mtid = rt2id(d.h.MapType) - d.mtr = fastpathAvIndex(d.mtid) != -1 + _, d.mtr = fastpathAvIndex(d.mtid) } if d.h.SliceType != nil { d.stid = rt2id(d.h.SliceType) - d.str = fastpathAvIndex(d.stid) != -1 + _, d.str = fastpathAvIndex(d.stid) } } // Reset the Decoder with a new Reader to decode from, // clearing all state from last run(s). 
-func (d *Decoder) Reset(r io.Reader) { +func (d *decoder[T]) Reset(r io.Reader) { + if d.bytes { + halt.onerror(errDecNoResetBytesWithReader) + } + d.reset() if r == nil { r = &eofReader } - d.bytes = false - if d.ri == nil { - d.ri = new(ioDecReader) - } - d.ri.reset(r, d.h.ReaderBufferSize, &d.blist) - d.decReader = d.ri - d.resetCommon() + d.d.resetInIO(r) } // ResetBytes resets the Decoder with a new []byte to decode from, // clearing all state from last run(s). -func (d *Decoder) ResetBytes(in []byte) { - if in == nil { - in = []byte{} +func (d *decoder[T]) ResetBytes(in []byte) { + if !d.bytes { + halt.onerror(errDecNoResetReaderWithBytes) } - d.bytes = true - d.decReader = &d.rb - d.rb.reset(in) - d.resetCommon() + d.resetBytes(in) +} + +func (d *decoder[T]) resetBytes(in []byte) { + d.reset() + if in == nil { + in = zeroByteSlice + } + d.d.resetInBytes(in) } // ResetString resets the Decoder with a new string to decode from, @@ -1481,14 +1315,10 @@ func (d *Decoder) ResetBytes(in []byte) { // []byte view into the string. // // This can be an efficient zero-copy if using default mode i.e. without codec.safe tag. -func (d *Decoder) ResetString(s string) { +func (d *decoder[T]) ResetString(s string) { d.ResetBytes(bytesView(s)) } -func (d *Decoder) naked() *fauxUnion { - return &d.n -} - // Decode decodes the stream from reader and stores the result in the // value pointed to by v. v cannot be a nil pointer. v can also be // a reflect.Value of a pointer. @@ -1552,27 +1382,33 @@ func (d *Decoder) naked() *fauxUnion { // // Note: we allow nil values in the stream anywhere except for map keys. // A nil value in the encoded stream where a map key is expected is treated as an error. -func (d *Decoder) Decode(v interface{}) (err error) { +// +// Note that an error from a Decode call will make the Decoder unusable moving forward. +// This is because the state of the Decoder, it's input stream, etc are no longer stable. +// Any subsequent calls to Decode will trigger the same error. +func (d *decoder[T]) Decode(v interface{}) (err error) { // tried to use closure, as runtime optimizes defer with no params. // This seemed to be causing weird issues (like circular reference found, unexpected panic, etc). // Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139 - if !debugging { - defer func() { - if x := recover(); x != nil { - panicValToErr(d, x, &d.err) - err = d.err - } - }() - } - - d.MustDecode(v) + defer panicValToErr(d, callRecoverSentinel, &d.err, &err, debugging) + d.mustDecode(v) return } // MustDecode is like Decode, but panics if unable to Decode. // // Note: This provides insight to the code location that triggered the error. -func (d *Decoder) MustDecode(v interface{}) { +// +// Note that an error from a Decode call will make the Decoder unusable moving forward. +// This is because the state of the Decoder, it's input stream, etc are no longer stable. +// Any subsequent calls to Decode will trigger the same error. +func (d *decoder[T]) MustDecode(v interface{}) { + defer panicValToErr(d, callRecoverSentinel, &d.err, nil, true) + d.mustDecode(v) + return +} + +func (d *decoder[T]) mustDecode(v interface{}) { halt.onerror(d.err) if d.hh == nil { halt.onerror(errNoFormatHandle) @@ -1588,139 +1424,31 @@ func (d *Decoder) MustDecode(v interface{}) { // // Deprecated: Pooled resources are not used with a Decoder. // This method is kept for compatibility reasons only. 
-func (d *Decoder) Release() { +func (d *decoder[T]) Release() {} + +func (d *decoder[T]) swallow() { + d.d.nextValueBytes() } -func (d *Decoder) swallow() { - d.d.nextValueBytes(nil) +func (d *decoder[T]) nextValueBytes() []byte { + return d.d.nextValueBytes() } -func (d *Decoder) swallowErr() (err error) { - if !debugging { - defer func() { - if x := recover(); x != nil { - panicValToErr(d, x, &err) - } - }() - } - d.swallow() - return -} - -func setZero(iv interface{}) { - if iv == nil { - return - } - rv, ok := isNil(iv) - if ok { - return - } - // var canDecode bool - switch v := iv.(type) { - case *string: - *v = "" - case *bool: - *v = false - case *int: - *v = 0 - case *int8: - *v = 0 - case *int16: - *v = 0 - case *int32: - *v = 0 - case *int64: - *v = 0 - case *uint: - *v = 0 - case *uint8: - *v = 0 - case *uint16: - *v = 0 - case *uint32: - *v = 0 - case *uint64: - *v = 0 - case *float32: - *v = 0 - case *float64: - *v = 0 - case *complex64: - *v = 0 - case *complex128: - *v = 0 - case *[]byte: - *v = nil - case *Raw: - *v = nil - case *time.Time: - *v = time.Time{} - case reflect.Value: - decSetNonNilRV2Zero(v) - default: - if !fastpathDecodeSetZeroTypeSwitch(iv) { - decSetNonNilRV2Zero(rv) - } - } -} - -// decSetNonNilRV2Zero will set the non-nil value to its zero value. -func decSetNonNilRV2Zero(v reflect.Value) { - // If not decodeable (settable), we do not touch it. - // We considered empty'ing it if not decodeable e.g. - // - if chan, drain it - // - if map, clear it - // - if slice or array, zero all elements up to len - // - // However, we decided instead that we either will set the - // whole value to the zero value, or leave AS IS. - - k := v.Kind() - if k == reflect.Interface { - decSetNonNilRV2Zero4Intf(v) - } else if k == reflect.Ptr { - decSetNonNilRV2Zero4Ptr(v) - } else if v.CanSet() { - rvSetDirectZero(v) - } -} - -func decSetNonNilRV2Zero4Ptr(v reflect.Value) { - ve := v.Elem() - if ve.CanSet() { - rvSetZero(ve) // we can have a pointer to an interface - } else if v.CanSet() { - rvSetZero(v) - } -} - -func decSetNonNilRV2Zero4Intf(v reflect.Value) { - ve := v.Elem() - if ve.CanSet() { - rvSetDirectZero(ve) // interfaces always have element as a non-interface - } else if v.CanSet() { - rvSetZero(v) - } -} - -func (d *Decoder) decode(iv interface{}) { +func (d *decoder[T]) decode(iv interface{}) { + _ = d.d // early asserts d, d.d are not nil once // a switch with only concrete types can be optimized. // consequently, we deal with nil and interfaces outside the switch. 
- if iv == nil { - d.onerror(errCannotDecodeIntoNil) + rv, ok := isNil(iv, true) // handle nil pointers also + if ok { + halt.onerror(errCannotDecodeIntoNil) } switch v := iv.(type) { // case nil: // case Selfer: - case reflect.Value: - if x, _ := isDecodeable(v); !x { - d.haltAsNotDecodeable(v) - } - d.decodeValue(v, nil) case *string: - *v = d.stringZC(d.d.DecodeStringAsBytes()) + *v = d.detach2Str(d.d.DecodeStringAsBytes()) case *bool: *v = d.d.DecodeBool() case *int: @@ -1743,22 +1471,21 @@ func (d *Decoder) decode(iv interface{}) { *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) case *uint64: *v = d.d.DecodeUint64() + case *uintptr: + *v = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) case *float32: - *v = d.decodeFloat32() + *v = d.d.DecodeFloat32() case *float64: *v = d.d.DecodeFloat64() case *complex64: - *v = complex(d.decodeFloat32(), 0) + *v = complex(d.d.DecodeFloat32(), 0) case *complex128: *v = complex(d.d.DecodeFloat64(), 0) case *[]byte: - *v = d.decodeBytesInto(*v) + *v, _ = d.decodeBytesInto(*v, false) case []byte: // not addressable byte slice, so do not decode into it past the length - b := d.decodeBytesInto(v[:len(v):len(v)]) - if !(len(b) > 0 && len(b) == len(v) && &b[0] == &v[0]) { // not same slice - copy(v, b) - } + d.decodeBytesInto(v[:len(v):len(v)], true) case *time.Time: *v = d.d.DecodeTime() case *Raw: @@ -1767,14 +1494,22 @@ func (d *Decoder) decode(iv interface{}) { case *interface{}: d.decodeValue(rv4iptr(v), nil) + case reflect.Value: + if ok, _ = isDecodeable(v); !ok { + d.haltAsNotDecodeable(v) + } + d.decodeValue(v, nil) + default: // we can't check non-predefined types, as they might be a Selfer or extension. - if skipFastpathTypeSwitchInDirectCall || !fastpathDecodeTypeSwitch(iv, d) { - v := reflect.ValueOf(iv) - if x, _ := isDecodeable(v); !x { - d.haltAsNotDecodeable(v) + if skipFastpathTypeSwitchInDirectCall || !d.dh.fastpathDecodeTypeSwitch(iv, d) { + if !rv.IsValid() { + rv = reflect.ValueOf(iv) } - d.decodeValue(v, nil) + if ok, _ = isDecodeable(rv); !ok { + d.haltAsNotDecodeable(rv) + } + d.decodeValue(rv, nil) } } } @@ -1787,15 +1522,15 @@ func (d *Decoder) decode(iv interface{}) { // // Note that decodeValue will handle nil in the stream early, so that the // subsequent calls i.e. kXXX methods, etc do not have to handle it themselves. -func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn) { +func (d *decoder[T]) decodeValue(rv reflect.Value, fn *decFn[T]) { if d.d.TryNil() { decSetNonNilRV2Zero(rv) - return + } else { + d.decodeValueNoCheckNil(rv, fn) } - d.decodeValueNoCheckNil(rv, fn) } -func (d *Decoder) decodeValueNoCheckNil(rv reflect.Value, fn *codecFn) { +func (d *decoder[T]) decodeValueNoCheckNil(rv reflect.Value, fn *decFn[T]) { // If stream is not containing a nil value, then we can deref to the base // non-pointer value, and decode into that. var rvp reflect.Value @@ -1812,7 +1547,7 @@ PTR: } if fn == nil { - fn = d.h.fn(rv.Type()) + fn = d.fn(rv.Type()) } if fn.i.addrD { if rvpValid { @@ -1820,83 +1555,68 @@ PTR: } else if rv.CanAddr() { rv = rvAddr(rv, fn.i.ti.ptr) } else if fn.i.addrDf { - d.errorf("cannot decode into a non-pointer value") + halt.errorStr("cannot decode into a non-pointer value") } } fn.fd(d, &fn.i, rv) } -func (d *Decoder) structFieldNotFound(index int, rvkencname string) { - // Note: rvkencname is used only if there is an error, to pass into d.errorf. 
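The decode type switch above handles common concrete pointer types without reflection, and rejects a nil target outright. A small sketch of what that looks like from the caller's side, assuming the package's public API:

```go
// Sketch: basic pointer targets hit the concrete-type fast paths above;
// a nil target is an error because Decode needs somewhere to write.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle

	var s string
	var n int64
	var b bool
	codec.NewDecoderBytes([]byte(`"hello"`), &h).MustDecode(&s)
	codec.NewDecoderBytes([]byte(`-42`), &h).MustDecode(&n)
	codec.NewDecoderBytes([]byte(`true`), &h).MustDecode(&b)

	err := codec.NewDecoderBytes([]byte(`1`), &h).Decode(nil) // rejected

	fmt.Println(s, n, b, err != nil) // hello -42 true true
}
```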
+func (d *decoder[T]) decodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + d.decodeValue(baseRV(v), d.fn(t)) + } else { + d.decodeValue(baseRV(v), d.fnNoExt(t)) + } +} + +func (d *decoder[T]) structFieldNotFound(index int, rvkencname string) { + // Note: rvkencname is used only if there is an error, to pass into halt.errorf. // Consequently, it is ok to pass in a stringView // Since rvkencname may be a stringView, do NOT pass it to another function. if d.h.ErrorIfNoField { if index >= 0 { - d.errorf("no matching struct field found when decoding stream array at index %v", index) + halt.errorInt("no matching struct field found when decoding stream array at index ", int64(index)) } else if rvkencname != "" { - d.errorf("no matching struct field found when decoding stream map with key " + rvkencname) + halt.errorStr2("no matching struct field found when decoding stream map with key ", rvkencname) } } d.swallow() } -func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { - if d.h.ErrorIfNoArrayExpand { - d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen) - } -} - -func (d *Decoder) haltAsNotDecodeable(rv reflect.Value) { - if !rv.IsValid() { - d.onerror(errCannotDecodeIntoNil) - } - // check if an interface can be retrieved, before grabbing an interface - if !rv.CanInterface() { - d.errorf("cannot decode into a value without an interface: %v", rv) - } - d.errorf("cannot decode into value of kind: %v, %#v", rv.Kind(), rv2i(rv)) -} - -func (d *Decoder) depthIncr() { - d.depth++ - if d.depth >= d.maxdepth { - d.onerror(errMaxDepthExceeded) - } -} - -func (d *Decoder) depthDecr() { - d.depth-- -} - -// Possibly get an interned version of a string, iff InternString=true and decoding a map key. -// -// This should mostly be used for map keys, where the key type is string. -// This is because keys of a map/struct are typically reused across many objects. -func (d *Decoder) string(v []byte) (s string) { - if d.is == nil || d.c != containerMapKey || len(v) < 2 || len(v) > internMaxStrLen { - return string(v) - } - return d.is.string(v) -} - -func (d *Decoder) zerocopy() bool { - return d.bytes && d.h.ZeroCopy -} - // decodeBytesInto is a convenience delegate function to decDriver.DecodeBytes. // It ensures that `in` is not a nil byte, before calling decDriver.DecodeBytes, // as decDriver.DecodeBytes treats a nil as a hint to use its internal scratch buffer. -func (d *Decoder) decodeBytesInto(in []byte) (v []byte) { - if in == nil { - in = []byte{} +func (d *decoder[T]) decodeBytesInto(out []byte, mustFit bool) (v []byte, state dBytesIntoState) { + v, att := d.d.DecodeBytes() + if cap(v) == 0 || (att >= dBytesAttachViewZerocopy && !mustFit) { + // no need to detach (since mustFit=false) + // including v has no capacity (covers v == nil and []byte{}) + return } - return d.d.DecodeBytes(in) + if len(v) == 0 { + v = zeroByteSlice // cannot be re-sliced/appended to + return + } + if len(out) == len(v) { + state = dBytesIntoParamOut + } else if cap(out) >= len(v) { + out = out[:len(v)] + state = dBytesIntoParamOutSlice + } else if mustFit { + halt.errorf("bytes capacity insufficient for decoded bytes: got/expected: %d/%d", len(v), len(out)) + } else { + out = make([]byte, len(v)) + state = dBytesIntoNew + } + copy(out, v) + v = out + return } -func (d *Decoder) rawBytes() (v []byte) { +func (d *decoder[T]) rawBytes() (v []byte) { // ensure that this is not a view into the bytes // i.e. if necessary, make new copy always. 
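The reworked decodeBytesInto above reuses the caller's buffer when the decoded bytes fit, allocates when they do not, and errors when an exact fit is required but impossible. A standalone sketch of that pattern with hypothetical names (not the library's internal helper, which also tracks attachment state):

```go
// Sketch: copy decoded bytes into the caller's buffer if capacity allows,
// otherwise allocate; optionally require the caller's buffer to fit.
package main

import (
	"errors"
	"fmt"
)

func intoBuffer(decoded, out []byte, mustFit bool) ([]byte, error) {
	switch {
	case len(decoded) == 0:
		return out[:0], nil
	case cap(out) >= len(decoded):
		out = out[:len(decoded)] // reuse caller storage
	case mustFit:
		return nil, errors.New("buffer capacity insufficient for decoded bytes")
	default:
		out = make([]byte, len(decoded)) // grow: fresh allocation
	}
	copy(out, decoded)
	return out, nil
}

func main() {
	buf := make([]byte, 2, 8)
	got, err := intoBuffer([]byte("abcd"), buf, false)
	fmt.Println(string(got), err, cap(got) == cap(buf)) // abcd <nil> true
}
```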
- v = d.d.nextValueBytes([]byte{}) + v = d.d.nextValueBytes() if d.bytes && !d.h.ZeroCopy { vv := make([]byte, len(v)) copy(vv, v) // using copy here triggers make+copy optimization eliding memclr @@ -1905,24 +1625,13 @@ func (d *Decoder) rawBytes() (v []byte) { return } -func (d *Decoder) wrapErr(v error, err *error) { - *err = wrapCodecErr(v, d.hh.Name(), d.NumBytesRead(), false) +func (d *decoder[T]) wrapErr(v error, err *error) { + *err = wrapCodecErr(v, d.hh.Name(), d.d.NumBytesRead(), false) } // NumBytesRead returns the number of bytes read -func (d *Decoder) NumBytesRead() int { - return int(d.r().numread()) -} - -// decodeFloat32 will delegate to an appropriate DecodeFloat32 implementation (if exists), -// else if will call DecodeFloat64 and ensure the value doesn't overflow. -// -// Note that we return float64 to reduce unnecessary conversions -func (d *Decoder) decodeFloat32() float32 { - if d.js { - return d.jsondriver().DecodeFloat32() // custom implementation for 32-bit - } - return float32(chkOvf.Float32V(d.d.DecodeFloat64())) +func (d *decoder[T]) NumBytesRead() int { + return d.d.NumBytesRead() } // ---- container tracking @@ -1938,438 +1647,315 @@ func (d *Decoder) decodeFloat32() float32 { // - Read(Map|Array)Elem(Kay|Value) is only supported by json. // Honor these in the code, to reduce the number of interface calls (even if empty). -func (d *Decoder) checkBreak() (v bool) { - // MARKER: jsonDecDriver.CheckBreak() cannot be inlined (over budget inlining cost). - // Consequently, there's no benefit in incurring the cost of this wrapping function. - // It is faster to just call the interface method directly. - - // if d.js { - // return d.jsondriver().CheckBreak() - // } - // if d.cbor { - // return d.cbordriver().CheckBreak() - // } - - if d.cbreak { - v = d.d.CheckBreak() - } - return -} - -func (d *Decoder) containerNext(j, containerLen int, hasLen bool) bool { - // MARKER: keep in sync with gen-helper.go.tmpl - - // return (hasLen && j < containerLen) || !(hasLen || slh.d.checkBreak()) +func (d *decoder[T]) containerNext(j, containerLen int, hasLen bool) bool { + // return (hasLen && (j < containerLen)) || (!hasLen && !d.d.CheckBreak()) if hasLen { return j < containerLen } - return !d.checkBreak() + return !d.d.CheckBreak() } -func (d *Decoder) mapStart(v int) int { - if v != containerLenNil { - d.depthIncr() - d.c = containerMapStart - } - return v -} - -func (d *Decoder) mapElemKey() { - if d.js { - d.jsondriver().ReadMapElemKey() - } +func (d *decoder[T]) mapElemKey(firstTime bool) { + d.d.ReadMapElemKey(firstTime) d.c = containerMapKey } -func (d *Decoder) mapElemValue() { - if d.js { - d.jsondriver().ReadMapElemValue() - } +func (d *decoder[T]) mapElemValue() { + d.d.ReadMapElemValue() d.c = containerMapValue } -func (d *Decoder) mapEnd() { - if d.js { - d.jsondriver().ReadMapEnd() - } - // d.d.ReadMapEnd() +func (d *decoder[T]) mapEnd() { + d.d.ReadMapEnd() d.depthDecr() d.c = 0 } -func (d *Decoder) arrayStart(v int) int { - if v != containerLenNil { - d.depthIncr() - d.c = containerArrayStart - } - return v -} - -func (d *Decoder) arrayElem() { - if d.js { - d.jsondriver().ReadArrayElem() - } +func (d *decoder[T]) arrayElem(firstTime bool) { + d.d.ReadArrayElem(firstTime) d.c = containerArrayElem } -func (d *Decoder) arrayEnd() { - if d.js { - d.jsondriver().ReadArrayEnd() - } - // d.d.ReadArrayEnd() +func (d *decoder[T]) arrayEnd() { + d.d.ReadArrayEnd() d.depthDecr() d.c = 0 } -func (d *Decoder) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) 
{ - // var v interface{} = ext.ConvertExt(rv) - // d.d.decode(&v) - // ext.UpdateExt(rv, v) - - // assume v is a pointer: - // - if struct|array, pass as is to ConvertExt - // - else make it non-addressable and pass to ConvertExt - // - make return value from ConvertExt addressable - // - decode into it - // - return the interface for passing into UpdateExt. - // - interface should be a pointer if struct|array, else a value - - var s interface{} - rv := reflect.ValueOf(v) - rv2 := rv.Elem() - rvk := rv2.Kind() - if rvk == reflect.Struct || rvk == reflect.Array { - s = ext.ConvertExt(v) - } else { - s = ext.ConvertExt(rv2i(rv2)) - } - rv = reflect.ValueOf(s) - - // We cannot use isDecodeable here, as the value converted may be nil, - // or it may not be nil but is not addressable and thus we cannot extend it, etc. - // Instead, we just ensure that the value is addressable. - - if !rv.CanAddr() { - rvk = rv.Kind() - rv2 = d.oneShotAddrRV(rv.Type(), rvk) - if rvk == reflect.Interface { - rvSetIntf(rv2, rv) - } else { - rvSetDirect(rv2, rv) - } - rv = rv2 - } - - d.decodeValue(rv, nil) - ext.UpdateExt(v, rv2i(rv)) +func (d *decoder[T]) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) { + // The ext may support different types for performance e.g. int if no fractions, else float64 + // Consequently, best mode is: + // - decode next value into an interface{} + // - pass it to the UpdateExt + var vv interface{} + d.decode(&vv) + ext.UpdateExt(v, vv) + // rv := d.interfaceExtConvertAndDecodeGetRV(v, ext) + // d.decodeValue(rv, nil) + // ext.UpdateExt(v, rv2i(rv)) } -func (d *Decoder) sideDecode(v interface{}, basetype reflect.Type, bs []byte) { - // NewDecoderBytes(bs, d.hh).decodeValue(baseRV(v), d.h.fnNoExt(basetype)) - - defer func(rb bytesDecReader, bytes bool, - c containerState, dbs decByteState, depth int16, r decReader, state interface{}) { - d.rb = rb - d.bytes = bytes - d.c = c - d.decByteState = dbs - d.depth = depth - d.decReader = r - d.d.restoreState(state) - }(d.rb, d.bytes, d.c, d.decByteState, d.depth, d.decReader, d.d.captureState()) - - // d.rb.reset(in) - d.rb = bytesDecReader{bs[:len(bs):len(bs)], 0} - d.bytes = true - d.decReader = &d.rb - d.d.resetState() - d.c = 0 - d.decByteState = decByteStateNone - d.depth = 0 - - // must call using fnNoExt - d.decodeValue(baseRV(v), d.h.fnNoExt(basetype)) +func (d *decoder[T]) fn(t reflect.Type) *decFn[T] { + return d.dh.decFnViaBH(t, d.rtidFn, d.h, d.fp, false) } -func (d *Decoder) fauxUnionReadRawBytes(asString bool) { - if asString || d.h.RawToString { - d.n.v = valueTypeString - // fauxUnion is only used within DecodeNaked calls; consequently, we should try to intern. 
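interfaceExtConvertAndDecode now decodes the next stream value into an interface{} and hands it straight to the extension's UpdateExt. A sketch of an InterfaceExt written against that flow; the SetInterfaceExt registration helper on the handle is assumed from released versions of this package and is not shown in this diff:

```go
// Sketch: a custom type encoded via ConvertExt and rebuilt via UpdateExt,
// which receives whatever Go value the format decoded (float64/int64/uint64 here).
package main

import (
	"fmt"
	"reflect"

	"github.com/ugorji/go/codec"
)

type Celsius struct{ Deg float64 }

type celsiusExt struct{}

func (celsiusExt) ConvertExt(v interface{}) interface{} {
	switch c := v.(type) {
	case Celsius:
		return c.Deg
	case *Celsius:
		return c.Deg
	}
	return nil
}

func (celsiusExt) UpdateExt(dst interface{}, src interface{}) {
	d := dst.(*Celsius)
	switch s := src.(type) {
	case float64:
		d.Deg = s
	case int64: // some formats decode whole numbers as integers
		d.Deg = float64(s)
	case uint64:
		d.Deg = float64(s)
	}
}

func main() {
	var h codec.JsonHandle
	if err := h.SetInterfaceExt(reflect.TypeOf(Celsius{}), 1, celsiusExt{}); err != nil {
		panic(err)
	}

	var buf []byte
	codec.NewEncoderBytes(&buf, &h).MustEncode(Celsius{Deg: 21.5})

	var c Celsius
	codec.NewDecoderBytes(buf, &h).MustDecode(&c)
	fmt.Println(string(buf), c.Deg) // 21.5 21.5
}
```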
- d.n.s = d.stringZC(d.d.DecodeBytes(nil)) - } else { - d.n.v = valueTypeBytes - d.n.l = d.d.DecodeBytes([]byte{}) - } +func (d *decoder[T]) fnNoExt(t reflect.Type) *decFn[T] { + return d.dh.decFnViaBH(t, d.rtidFnNoExt, d.h, d.fp, true) } -func (d *Decoder) oneShotAddrRV(rvt reflect.Type, rvk reflect.Kind) reflect.Value { - if decUseTransient && - (numBoolStrSliceBitset.isset(byte(rvk)) || - ((rvk == reflect.Struct || rvk == reflect.Array) && - d.h.getTypeInfo(rt2id(rvt), rvt).flagCanTransient)) { - return d.perType.TransientAddrK(rvt, rvk) - } - return rvZeroAddrK(rvt, rvk) +// ---- + +func (helperDecDriver[T]) newDecoderBytes(in []byte, h Handle) *decoder[T] { + var c1 decoder[T] + c1.bytes = true + c1.init(h) + c1.ResetBytes(in) // MARKER check for error + return &c1 } -// -------------------------------------------------- - -// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. -// A slice can be set from a map or array in stream. This supports the MapBySlice interface. -// -// Note: if IsNil, do not call ElemContainerState. -type decSliceHelper struct { - d *Decoder - ct valueType - Array bool - IsNil bool +func (helperDecDriver[T]) newDecoderIO(in io.Reader, h Handle) *decoder[T] { + var c1 decoder[T] + c1.init(h) + c1.Reset(in) + return &c1 } -func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { - x.ct = d.d.ContainerType() - x.d = d - switch x.ct { - case valueTypeNil: - x.IsNil = true - case valueTypeArray: - x.Array = true - clen = d.arrayStart(d.d.ReadArrayStart()) - case valueTypeMap: - clen = d.mapStart(d.d.ReadMapStart()) - clen += clen - default: - d.errorf("only encoded map or array can be decoded into a slice (%d)", x.ct) - } - return -} +// ---- -func (x decSliceHelper) End() { - if x.IsNil { - } else if x.Array { - x.d.arrayEnd() - } else { - x.d.mapEnd() - } -} - -func (x decSliceHelper) ElemContainerState(index int) { - // Note: if isnil, clen=0, so we never call into ElemContainerState - - if x.Array { - x.d.arrayElem() - } else if index&1 == 0 { // index%2 == 0 { - x.d.mapElemKey() - } else { - x.d.mapElemValue() - } -} - -func (x decSliceHelper) arrayCannotExpand(hasLen bool, lenv, j, containerLenS int) { - x.d.arrayCannotExpand(lenv, j+1) - // drain completely and return - x.ElemContainerState(j) - x.d.swallow() - j++ - for ; x.d.containerNext(j, containerLenS, hasLen); j++ { - x.ElemContainerState(j) - x.d.swallow() - } - x.End() -} - -// decNextValueBytesHelper helps with NextValueBytes calls. -// -// Typical usage: -// - each Handle's decDriver will implement a high level nextValueBytes, -// which will track the current cursor, delegate to a nextValueBytesR -// method, and then potentially call bytesRdV at the end. -// -// See simple.go for typical usage model. -type decNextValueBytesHelper struct { - d *Decoder -} - -func (x decNextValueBytesHelper) append1(v *[]byte, b byte) { - if *v != nil && !x.d.bytes { - *v = append(*v, b) - } -} - -func (x decNextValueBytesHelper) appendN(v *[]byte, b ...byte) { - if *v != nil && !x.d.bytes { - *v = append(*v, b...) - } -} - -func (x decNextValueBytesHelper) appendS(v *[]byte, b string) { - if *v != nil && !x.d.bytes { - *v = append(*v, b...) 
- } -} - -func (x decNextValueBytesHelper) bytesRdV(v *[]byte, startpos uint) { - if x.d.bytes { - *v = x.d.rb.b[startpos:x.d.rb.c] - } -} - -// decNegintPosintFloatNumberHelper is used for formats that are binary -// and have distinct ways of storing positive integers vs negative integers -// vs floats, which are uniquely identified by the byte descriptor. -// -// Currently, these formats are binc, cbor and simple. -type decNegintPosintFloatNumberHelper struct { - d *Decoder -} - -func (x decNegintPosintFloatNumberHelper) uint64(ui uint64, neg, ok bool) uint64 { - if ok && !neg { - return ui - } - return x.uint64TryFloat(ok) -} - -func (x decNegintPosintFloatNumberHelper) uint64TryFloat(ok bool) (ui uint64) { - if ok { // neg = true - x.d.errorf("assigning negative signed value to unsigned type") - } - f, ok := x.d.d.decFloat() - if ok && f >= 0 && noFrac64(math.Float64bits(f)) { - ui = uint64(f) - } else { - x.d.errorf("invalid number loading uint64, with descriptor: %v", x.d.d.descBd()) - } - return ui -} - -func decNegintPosintFloatNumberHelperInt64v(ui uint64, neg, incrIfNeg bool) (i int64) { - if neg && incrIfNeg { - ui++ - } - i = chkOvf.SignedIntV(ui) - if neg { - i = -i - } - return -} - -func (x decNegintPosintFloatNumberHelper) int64(ui uint64, neg, ok bool) (i int64) { - if ok { - return decNegintPosintFloatNumberHelperInt64v(ui, neg, x.d.cbor) - } - // return x.int64TryFloat() - // } - // func (x decNegintPosintFloatNumberHelper) int64TryFloat() (i int64) { - f, ok := x.d.d.decFloat() - if ok && noFrac64(math.Float64bits(f)) { - i = int64(f) - } else { - x.d.errorf("invalid number loading uint64, with descriptor: %v", x.d.d.descBd()) - } - return -} - -func (x decNegintPosintFloatNumberHelper) float64(f float64, ok bool) float64 { - if ok { - return f - } - return x.float64TryInteger() -} - -func (x decNegintPosintFloatNumberHelper) float64TryInteger() float64 { - ui, neg, ok := x.d.d.decInteger() +func (helperDecDriver[T]) decFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathDs[T]) (f *fastpathD[T], u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) if !ok { - x.d.errorf("invalid descriptor for float: %v", x.d.d.descBd()) + return } - return float64(decNegintPosintFloatNumberHelperInt64v(ui, neg, x.d.cbor)) -} - -// isDecodeable checks if value can be decoded into -// -// decode can take any reflect.Value that is a inherently addressable i.e. 
-// - non-nil chan (we will SEND to it) -// - non-nil slice (we will set its elements) -// - non-nil map (we will put into it) -// - non-nil pointer (we can "update" it) -// - func: no -// - interface: no -// - array: if canAddr=true -// - any other value pointer: if canAddr=true -func isDecodeable(rv reflect.Value) (canDecode bool, reason decNotDecodeableReason) { - switch rv.Kind() { - case reflect.Ptr, reflect.Slice, reflect.Chan, reflect.Map: - canDecode = !rvIsNil(rv) - reason = decNotDecodeableReasonNilReference - case reflect.Func, reflect.Interface, reflect.Invalid, reflect.UnsafePointer: - reason = decNotDecodeableReasonBadKind - default: - canDecode = rv.CanAddr() - reason = decNotDecodeableReasonNonAddrValue - } - return -} - -func decByteSlice(r *decRd, clen, maxInitLen int, bs []byte) (bsOut []byte) { - if clen <= 0 { - bsOut = zeroByteSlice - } else if cap(bs) >= clen { - bsOut = bs[:clen] - r.readb(bsOut) + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) } else { - var len2 int - for len2 < clen { - len3 := decInferLen(clen-len2, maxInitLen, 1) - bs3 := bsOut - bsOut = make([]byte, len2+len3) - copy(bsOut, bs3) - r.readb(bsOut[len2:]) - len2 += len3 - } + u = f.rt } return } -// decInferLen will infer a sensible length, given the following: -// - clen: length wanted. -// - maxlen: max length to be returned. -// if <= 0, it is unset, and we infer it based on the unit size -// - unit: number of bytes for each element of the collection -func decInferLen(clen, maxlen, unit int) int { - // anecdotal testing showed increase in allocation with map length of 16. - // We saw same typical alloc from 0-8, then a 20% increase at 16. - // Thus, we set it to 8. - const ( - minLenIfUnset = 8 - maxMem = 256 * 1024 // 256Kb Memory - ) - - // handle when maxlen is not set i.e. <= 0 - - // clen==0: use 0 - // maxlen<=0, clen<0: use default - // maxlen> 0, clen<0: use default - // maxlen<=0, clen>0: infer maxlen, and cap on it - // maxlen> 0, clen>0: cap at maxlen - - if clen == 0 || clen == containerLenNil { - return 0 - } - if clen < 0 { - // if unspecified, return 64 for bytes, ... 8 for uint64, ... and everything else - clen = 64 / unit - if clen > minLenIfUnset { - return clen +func (helperDecDriver[T]) decFindRtidFn(s []decRtidFn[T], rtid uintptr) (i uint, fn *decFn[T]) { + // binary search. Adapted from sort/search.go. Use goto (not for loop) to allow inlining. 
+ var h uint // var h, i uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h } - return minLenIfUnset + goto LOOP } - if unit <= 0 { - return clen + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn } - if maxlen <= 0 { - maxlen = maxMem / unit - } - if clen < maxlen { - return clen - } - return maxlen + return +} + +func (helperDecDriver[T]) decFromRtidFnSlice(fns *atomicRtidFnSlice) (s []decRtidFn[T]) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]decRtidFn[T]](v)) + } + return +} + +func (dh helperDecDriver[T]) decFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, x *BasicHandle, fp *fastpathDs[T], + checkExt bool) (fn *decFn[T]) { + return dh.decFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperDecDriver[T]) decFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDs[T], + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFn[T]) { + rtid := rt2id(rt) + var sp []decRtidFn[T] = dh.decFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.decFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.decFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperDecDriver[T]) decFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDs[T], + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFn[T]) { + + fn = dh.decFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []decRtidFn[T] + mu.Lock() + sp = dh.decFromRtidFnSlice(fns) + // since this is an atomic load/store, we MUST use a different array each time, + // else we have a data race when a store is happening simultaneously with a decFindRtidFn call. + if sp == nil { + sp = []decRtidFn[T]{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.decFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]decRtidFn[T], len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = decRtidFn[T]{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperDecDriver[T]) decFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathDs[T], + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFn[T]) { + fn = new(decFn[T]) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + // anything can be an extension except the built-in ones: time, raw and rawext. + // ensure we check for these types, then if extension, before checking if + // it implementes one of the pre-declared interfaces. 
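decFnViaLoader above publishes a brand-new sorted slice on every registration so that concurrent readers doing an atomic load never observe a partially mutated array. A standalone sketch of that copy-on-write registry pattern with hypothetical names:

```go
// Sketch: readers atomically load an immutable sorted slice; the writer copies,
// inserts in sorted position, and stores a new backing array under a mutex.
package main

import (
	"fmt"
	"sort"
	"sync"
	"sync/atomic"
)

type entry struct {
	id uintptr
	fn func()
}

type registry struct {
	mu sync.Mutex
	v  atomic.Pointer[[]entry] // always points at an immutable, sorted slice
}

func (r *registry) lookup(id uintptr) (func(), bool) {
	p := r.v.Load()
	if p == nil {
		return nil, false
	}
	s := *p
	i := sort.Search(len(s), func(i int) bool { return s[i].id >= id })
	if i < len(s) && s[i].id == id {
		return s[i].fn, true
	}
	return nil, false
}

func (r *registry) register(id uintptr, fn func()) {
	r.mu.Lock()
	defer r.mu.Unlock()
	var old []entry
	if p := r.v.Load(); p != nil {
		old = *p
	}
	i := sort.Search(len(old), func(i int) bool { return old[i].id >= id })
	if i < len(old) && old[i].id == id {
		return // already registered
	}
	s := make([]entry, 0, len(old)+1) // new backing array every time
	s = append(s, old[:i]...)
	s = append(s, entry{id, fn})
	s = append(s, old[i:]...)
	r.v.Store(&s)
}

func main() {
	var r registry
	r.register(7, func() { fmt.Println("seven") })
	if fn, ok := r.lookup(7); ok {
		fn()
	}
}
```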
+ + fi.addrDf = true + + if rtid == timeTypId && timeBuiltin { + fn.fd = (*decoder[T]).kTime + } else if rtid == rawTypId { + fn.fd = (*decoder[T]).raw + } else if rtid == rawExtTypId { + fn.fd = (*decoder[T]).rawExt + fi.addrD = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fd = (*decoder[T]).ext + fi.addrD = true + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fd = (*decoder[T]).selferUnmarshal + fi.addrD = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fd = (*decoder[T]).binaryUnmarshal + fi.addrD = ti.flagBinaryUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + //If JSON, we should check JSONMarshal before textMarshal + fn.fd = (*decoder[T]).jsonUnmarshal + fi.addrD = ti.flagJsonUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fd = (*decoder[T]).textUnmarshal + fi.addrD = ti.flagTextUnmarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + var rtid2 uintptr + if !ti.flagHasPkgPath { // un-named type (slice or mpa or array) + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) // ti.key for arrays = reflect.SliceOf(ti.elem) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fd = fp[idx].decfn + fi.addrD = true + fi.addrDf = false + if rk == reflect.Array { + fi.addrD = false // decode directly into array value (slice made from it) + } + } + } else { // named type (with underlying type of map or slice or array) + // try to use mapping for underlying type + xfe, xrt := dh.decFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf2 := xfe.decfn + if rk == reflect.Array { + fi.addrD = false // decode directly into array value (slice made from it) + fn.fd = func(d *decoder[T], xf *decFnInfo, xrv reflect.Value) { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } else { + fi.addrD = true + fi.addrDf = false // meaning it can be an address(ptr) or a value + xptr2rt := reflect.PointerTo(xrt) + fn.fd = func(d *decoder[T], xf *decFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, rvConvert(xrv, xptr2rt)) + } else { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } + } + } + } + } + if fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fd = (*decoder[T]).kBool + case reflect.String: + fn.fd = (*decoder[T]).kString + case reflect.Int: + fn.fd = (*decoder[T]).kInt + case reflect.Int8: + fn.fd = (*decoder[T]).kInt8 + case reflect.Int16: + fn.fd = (*decoder[T]).kInt16 + case reflect.Int32: + fn.fd = (*decoder[T]).kInt32 + case reflect.Int64: + fn.fd = (*decoder[T]).kInt64 + case reflect.Uint: + fn.fd = (*decoder[T]).kUint + case reflect.Uint8: + fn.fd = (*decoder[T]).kUint8 + case reflect.Uint16: + fn.fd = (*decoder[T]).kUint16 + case reflect.Uint32: + fn.fd = (*decoder[T]).kUint32 + case reflect.Uint64: + fn.fd = (*decoder[T]).kUint64 + case reflect.Uintptr: + fn.fd = (*decoder[T]).kUintptr + case reflect.Float32: + fn.fd = (*decoder[T]).kFloat32 + case reflect.Float64: + fn.fd = (*decoder[T]).kFloat64 + case reflect.Complex64: + fn.fd = (*decoder[T]).kComplex64 + case reflect.Complex128: + fn.fd = 
(*decoder[T]).kComplex128 + case reflect.Chan: + fn.fd = (*decoder[T]).kChan + case reflect.Slice: + fn.fd = (*decoder[T]).kSlice + case reflect.Array: + fi.addrD = false // decode directly into array value (slice made from it) + fn.fd = (*decoder[T]).kArray + case reflect.Struct: + if ti.simple { + fn.fd = (*decoder[T]).kStructSimple + } else { + fn.fd = (*decoder[T]).kStruct + } + case reflect.Map: + fn.fd = (*decoder[T]).kMap + case reflect.Interface: + // encode: reflect.Interface are handled already by preEncodeValue + fn.fd = (*decoder[T]).kInterface + default: + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue + fn.fd = (*decoder[T]).kErr + } + } + } + return } diff --git a/vendor/github.com/ugorji/go/codec/doc.go b/vendor/github.com/ugorji/go/codec/doc.go index 1a16bca8b..750dd234a 100644 --- a/vendor/github.com/ugorji/go/codec/doc.go +++ b/vendor/github.com/ugorji/go/codec/doc.go @@ -12,7 +12,7 @@ Supported Serialization formats are: - binc: http://github.com/ugorji/binc - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 - json: http://json.org http://tools.ietf.org/html/rfc7159 - - simple: + - simple: (unpublished) This package will carefully use 'package unsafe' for performance reasons in specific places. You can build without unsafe use by passing the safe or appengine tag @@ -78,6 +78,32 @@ Rich Feature Set includes: msgpack-rpc protocol defined at: https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +# Supported build tags + +We gain performance by code-generating fast-paths for slices and maps of built-in types, +and monomorphizing generic code explicitly so we gain inlining and de-virtualization benefits. + +The results are 20-40% performance improvements. + +Building and running is configured using build tags as below. + +At runtime: + +- codec.safe: run in safe mode (not using unsafe optimizations) +- codec.notmono: use generics code (bypassing performance-boosting monomorphized code) +- codec.notfastpath: skip fast path code for slices and maps of built-in types (number, bool, string, bytes) + +Each of these "runtime" tags have a convenience synonym i.e. safe, notmono, notfastpath. +Pls use these mostly during development - use codec.XXX in your go files. + +Build only: + +- codec.build: used to generate fastpath and monomorphization code + +Test only: + +- codec.notmammoth: skip the mammoth generated tests + # Extension Support Users can register a function to handle the encoding or decoding of @@ -203,6 +229,10 @@ You can run the tag 'codec.safe' to run tests or build in safe mode. e.g. go test -tags codec.safe -run Json go test -tags "alltests codec.safe" -run Suite +You can run the tag 'codec.notmono' to build bypassing the monomorphized code e.g. + + go test -tags codec.notmono -run Json + Running Benchmarks cd bench @@ -225,3 +255,87 @@ Embedded fields are encoded as if they exist in the top-level struct, with some caveats. See Encode documentation. */ package codec + +/* +Generics + +Generics are used across to board to reduce boilerplate, and hopefully +improve performance by +- reducing need for interface calls (de-virtualization) +- resultant inlining of those calls + +encoder/decoder --> Driver (json/cbor/...) --> input/output (bytes or io abstraction) + +There are 2 * 5 * 2 (20) combinations of monomorphized values. + +Key rules +- do not use top-level generic functions. + Due to type inference, monomorphizing them proves challenging +- only use generic methods. 
+ Monomorphizing is done at the type once, and method names need not change +- do not have method calls have a parameter of an encWriter or decReader. + All those calls are handled directly by the driver. +- Include a helper type for each parameterized thing, and add all generic functions to them e.g. + helperEncWriter[T encWriter] + helperEncReader[T decReader] + helperEncDriver[T encDriver] + helperDecDriver[T decDriver] +- Always use T as the generic type name (when needed) +- No inline types +- No closures taking parameters of generic types + +*/ +/* +Naming convention: + +Currently, as generic and non-generic types/functions/vars are put in the same files, +we suffer because: +- build takes longer as non-generic code is built when a build tag wants only monomorphised code +- files have many lines which are not used at runtime (due to type parameters) +- code coverage is inaccurate on a single run + +To resolve this, we are streamlining our file naming strategy. + +Basically, we will have the following nomenclature for filenames: +- fastpath (tag:notfastpath): *.notfastpath.*.go vs *.fastpath.*.go +- typed parameters (tag:notmono): *.notmono.*.go vs *.mono.*.go +- safe (tag:safe): *.safe.*.go vs *.unsafe.go +- generated files: *.generated.go +- all others (tags:N/A): *.go without safe/mono/fastpath/generated in the name + +The following files will be affected and split/renamed accordingly + +Base files: +- binc.go +- cbor.go +- json.go +- msgpack.go +- simple.go +- decode.go +- encode.go + +For each base file, split into __file__.go (containing type parameters) and __file__.base.go. +__file__.go will only build with notmono. + +Other files: +- fastpath.generated.go -> base.fastpath.generated.go and base.fastpath.notmono.generated.go +- fastpath.not.go -> base.notfastpath.go +- init.go -> init.notmono.go + +Appropriate build tags will be included in the files, and the right ones only used for +monomorphization. +*/ +/* +Caching Handle options for fast runtime use + +If using cached values from Handle options, then +- re-cache them at each reset() call +- reset is always called at the start of each (Must)(En|De)code + - which calls (en|de)coder.reset([]byte|io.Reader|String) + - which calls (en|de)cDriver.reset() +- at reset, (en|de)c(oder|Driver) can re-cache Handle options before each run + +Some examples: +- json: e.rawext,di,d,ks,is / d.rawext +- decode: (decoderBase) d.jsms,mtr,str, +*/ diff --git a/vendor/github.com/ugorji/go/codec/encode.base.go b/vendor/github.com/ugorji/go/codec/encode.base.go new file mode 100644 index 000000000..0ded90465 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/encode.base.go @@ -0,0 +1,461 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "cmp" + "errors" + "io" + "reflect" + "slices" + "sync" + "time" +) + +var errEncoderNotInitialized = errors.New("encoder not initialized") + +var encBuiltinRtids []uintptr + +func init() { + for _, v := range []interface{}{ + (string)(""), + (bool)(false), + (int)(0), + (int8)(0), + (int16)(0), + (int32)(0), + (int64)(0), + (uint)(0), + (uint8)(0), + (uint16)(0), + (uint32)(0), + (uint64)(0), + (uintptr)(0), + (float32)(0), + (float64)(0), + (complex64)(0), + (complex128)(0), + (time.Time{}), + ([]byte)(nil), + (Raw{}), + // (interface{})(nil), + } { + t := reflect.TypeOf(v) + encBuiltinRtids = append(encBuiltinRtids, rt2id(t), rt2id(reflect.PointerTo(t))) + } + slices.Sort(encBuiltinRtids) +} + +// encDriver abstracts the actual codec (binc vs msgpack, etc) +type encDriverI interface { + EncodeNil() + EncodeInt(i int64) + EncodeUint(i uint64) + EncodeBool(b bool) + EncodeFloat32(f float32) + EncodeFloat64(f float64) + // re is never nil + EncodeRawExt(re *RawExt) + // ext is never nil + EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) + // EncodeString using cUTF8, honor'ing StringToRaw flag + EncodeString(v string) + EncodeStringNoEscape4Json(v string) + // encode a non-nil []byte + EncodeStringBytesRaw(v []byte) + // encode a []byte as nil, empty or encoded sequence of bytes depending on context + EncodeBytes(v []byte) + EncodeTime(time.Time) + WriteArrayStart(length int) + WriteArrayEnd() + WriteMapStart(length int) + WriteMapEnd() + + // these write a zero-len map or array into the stream + WriteMapEmpty() + WriteArrayEmpty() + + writeNilMap() + writeNilArray() + writeNilBytes() + + // these are no-op except for json + encDriverContainerTracker + + // reset will reset current encoding runtime state, and cached information from the handle + reset() + + atEndOfEncode() + writerEnd() + + writeBytesAsis(b []byte) + // writeStringAsisDblQuoted(v string) + + resetOutBytes(out *[]byte) + resetOutIO(out io.Writer) + + init(h Handle, shared *encoderBase, enc encoderI) (fp interface{}) + + // driverStateManager +} + +type encInit2er struct{} + +func (encInit2er) init2(enc encoderI) {} + +type encDriverContainerTracker interface { + WriteArrayElem(firstTime bool) + WriteMapElemKey(firstTime bool) + WriteMapElemValue() +} + +type encDriverNoState struct{} + +// func (encDriverNoState) captureState() interface{} { return nil } +// func (encDriverNoState) resetState() {} +// func (encDriverNoState) restoreState(v interface{}) {} +func (encDriverNoState) reset() {} + +type encDriverNoopContainerWriter struct{} + +func (encDriverNoopContainerWriter) WriteArrayStart(length int) {} +func (encDriverNoopContainerWriter) WriteArrayEnd() {} +func (encDriverNoopContainerWriter) WriteMapStart(length int) {} +func (encDriverNoopContainerWriter) WriteMapEnd() {} +func (encDriverNoopContainerWriter) atEndOfEncode() {} + +// encStructFieldObj[Slice] is used for sorting when there are missing fields and canonical flag is set +type encStructFieldObj struct { + key string + rv reflect.Value + intf interface{} + isRv bool + noEsc4json bool + builtin bool +} + +type encStructFieldObjSlice []encStructFieldObj + +func (p encStructFieldObjSlice) Len() int { return len(p) } +func (p encStructFieldObjSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } +func (p encStructFieldObjSlice) Less(i, j int) bool { + return p[uint(i)].key < p[uint(j)].key +} + +// ---- + +type orderedRv[T cmp.Ordered] struct { + v T + r reflect.Value +} + +func 
cmpOrderedRv[T cmp.Ordered](v1, v2 orderedRv[T]) int { + return cmp.Compare(v1.v, v2.v) +} + +// ---- + +type encFnInfo struct { + ti *typeInfo + xfFn Ext + xfTag uint64 + addrE bool + // addrEf bool // force: if addrE, then encode function MUST take a ptr +} + +// ---- + +// EncodeOptions captures configuration options during encode. +type EncodeOptions struct { + // WriterBufferSize is the size of the buffer used when writing. + // + // if > 0, we use a smart buffer internally for performance purposes. + WriterBufferSize int + + // ChanRecvTimeout is the timeout used when selecting from a chan. + // + // Configuring this controls how we receive from a chan during the encoding process. + // - If ==0, we only consume the elements currently available in the chan. + // - if <0, we consume until the chan is closed. + // - If >0, we consume until this timeout. + ChanRecvTimeout time.Duration + + // StructToArray specifies to encode a struct as an array, and not as a map + StructToArray bool + + // Canonical representation means that encoding a value will always result in the same + // sequence of bytes. + // + // This only affects maps, as the iteration order for maps is random. + // + // The implementation MAY use the natural sort order for the map keys if possible: + // + // - If there is a natural sort order (ie for number, bool, string or []byte keys), + // then the map keys are first sorted in natural order and then written + // with corresponding map values to the strema. + // - If there is no natural sort order, then the map keys will first be + // encoded into []byte, and then sorted, + // before writing the sorted keys and the corresponding map values to the stream. + // + Canonical bool + + // CheckCircularRef controls whether we check for circular references + // and error fast during an encode. + // + // If enabled, an error is received if a pointer to a struct + // references itself either directly or through one of its fields (iteratively). + // + // This is opt-in, as there may be a performance hit to checking circular references. + CheckCircularRef bool + + // RecursiveEmptyCheck controls how we determine whether a value is empty. + // + // If true, we descend into interfaces and pointers to reursively check if value is empty. + // + // We *might* check struct fields one by one to see if empty + // (if we cannot directly check if a struct value is equal to its zero value). + // If so, we honor IsZero, Comparable, IsCodecEmpty(), etc. + // Note: This *may* make OmitEmpty more expensive due to the large number of reflect calls. + // + // If false, we check if the value is equal to its zero value (newly allocated state). + RecursiveEmptyCheck bool + + // Raw controls whether we encode Raw values. + // This is a "dangerous" option and must be explicitly set. + // If set, we blindly encode Raw values as-is, without checking + // if they are a correct representation of a value in that format. + // If unset, we error out. + Raw bool + + // StringToRaw controls how strings are encoded. + // + // As a go string is just an (immutable) sequence of bytes, + // it can be encoded either as raw bytes or as a UTF string. + // + // By default, strings are encoded as UTF-8. + // but can be treated as []byte during an encode. + // + // Note that things which we know (by definition) to be UTF-8 + // are ALWAYS encoded as UTF-8 strings. + // These include encoding.TextMarshaler, time.Format calls, struct field names, etc. 
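The EncodeOptions fields above are set directly on a handle (each handle embeds these options). A short sketch, assuming the public API, of the two most commonly used ones described here, Canonical and StructToArray:

```go
// Sketch: Canonical makes map encoding deterministic (sorted keys);
// StructToArray encodes struct fields positionally instead of as a map.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type point struct {
	X, Y int
}

func main() {
	var h codec.JsonHandle
	h.Canonical = true
	h.StructToArray = true

	var buf []byte
	codec.NewEncoderBytes(&buf, &h).MustEncode(map[string]int{"b": 2, "a": 1, "c": 3})
	fmt.Println(string(buf)) // {"a":1,"b":2,"c":3}

	var buf2 []byte
	codec.NewEncoderBytes(&buf2, &h).MustEncode(point{X: 1, Y: 2})
	fmt.Println(string(buf2)) // [1,2]
}
```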
+ StringToRaw bool + + // OptimumSize controls whether we optimize for the smallest size. + // + // Some formats will use this flag to determine whether to encode + // in the smallest size possible, even if it takes slightly longer. + // + // For example, some formats that support half-floats might check if it is possible + // to store a float64 as a half float. Doing this check has a small performance cost, + // but the benefit is that the encoded message will be smaller. + OptimumSize bool + + // NoAddressableReadonly controls whether we try to force a non-addressable value + // to be addressable so we can call a pointer method on it e.g. for types + // that support Selfer, json.Marshaler, etc. + // + // Use it in the very rare occurrence that your types modify a pointer value when calling + // an encode callback function e.g. JsonMarshal, TextMarshal, BinaryMarshal or CodecEncodeSelf. + NoAddressableReadonly bool + + // NilCollectionToZeroLength controls whether we encode nil collections (map, slice, chan) + // as nil (e.g. null if using JSON) or as zero length collections (e.g. [] or {} if using JSON). + // + // This is useful in many scenarios e.g. + // - encoding in go, but decoding the encoded stream in python + // where context of the type is missing but needed + // + // Note: this flag ignores the MapBySlice tag, and will encode nil slices, maps and chan + // in their natural zero-length formats e.g. a slice in json encoded as [] + // (and not nil or {} if MapBySlice tag). + NilCollectionToZeroLength bool +} + +// --------------------------------------------- + +// encoderBase is shared as a field between Encoder and its encDrivers. +// This way, encDrivers need not hold a referece to the Encoder itself. +type encoderBase struct { + perType encPerType + + h *BasicHandle + + // MARKER: these fields below should belong directly in Encoder. + // There should not be any pointers here - just values. + // we pack them here for space efficiency and cache-line optimization. + + rtidFn, rtidFnNoExt *atomicRtidFnSlice + + // se encoderI + err error + + blist bytesFreeList + + // js bool // is json encoder? + // be bool // is binary encoder? + + bytes bool + + c containerState + + calls uint16 + seq uint16 // sequencer (e.g. used by binc for symbols, etc) + + // ---- cpu cache line boundary + hh Handle + + // ---- cpu cache line boundary + + // ---- writable fields during execution --- *try* to keep in sep cache line + + ci circularRefChecker + + slist sfiRvFreeList +} + +func (e *encoderBase) HandleName() string { + return e.hh.Name() +} + +// Release is a no-op. +// +// Deprecated: Pooled resources are not used with an Encoder. +// This method is kept for compatibility reasons only. 
+func (e *encoderBase) Release() { +} + +func (e *encoderBase) setContainerState(cs containerState) { + if cs != 0 { + e.c = cs + } +} + +func (e *encoderBase) haltOnMbsOddLen(length int) { + if length&1 != 0 { // similar to &1==1 or %2 == 1 + halt.errorInt("mapBySlice requires even slice length, but got ", int64(length)) + } +} + +// addrRV returns a addressable value given that rv is not addressable +func (e *encoderBase) addrRV(rv reflect.Value, typ, ptrType reflect.Type) (rva reflect.Value) { + // if rv.CanAddr() { + // return rvAddr(rv, ptrType) + // } + if e.h.NoAddressableReadonly { + rva = reflect.New(typ) + rvSetDirect(rva.Elem(), rv) + return + } + return rvAddr(e.perType.AddressableRO(rv), ptrType) +} + +func (e *encoderBase) wrapErr(v error, err *error) { + *err = wrapCodecErr(v, e.hh.Name(), 0, true) +} + +func (e *encoderBase) kErr(_ *encFnInfo, rv reflect.Value) { + halt.errorf("unsupported encoding kind: %s, for %#v", rv.Kind(), any(rv)) +} + +func chanToSlice(rv reflect.Value, rtslice reflect.Type, timeout time.Duration) (rvcs reflect.Value) { + rvcs = rvZeroK(rtslice, reflect.Slice) + if timeout < 0 { // consume until close + for { + recv, recvOk := rv.Recv() + if !recvOk { + break + } + rvcs = reflect.Append(rvcs, recv) + } + } else { + cases := make([]reflect.SelectCase, 2) + cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv} + if timeout == 0 { + cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault} + } else { + tt := time.NewTimer(timeout) + cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)} + } + for { + chosen, recv, recvOk := reflect.Select(cases) + if chosen == 1 || !recvOk { + break + } + rvcs = reflect.Append(rvcs, recv) + } + } + return +} + +type encoderI interface { + Encode(v interface{}) error + MustEncode(v interface{}) + Release() + Reset(w io.Writer) + ResetBytes(out *[]byte) + + wrapErr(v error, err *error) + atEndOfEncode() + writerEnd() + + encodeI(v interface{}) + encodeR(v reflect.Value) + encodeAs(v interface{}, t reflect.Type, ext bool) + + setContainerState(cs containerState) // needed for canonical encoding via side encoder +} + +var errEncNoResetBytesWithWriter = errors.New("cannot reset an Encoder which outputs to []byte with a io.Writer") +var errEncNoResetWriterWithBytes = errors.New("cannot reset an Encoder which outputs to io.Writer with a []byte") + +type encDriverContainerNoTrackerT struct{} + +func (encDriverContainerNoTrackerT) WriteArrayElem(firstTime bool) {} +func (encDriverContainerNoTrackerT) WriteMapElemKey(firstTime bool) {} +func (encDriverContainerNoTrackerT) WriteMapElemValue() {} + +type Encoder struct { + encoderI +} + +// NewEncoder returns an Encoder for encoding into an io.Writer. +// +// For efficiency, Users are encouraged to configure WriterBufferSize on the handle +// OR pass in a memory buffered writer (eg bufio.Writer, bytes.Buffer). +func NewEncoder(w io.Writer, h Handle) *Encoder { + return &Encoder{h.newEncoder(w)} +} + +// NewEncoderBytes returns an encoder for encoding directly and efficiently +// into a byte slice, using zero-copying to temporary slices. +// +// It will potentially replace the output byte slice pointed to. +// After encoding, the out parameter contains the encoded contents. 
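chanToSlice above implements the ChanRecvTimeout behaviour documented in EncodeOptions: a negative timeout drains the channel until it is closed, zero consumes only what is immediately available, and a positive value consumes until the timer fires. A sketch of the negative-timeout case, assuming channel values are encoded as arrays via the public API:

```go
// Sketch: with ChanRecvTimeout < 0 the encoder drains the channel until close
// and writes the received values as an array.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle
	h.ChanRecvTimeout = -1 // consume until the channel is closed

	ch := make(chan int, 3)
	ch <- 1
	ch <- 2
	ch <- 3
	close(ch)

	var buf []byte
	codec.NewEncoderBytes(&buf, &h).MustEncode(ch)
	fmt.Println(string(buf)) // [1,2,3]
}
```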
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder { + return &Encoder{h.newEncoderBytes(out)} +} + +// ---- + +func sideEncode(h Handle, p *sync.Pool, fn func(encoderI)) { + var s encoderI + if usePoolForSideEncode { + s = p.Get().(encoderI) + defer p.Put(s) + } else { + // initialization cycle error + // s = NewEncoderBytes(nil, h).encoderI + s = p.New().(encoderI) + } + fn(s) +} + +func oneOffEncode(se encoderI, v interface{}, out *[]byte, basetype reflect.Type, ext bool) { + se.ResetBytes(out) + se.encodeAs(v, basetype, ext) + se.atEndOfEncode() + se.writerEnd() + // e.sideEncoder(&bs) + // e.sideEncode(v, basetype, 0) +} diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go index 0e9f0cc05..b203036b4 100644 --- a/vendor/github.com/ugorji/go/codec/encode.go +++ b/vendor/github.com/ugorji/go/codec/encode.go @@ -1,3 +1,5 @@ +//go:build notmono || codec.notmono + // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. @@ -5,412 +7,295 @@ package codec import ( "encoding" - "errors" "io" "reflect" + "slices" "sort" "strconv" + "sync" "time" ) -// defEncByteBufSize is the default size of []byte used -// for bufio buffer or []byte (when nil passed) -const defEncByteBufSize = 1 << 10 // 4:16, 6:64, 8:256, 10:1024 +type helperEncDriver[T encDriver] struct{} -var errEncoderNotInitialized = errors.New("Encoder not initialized") - -// encDriver abstracts the actual codec (binc vs msgpack, etc) -type encDriver interface { - EncodeNil() - EncodeInt(i int64) - EncodeUint(i uint64) - EncodeBool(b bool) - EncodeFloat32(f float32) - EncodeFloat64(f float64) - EncodeRawExt(re *RawExt) - EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) - // EncodeString using cUTF8, honor'ing StringToRaw flag - EncodeString(v string) - EncodeStringBytesRaw(v []byte) - EncodeTime(time.Time) - WriteArrayStart(length int) - WriteArrayEnd() - WriteMapStart(length int) - WriteMapEnd() - - // reset will reset current encoding runtime state, and cached information from the handle - reset() - - encoder() *Encoder - - driverStateManager +// encFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. 
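sideEncode above reuses encoders from a sync.Pool so that short, one-off encodes do not pay full initialization cost each time. A standalone sketch of the same idea using only the public API and a hypothetical `encodeOneOff` helper (not the library's internal pool):

```go
// Sketch: pool ready-to-use encoders; each call points the pooled encoder
// at a fresh output buffer via ResetBytes before encoding.
package main

import (
	"fmt"
	"sync"

	"github.com/ugorji/go/codec"
)

var jsonH codec.JsonHandle

var encPool = sync.Pool{
	New: func() interface{} {
		var b []byte
		return codec.NewEncoderBytes(&b, &jsonH)
	},
}

func encodeOneOff(v interface{}) ([]byte, error) {
	e := encPool.Get().(*codec.Encoder)
	defer encPool.Put(e)

	var out []byte
	e.ResetBytes(&out)
	err := e.Encode(v)
	return out, err
}

func main() {
	b, err := encodeOneOff(map[string]int{"n": 1})
	fmt.Println(string(b), err) // {"n":1} <nil>
}
```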
+type encFn[T encDriver] struct { + i encFnInfo + fe func(*encoder[T], *encFnInfo, reflect.Value) + // _ [1]uint64 // padding (cache-aligned) } -type encDriverContainerTracker interface { - WriteArrayElem() - WriteMapElemKey() - WriteMapElemValue() +type encRtidFn[T encDriver] struct { + rtid uintptr + fn *encFn[T] } -type encDriverNoState struct{} - -func (encDriverNoState) captureState() interface{} { return nil } -func (encDriverNoState) reset() {} -func (encDriverNoState) resetState() {} -func (encDriverNoState) restoreState(v interface{}) {} - -type encDriverNoopContainerWriter struct{} - -func (encDriverNoopContainerWriter) WriteArrayStart(length int) {} -func (encDriverNoopContainerWriter) WriteArrayEnd() {} -func (encDriverNoopContainerWriter) WriteMapStart(length int) {} -func (encDriverNoopContainerWriter) WriteMapEnd() {} - -// encStructFieldObj[Slice] is used for sorting when there are missing fields and canonical flag is set -type encStructFieldObj struct { - key string - rv reflect.Value - intf interface{} - ascii bool - isRv bool +// Encoder writes an object to an output stream in a supported format. +// +// Encoder is NOT safe for concurrent use i.e. a Encoder cannot be used +// concurrently in multiple goroutines. +// +// However, as Encoder could be allocation heavy to initialize, a Reset method is provided +// so its state can be reused to decode new input streams repeatedly. +// This is the idiomatic way to use. +type encoder[T encDriver] struct { + dh helperEncDriver[T] + fp *fastpathEs[T] + e T + encoderBase } -type encStructFieldObjSlice []encStructFieldObj - -func (p encStructFieldObjSlice) Len() int { return len(p) } -func (p encStructFieldObjSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p encStructFieldObjSlice) Less(i, j int) bool { - return p[uint(i)].key < p[uint(j)].key +func (e *encoder[T]) rawExt(_ *encFnInfo, rv reflect.Value) { + if re := rv2i(rv).(*RawExt); re == nil { + e.e.EncodeNil() + } else { + e.e.EncodeRawExt(re) + } } -// EncodeOptions captures configuration options during encode. -type EncodeOptions struct { - // WriterBufferSize is the size of the buffer used when writing. - // - // if > 0, we use a smart buffer internally for performance purposes. - WriterBufferSize int - - // ChanRecvTimeout is the timeout used when selecting from a chan. - // - // Configuring this controls how we receive from a chan during the encoding process. - // - If ==0, we only consume the elements currently available in the chan. - // - if <0, we consume until the chan is closed. - // - If >0, we consume until this timeout. - ChanRecvTimeout time.Duration - - // StructToArray specifies to encode a struct as an array, and not as a map - StructToArray bool - - // Canonical representation means that encoding a value will always result in the same - // sequence of bytes. - // - // This only affects maps, as the iteration order for maps is random. - // - // The implementation MAY use the natural sort order for the map keys if possible: - // - // - If there is a natural sort order (ie for number, bool, string or []byte keys), - // then the map keys are first sorted in natural order and then written - // with corresponding map values to the strema. - // - If there is no natural sort order, then the map keys will first be - // encoded into []byte, and then sorted, - // before writing the sorted keys and the corresponding map values to the stream. 
- // - Canonical bool - - // CheckCircularRef controls whether we check for circular references - // and error fast during an encode. - // - // If enabled, an error is received if a pointer to a struct - // references itself either directly or through one of its fields (iteratively). - // - // This is opt-in, as there may be a performance hit to checking circular references. - CheckCircularRef bool - - // RecursiveEmptyCheck controls how we determine whether a value is empty. - // - // If true, we descend into interfaces and pointers to reursively check if value is empty. - // - // We *might* check struct fields one by one to see if empty - // (if we cannot directly check if a struct value is equal to its zero value). - // If so, we honor IsZero, Comparable, IsCodecEmpty(), etc. - // Note: This *may* make OmitEmpty more expensive due to the large number of reflect calls. - // - // If false, we check if the value is equal to its zero value (newly allocated state). - RecursiveEmptyCheck bool - - // Raw controls whether we encode Raw values. - // This is a "dangerous" option and must be explicitly set. - // If set, we blindly encode Raw values as-is, without checking - // if they are a correct representation of a value in that format. - // If unset, we error out. - Raw bool - - // StringToRaw controls how strings are encoded. - // - // As a go string is just an (immutable) sequence of bytes, - // it can be encoded either as raw bytes or as a UTF string. - // - // By default, strings are encoded as UTF-8. - // but can be treated as []byte during an encode. - // - // Note that things which we know (by definition) to be UTF-8 - // are ALWAYS encoded as UTF-8 strings. - // These include encoding.TextMarshaler, time.Format calls, struct field names, etc. - StringToRaw bool - - // OptimumSize controls whether we optimize for the smallest size. - // - // Some formats will use this flag to determine whether to encode - // in the smallest size possible, even if it takes slightly longer. - // - // For example, some formats that support half-floats might check if it is possible - // to store a float64 as a half float. Doing this check has a small performance cost, - // but the benefit is that the encoded message will be smaller. - OptimumSize bool - - // NoAddressableReadonly controls whether we try to force a non-addressable value - // to be addressable so we can call a pointer method on it e.g. for types - // that support Selfer, json.Marshaler, etc. - // - // Use it in the very rare occurrence that your types modify a pointer value when calling - // an encode callback function e.g. JsonMarshal, TextMarshal, BinaryMarshal or CodecEncodeSelf. 
- NoAddressableReadonly bool -} - -// --------------------------------------------- - -func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) { - e.e.EncodeRawExt(rv2i(rv).(*RawExt)) -} - -func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) ext(f *encFnInfo, rv reflect.Value) { e.e.EncodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) } -func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) { - rv2i(rv).(Selfer).CodecEncodeSelf(e) +func (e *encoder[T]) selferMarshal(_ *encFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(&Encoder{e}) } -func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) binaryMarshal(_ *encFnInfo, rv reflect.Value) { bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() e.marshalRaw(bs, fnerr) } -func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) textMarshal(_ *encFnInfo, rv reflect.Value) { bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() e.marshalUtf8(bs, fnerr) } -func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) jsonMarshal(_ *encFnInfo, rv reflect.Value) { bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() e.marshalAsis(bs, fnerr) } -func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) raw(_ *encFnInfo, rv reflect.Value) { e.rawBytes(rv2i(rv).(Raw)) } -func (e *Encoder) encodeComplex64(v complex64) { +func (e *encoder[T]) encodeComplex64(v complex64) { if imag(v) != 0 { - e.errorf("cannot encode complex number: %v, with imaginary values: %v", v, imag(v)) + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) } e.e.EncodeFloat32(real(v)) } -func (e *Encoder) encodeComplex128(v complex128) { +func (e *encoder[T]) encodeComplex128(v complex128) { if imag(v) != 0 { - e.errorf("cannot encode complex number: %v, with imaginary values: %v", v, imag(v)) + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) } e.e.EncodeFloat64(real(v)) } -func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kBool(_ *encFnInfo, rv reflect.Value) { e.e.EncodeBool(rvGetBool(rv)) } -func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kTime(_ *encFnInfo, rv reflect.Value) { e.e.EncodeTime(rvGetTime(rv)) } -func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kString(_ *encFnInfo, rv reflect.Value) { e.e.EncodeString(rvGetString(rv)) } -func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kFloat32(_ *encFnInfo, rv reflect.Value) { e.e.EncodeFloat32(rvGetFloat32(rv)) } -func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kFloat64(_ *encFnInfo, rv reflect.Value) { e.e.EncodeFloat64(rvGetFloat64(rv)) } -func (e *Encoder) kComplex64(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kComplex64(_ *encFnInfo, rv reflect.Value) { e.encodeComplex64(rvGetComplex64(rv)) } -func (e *Encoder) kComplex128(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kComplex128(_ *encFnInfo, rv reflect.Value) { e.encodeComplex128(rvGetComplex128(rv)) } -func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kInt(_ *encFnInfo, rv reflect.Value) { e.e.EncodeInt(int64(rvGetInt(rv))) } -func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kInt8(_ *encFnInfo, rv reflect.Value) { 
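encodeComplex64/128 above encode only the real part and reject any value with a non-zero imaginary component. A small sketch of that rule from the caller's side, assuming the public API:

```go
// Sketch: a complex value with zero imaginary part encodes as its real part;
// a non-zero imaginary part is an encode error.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(complex(2.5, 0)); err == nil {
		fmt.Println(string(buf)) // 2.5
	}

	buf = nil
	err := codec.NewEncoderBytes(&buf, &h).Encode(complex(2.5, 1))
	fmt.Println(err != nil) // true: imaginary values cannot be represented
}
```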
e.e.EncodeInt(int64(rvGetInt8(rv))) } -func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kInt16(_ *encFnInfo, rv reflect.Value) { e.e.EncodeInt(int64(rvGetInt16(rv))) } -func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kInt32(_ *encFnInfo, rv reflect.Value) { e.e.EncodeInt(int64(rvGetInt32(rv))) } -func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kInt64(_ *encFnInfo, rv reflect.Value) { e.e.EncodeInt(int64(rvGetInt64(rv))) } -func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kUint(_ *encFnInfo, rv reflect.Value) { e.e.EncodeUint(uint64(rvGetUint(rv))) } -func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kUint8(_ *encFnInfo, rv reflect.Value) { e.e.EncodeUint(uint64(rvGetUint8(rv))) } -func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kUint16(_ *encFnInfo, rv reflect.Value) { e.e.EncodeUint(uint64(rvGetUint16(rv))) } -func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kUint32(_ *encFnInfo, rv reflect.Value) { e.e.EncodeUint(uint64(rvGetUint32(rv))) } -func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kUint64(_ *encFnInfo, rv reflect.Value) { e.e.EncodeUint(uint64(rvGetUint64(rv))) } -func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kUintptr(_ *encFnInfo, rv reflect.Value) { e.e.EncodeUint(uint64(rvGetUintptr(rv))) } -func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) { - e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) -} - -func chanToSlice(rv reflect.Value, rtslice reflect.Type, timeout time.Duration) (rvcs reflect.Value) { - rvcs = rvZeroK(rtslice, reflect.Slice) - if timeout < 0 { // consume until close - for { - recv, recvOk := rv.Recv() - if !recvOk { - break - } - rvcs = reflect.Append(rvcs, recv) - } - } else { - cases := make([]reflect.SelectCase, 2) - cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv} - if timeout == 0 { - cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault} - } else { - tt := time.NewTimer(timeout) - cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)} - } - for { - chosen, recv, recvOk := reflect.Select(cases) - if chosen == 1 || !recvOk { - break - } - rvcs = reflect.Append(rvcs, recv) - } - } - return -} - -func (e *Encoder) kSeqFn(rtelem reflect.Type) (fn *codecFn) { - for rtelem.Kind() == reflect.Ptr { - rtelem = rtelem.Elem() - } +func (e *encoder[T]) kSeqFn(rt reflect.Type) (fn *encFn[T]) { // if kind is reflect.Interface, do not pre-determine the encoding type, // because preEncodeValue may break it down to a concrete type and kInterface will bomb. 
- if rtelem.Kind() != reflect.Interface { - fn = e.h.fn(rtelem) + if rt = baseRT(rt); rt.Kind() != reflect.Interface { + fn = e.fn(rt) } return } -func (e *Encoder) kSliceWMbs(rv reflect.Value, ti *typeInfo) { - var l = rvLenSlice(rv) - if l == 0 { - e.mapStart(0) +func (e *encoder[T]) kArrayWMbs(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) } else { - e.haltOnMbsOddLen(l) - e.mapStart(l >> 1) // e.mapStart(l / 2) - fn := e.kSeqFn(ti.elem) - for j := 0; j < l; j++ { - if j&1 == 0 { // j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - e.encodeValue(rvSliceIndex(rv, j, ti), fn) - } + l = rv.Len() } - e.mapEnd() -} - -func (e *Encoder) kSliceW(rv reflect.Value, ti *typeInfo) { - var l = rvLenSlice(rv) - e.arrayStart(l) - if l > 0 { - fn := e.kSeqFn(ti.elem) - for j := 0; j < l; j++ { - e.arrayElem() - e.encodeValue(rvSliceIndex(rv, j, ti), fn) - } - } - e.arrayEnd() -} - -func (e *Encoder) kArrayWMbs(rv reflect.Value, ti *typeInfo) { - var l = rv.Len() if l == 0 { - e.mapStart(0) + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(l) + e.mapStart(l >> 1) // e.mapStart(l / 2) + + var fn *encFn[T] + builtin := ti.tielem.flagEncBuiltin + if !builtin { + fn = e.kSeqFn(ti.elem) + } + + // simulate do...while, since we already handled case of 0-length + j := 0 + e.c = containerMapKey + e.e.WriteMapElemKey(true) + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + if j&1 == 0 { // j%2 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(false) + } else { + e.mapElemValue() + } + } + e.c = 0 + e.e.WriteMapEnd() + + // for j := 0; j < l; j++ { + // if j&1 == 0 { // j%2 == 0 { + // e.mapElemKey(j == 0) + // } else { + // e.mapElemValue() + // } + // rvv := rvSliceIndex(rv, j, ti) + // if builtin { + // e.encode(rv2i(baseRVRV(rvv))) + // } else { + // e.encodeValue(rvv, fn) + // } + // } + // e.mapEnd() +} + +func (e *encoder[T]) kArrayW(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) } else { - e.haltOnMbsOddLen(l) - e.mapStart(l >> 1) // e.mapStart(l / 2) - fn := e.kSeqFn(ti.elem) - for j := 0; j < l; j++ { - if j&1 == 0 { // j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - e.encodeValue(rv.Index(j), fn) - } + l = rv.Len() + } + if l <= 0 { + e.e.WriteArrayEmpty() + return } - e.mapEnd() -} - -func (e *Encoder) kArrayW(rv reflect.Value, ti *typeInfo) { - var l = rv.Len() e.arrayStart(l) - if l > 0 { - fn := e.kSeqFn(ti.elem) - for j := 0; j < l; j++ { - e.arrayElem() - e.encodeValue(rv.Index(j), fn) - } + + var fn *encFn[T] + if !ti.tielem.flagEncBuiltin { + fn = e.kSeqFn(ti.elem) } - e.arrayEnd() + + j := 0 + e.c = containerArrayElem + e.e.WriteArrayElem(true) + builtin := ti.tielem.flagEncBuiltin + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + e.c = containerArrayElem + e.e.WriteArrayElem(false) + } + + // if ti.tielem.flagEncBuiltin { + // for j := 0; j < l; j++ { + // e.arrayElem() + // e.encode(rv2i(baseRVRV(rIndex(rv, j, ti)))) + // } + // } else { + // fn := e.kSeqFn(ti.elem) + // for j := 0; j < l; j++ { + // e.arrayElem() + // e.encodeValue(rvArrayIndex(rv, j, ti), fn) + // } + // } + + e.c = 0 + e.e.WriteArrayEnd() } -func (e *Encoder) kChan(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kChan(f *encFnInfo, rv 
reflect.Value) { if f.ti.chandir&uint8(reflect.RecvDir) == 0 { - e.errorf("send-only channel cannot be encoded") + halt.errorStr("send-only channel cannot be encoded") } if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) { e.kSliceBytesChan(rv) @@ -420,33 +305,34 @@ func (e *Encoder) kChan(f *codecFnInfo, rv reflect.Value) { rv = chanToSlice(rv, rtslice, e.h.ChanRecvTimeout) ti := e.h.getTypeInfo(rt2id(rtslice), rtslice) if f.ti.mbs { - e.kSliceWMbs(rv, ti) + e.kArrayWMbs(rv, ti, true) } else { - e.kSliceW(rv, ti) + e.kArrayW(rv, ti, true) } } -func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kSlice(f *encFnInfo, rv reflect.Value) { if f.ti.mbs { - e.kSliceWMbs(rv, f.ti) + e.kArrayWMbs(rv, f.ti, true) } else if f.ti.rtid == uint8SliceTypId || uint8TypId == rt2id(f.ti.elem) { - e.e.EncodeStringBytesRaw(rvGetBytes(rv)) + // 'uint8TypId == rt2id(f.ti.elem)' checks types having underlying []byte + e.e.EncodeBytes(rvGetBytes(rv)) } else { - e.kSliceW(rv, f.ti) + e.kArrayW(rv, f.ti, true) } } -func (e *Encoder) kArray(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kArray(f *encFnInfo, rv reflect.Value) { if f.ti.mbs { - e.kArrayWMbs(rv, f.ti) + e.kArrayWMbs(rv, f.ti, false) } else if handleBytesWithinKArray && uint8TypId == rt2id(f.ti.elem) { - e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, []byte{})) + e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, nil)) // bytes from array never nil } else { - e.kArrayW(rv, f.ti) + e.kArrayW(rv, f.ti, false) } } -func (e *Encoder) kSliceBytesChan(rv reflect.Value) { +func (e *encoder[T]) kSliceBytesChan(rv reflect.Value) { // do not use range, so that the number of elements encoded // does not change, and encoding does not hang waiting on someone to close chan. @@ -487,78 +373,132 @@ L1: } } - e.e.EncodeStringBytesRaw(bs) + e.e.EncodeBytes(bs) e.blist.put(bs) if !byteSliceSameData(bs0, bs) { e.blist.put(bs0) } } -func (e *Encoder) kStructSfi(f *codecFnInfo) []*structFieldInfo { - if e.h.Canonical { - return f.ti.sfi.sorted() - } - return f.ti.sfi.source() -} - -func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) { - var tisfi []*structFieldInfo - if f.ti.toArray || e.h.StructToArray { // toArray - tisfi = f.ti.sfi.source() - e.arrayStart(len(tisfi)) - for _, si := range tisfi { - e.arrayElem() - e.encodeValue(si.path.field(rv), nil) - } - e.arrayEnd() +func (e *encoder[T]) kStructFieldKey(keyType valueType, encName string) { + // use if (not switch) block, so that branch prediction picks valueTypeString first + if keyType == valueTypeString { + e.e.EncodeString(encName) + } else if keyType == valueTypeInt { + e.e.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + e.e.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + e.e.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64))) } else { - tisfi = e.kStructSfi(f) - e.mapStart(len(tisfi)) - keytyp := f.ti.keyType - for _, si := range tisfi { - e.mapElemKey() - e.kStructFieldKey(keytyp, si.path.encNameAsciiAlphaNum, si.encName) - e.mapElemValue() - e.encodeValue(si.path.field(rv), nil) + halt.errorStr2("invalid struct key type: ", keyType.String()) + } + // e.dh.encStructFieldKey(e.e, encName, keyType, encNameAsciiAlphaNum, e.js) +} + +func (e *encoder[T]) kStructSimple(f *encFnInfo, rv reflect.Value) { + _ = e.e // early asserts e, e.e are not nil once + tisfi := f.ti.sfi.source() + + // To bypass encodeValue, we need to handle cases where + // the field is an 
interface kind. To do this, we need to handle an + // interface or a pointer to an interface differently. + // + // Easiest to just delegate to encodeValue. + + chkCirRef := e.h.CheckCircularRef + var si *structFieldInfo + var j int + // use value of chkCirRef ie if true, then send the addr of the value + if f.ti.toArray || e.h.StructToArray { // toArray + if len(tisfi) == 0 { + e.e.WriteArrayEmpty() + return } - e.mapEnd() + e.arrayStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } else { + if len(tisfi) == 0 { + e.e.WriteMapEmpty() + return + } + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + e.mapStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.EncodeStringNoEscape4Json(si.encName) + e.mapElemValue() + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteMapEnd() } } -func (e *Encoder) kStructFieldKey(keyType valueType, encNameAsciiAlphaNum bool, encName string) { - encStructFieldKey(encName, e.e, e.w(), keyType, encNameAsciiAlphaNum, e.js) -} - -func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) { - var newlen int +func (e *encoder[T]) kStruct(f *encFnInfo, rv reflect.Value) { + _ = e.e // early asserts e, e.e are not nil once ti := f.ti toMap := !(ti.toArray || e.h.StructToArray) var mf map[string]interface{} if ti.flagMissingFielder { + toMap = true mf = rv2i(rv).(MissingFielder).CodecMissingFields() - toMap = true - newlen += len(mf) } else if ti.flagMissingFielderPtr { - rv2 := e.addrRV(rv, ti.rt, ti.ptr) - mf = rv2i(rv2).(MissingFielder).CodecMissingFields() toMap = true - newlen += len(mf) + if rv.CanAddr() { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder).CodecMissingFields() + } else { + mf = rv2i(e.addrRV(rv, ti.rt, ti.ptr)).(MissingFielder).CodecMissingFields() + } } + newlen := len(mf) tisfi := ti.sfi.source() newlen += len(tisfi) var fkvs = e.slist.get(newlen)[:newlen] recur := e.h.RecursiveEmptyCheck + chkCirRef := e.h.CheckCircularRef + + var xlen int var kv sfiRv var j int + var sf encStructFieldObj if toMap { newlen = 0 - for _, si := range e.kStructSfi(f) { - kv.r = si.path.field(rv) - if si.path.omitEmpty && isEmptyValue(kv.r, e.h.TypeInfos, recur) { - continue + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + for _, si := range tisfi { + // kv.r = si.path.field(rv, false, si.encBuiltin || !chkCirRef) + // if si.omitEmpty && isEmptyValue(kv.r, e.h.TypeInfos, recur) { + // continue + // } + if si.omitEmpty { + kv.r = si.fieldNoAlloc(rv, false) // test actual field val + if isEmptyValue(kv.r, e.h.TypeInfos, recur) { + continue + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) } kv.v = si fkvs[newlen] = kv @@ -566,7 +506,7 @@ func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) { } var mf2s []stringIntf - if len(mf) > 0 { + if len(mf) != 0 { mf2s = make([]stringIntf, 0, len(mf)) for k, v := range mf { if k == "" { @@ -579,87 +519,142 @@ func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) { } } - e.mapStart(newlen + len(mf2s)) + xlen = newlen + len(mf2s) + if xlen == 0 { + e.e.WriteMapEmpty() + goto END + } + + e.mapStart(xlen) // When there are missing fields, and Canonical flag is set, // we cannot have the missing fields and 
struct fields sorted independently. // We have to capture them together and sort as a unit. - if len(mf2s) > 0 && e.h.Canonical { + if len(mf2s) != 0 && e.h.Canonical { mf2w := make([]encStructFieldObj, newlen+len(mf2s)) for j = 0; j < newlen; j++ { kv = fkvs[j] - mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, kv.v.path.encNameAsciiAlphaNum, true} + mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, true, + !kv.v.encNameEscape4Json, kv.v.encBuiltin} } for _, v := range mf2s { - mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false} + mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false, false} j++ } sort.Sort((encStructFieldObjSlice)(mf2w)) - for _, v := range mf2w { - e.mapElemKey() - e.kStructFieldKey(ti.keyType, v.ascii, v.key) - e.mapElemValue() - if v.isRv { - e.encodeValue(v.rv, nil) + for j, sf = range mf2w { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && sf.noEsc4json { + e.e.EncodeStringNoEscape4Json(sf.key) } else { - e.encode(v.intf) + e.kStructFieldKey(ti.keyType, sf.key) + } + e.mapElemValue() + if sf.isRv { + if sf.builtin { + e.encodeIB(rv2i(baseRVRV(sf.rv))) + } else { + e.encodeValue(sf.rv, nil) + } + } else { + if !e.encodeBuiltin(sf.intf) { + e.encodeR(reflect.ValueOf(sf.intf)) + } + //e.encodeI(sf.intf) // MARKER inlined } } } else { keytyp := ti.keyType for j = 0; j < newlen; j++ { kv = fkvs[j] - e.mapElemKey() - e.kStructFieldKey(keytyp, kv.v.path.encNameAsciiAlphaNum, kv.v.encName) + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && !kv.v.encNameEscape4Json { + e.e.EncodeStringNoEscape4Json(kv.v.encName) + } else { + e.kStructFieldKey(keytyp, kv.v.encName) + } e.mapElemValue() - e.encodeValue(kv.r, nil) + if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } } for _, v := range mf2s { - e.mapElemKey() - e.kStructFieldKey(keytyp, false, v.v) + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.kStructFieldKey(keytyp, v.v) e.mapElemValue() - e.encode(v.i) + if !e.encodeBuiltin(v.i) { + e.encodeR(reflect.ValueOf(v.i)) + } + // e.encodeI(v.i) // MARKER inlined + j++ } } - e.mapEnd() + e.c = 0 + e.e.WriteMapEnd() } else { newlen = len(tisfi) for i, si := range tisfi { // use unsorted array (to match sequence in struct) - kv.r = si.path.field(rv) - // use the zero value. - // if a reference or struct, set to nil (so you do not output too much) - if si.path.omitEmpty && isEmptyValue(kv.r, e.h.TypeInfos, recur) { - switch kv.r.Kind() { - case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice: + // kv.r = si.path.field(rv, false, si.encBuiltin || !chkCirRef) + // kv.r = si.path.field(rv, false, !si.omitEmpty || si.encBuiltin || !chkCirRef) + if si.omitEmpty { + // use the zero value. 
+ // if a reference or struct, set to nil (so you do not output too much) + kv.r = si.fieldNoAlloc(rv, false) // test actual field val + if isEmptyContainerValue(kv.r, e.h.TypeInfos, recur) { kv.r = reflect.Value{} //encode as nil } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) } + kv.v = si fkvs[i] = kv } + + if newlen == 0 { + e.e.WriteArrayEmpty() + goto END + } + // encode it all e.arrayStart(newlen) for j = 0; j < newlen; j++ { - e.arrayElem() - e.encodeValue(fkvs[j].r, nil) + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + kv = fkvs[j] + if !kv.r.IsValid() { + e.e.EncodeNil() + } else if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } } - e.arrayEnd() + e.c = 0 + e.e.WriteArrayEnd() } +END: // do not use defer. Instead, use explicit pool return at end of function. // defer has a cost we are trying to avoid. // If there is a panic and these slices are not returned, it is ok. e.slist.put(fkvs) } -func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { +func (e *encoder[T]) kMap(f *encFnInfo, rv reflect.Value) { + _ = e.e // early asserts e, e.e are not nil once l := rvLenMap(rv) - e.mapStart(l) if l == 0 { - e.mapEnd() + e.e.WriteMapEmpty() return } + e.mapStart(l) // determine the underlying key and val encFn's for the map. // This eliminates some work which is done for each loop iteration i.e. @@ -669,7 +664,7 @@ func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { // encoding type, because preEncodeValue may break it down to // a concrete type and kInterface will bomb. - var keyFn, valFn *codecFn + var keyFn, valFn *encFn[T] ktypeKind := reflect.Kind(f.ti.keykind) vtypeKind := reflect.Kind(f.ti.elemkind) @@ -681,7 +676,7 @@ func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { rtvalkind = rtval.Kind() } if rtvalkind != reflect.Interface { - valFn = e.h.fn(rtval) + valFn = e.fn(rtval) } var rvv = mapAddrLoopvarRV(f.ti.elem, vtypeKind) @@ -689,19 +684,20 @@ func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { rtkey := f.ti.key var keyTypeIsString = stringTypId == rt2id(rtkey) // rtkeyid if keyTypeIsString { - keyFn = e.h.fn(rtkey) + keyFn = e.fn(rtkey) } else { for rtkey.Kind() == reflect.Ptr { rtkey = rtkey.Elem() } if rtkey.Kind() != reflect.Interface { - keyFn = e.h.fn(rtkey) + keyFn = e.fn(rtkey) } } if e.h.Canonical { e.kMapCanonical(f.ti, rv, rvv, keyFn, valFn) - e.mapEnd() + e.c = 0 + e.e.WriteMapEnd() return } @@ -710,22 +706,35 @@ func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { var it mapIter mapRange(&it, rv, rvk, rvv, true) - for it.Next() { - e.mapElemKey() + kbuiltin := f.ti.tikey.flagEncBuiltin + vbuiltin := f.ti.tielem.flagEncBuiltin + for j := 0; it.Next(); j++ { + rv = it.Key() + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) if keyTypeIsString { - e.e.EncodeString(it.Key().String()) + e.e.EncodeString(rvGetString(rv)) + } else if kbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) } else { - e.encodeValue(it.Key(), keyFn) + e.encodeValue(rv, keyFn) } e.mapElemValue() - e.encodeValue(it.Value(), valFn) + rv = it.Value() + if vbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(it.Value(), valFn) + } } it.Done() - e.mapEnd() + e.c = 0 + e.e.WriteMapEnd() } -func (e *Encoder) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valFn *codecFn) { +func (e *encoder[T]) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valFn *encFn[T]) { + _ = e.e // early asserts e, e.e are not nil once // The base kind of the type of the map key is 
sufficient for ordering. // We only do out of band if that kind is not ordered (number or string), bool or time.Time. // If the key is a predeclared type, directly call methods on encDriver e.g. EncodeString @@ -735,9 +744,11 @@ func (e *Encoder) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valF mks := rv.MapKeys() rtkeyKind := rtkey.Kind() - kfast := mapKeyFastKindFor(rtkeyKind) - visindirect := mapStoresElemIndirect(uintptr(ti.elemsize)) - visref := refBitset.isset(ti.elemkind) + mparams := getMapReqParams(ti) + + // kfast := mapKeyFastKindFor(rtkeyKind) + // visindirect := mapStoresElemIndirect(uintptr(ti.elemsize)) + // visref := refBitset.isset(ti.elemkind) switch rtkeyKind { case reflect.Bool: @@ -751,104 +762,110 @@ func (e *Encoder) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valF mks[0], mks[1] = mks[1], mks[0] } for i := range mks { - e.mapElemKey() + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) if rtkeydecl { e.e.EncodeBool(mks[i].Bool()) } else { e.encodeValueNonNil(mks[i], keyFn) } e.mapElemValue() - e.encodeValue(mapGet(rv, mks[i], rvv, kfast, visindirect, visref), valFn) + e.encodeValue(mapGet(rv, mks[i], rvv, mparams), valFn) } case reflect.String: - mksv := make([]stringRv, len(mks)) + mksv := make([]orderedRv[string], len(mks)) for i, k := range mks { v := &mksv[i] v.r = k - v.v = k.String() + v.v = rvGetString(k) } - sort.Sort(stringRvSlice(mksv)) + slices.SortFunc(mksv, cmpOrderedRv) for i := range mksv { - e.mapElemKey() + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) if rtkeydecl { e.e.EncodeString(mksv[i].v) } else { e.encodeValueNonNil(mksv[i].r, keyFn) } e.mapElemValue() - e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn) + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) } case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: - mksv := make([]uint64Rv, len(mks)) + mksv := make([]orderedRv[uint64], len(mks)) for i, k := range mks { v := &mksv[i] v.r = k v.v = k.Uint() } - sort.Sort(uint64RvSlice(mksv)) + slices.SortFunc(mksv, cmpOrderedRv) for i := range mksv { - e.mapElemKey() + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) if rtkeydecl { e.e.EncodeUint(mksv[i].v) } else { e.encodeValueNonNil(mksv[i].r, keyFn) } e.mapElemValue() - e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn) + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) } case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - mksv := make([]int64Rv, len(mks)) + mksv := make([]orderedRv[int64], len(mks)) for i, k := range mks { v := &mksv[i] v.r = k v.v = k.Int() } - sort.Sort(int64RvSlice(mksv)) + slices.SortFunc(mksv, cmpOrderedRv) for i := range mksv { - e.mapElemKey() + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) if rtkeydecl { e.e.EncodeInt(mksv[i].v) } else { e.encodeValueNonNil(mksv[i].r, keyFn) } e.mapElemValue() - e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn) + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) } case reflect.Float32: - mksv := make([]float64Rv, len(mks)) + mksv := make([]orderedRv[float64], len(mks)) for i, k := range mks { v := &mksv[i] v.r = k v.v = k.Float() } - sort.Sort(float64RvSlice(mksv)) + slices.SortFunc(mksv, cmpOrderedRv) for i := range mksv { - e.mapElemKey() + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) if rtkeydecl { e.e.EncodeFloat32(float32(mksv[i].v)) } else { e.encodeValueNonNil(mksv[i].r, keyFn) } e.mapElemValue() - 
e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn) + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) } case reflect.Float64: - mksv := make([]float64Rv, len(mks)) + mksv := make([]orderedRv[float64], len(mks)) for i, k := range mks { v := &mksv[i] v.r = k v.v = k.Float() } - sort.Sort(float64RvSlice(mksv)) + slices.SortFunc(mksv, cmpOrderedRv) for i := range mksv { - e.mapElemKey() + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) if rtkeydecl { e.e.EncodeFloat64(mksv[i].v) } else { e.encodeValueNonNil(mksv[i].r, keyFn) } e.mapElemValue() - e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn) + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) } default: if rtkey == timeTyp { @@ -858,12 +875,13 @@ func (e *Encoder) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valF v.r = k v.v = rv2i(k).(time.Time) } - sort.Sort(timeRvSlice(mksv)) + slices.SortFunc(mksv, cmpTimeRv) for i := range mksv { - e.mapElemKey() + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) e.e.EncodeTime(mksv[i].v) e.mapElemValue() - e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn) + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) } break } @@ -874,41 +892,27 @@ func (e *Encoder) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valF mksv := bs0 mksbv := make([]bytesRv, len(mks)) - func() { - // replicate sideEncode logic - defer func(wb bytesEncAppender, bytes bool, c containerState, state interface{}) { - e.wb = wb - e.bytes = bytes - e.c = c - e.e.restoreState(state) - }(e.wb, e.bytes, e.c, e.e.captureState()) - - // e2 := NewEncoderBytes(&mksv, e.hh) - e.wb = bytesEncAppender{mksv[:0], &mksv} - e.bytes = true - e.c = 0 - e.e.resetState() - + sideEncode(e.hh, &e.h.sideEncPool, func(se encoderI) { + se.ResetBytes(&mksv) for i, k := range mks { v := &mksbv[i] l := len(mksv) - - e.c = containerMapKey - e.encodeValue(k, nil) - e.atEndOfEncode() - e.w().end() - + se.setContainerState(containerMapKey) + se.encodeR(baseRVRV(k)) + se.atEndOfEncode() + se.writerEnd() v.r = k v.v = mksv[l:] } - }() + }) - sort.Sort(bytesRvSlice(mksbv)) + slices.SortFunc(mksbv, cmpBytesRv) for j := range mksbv { - e.mapElemKey() - e.encWr.writeb(mksbv[j].v) + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.writeBytesAsis(mksbv[j].v) e.mapElemValue() - e.encodeValue(mapGet(rv, mksbv[j].r, rvv, kfast, visindirect, visref), valFn) + e.encodeValue(mapGet(rv, mksbv[j].r, rvv, mparams), valFn) } e.blist.put(mksv) if !byteSliceSameData(bs0, mksv) { @@ -917,91 +921,29 @@ func (e *Encoder) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valF } } -// Encoder writes an object to an output stream in a supported format. -// -// Encoder is NOT safe for concurrent use i.e. a Encoder cannot be used -// concurrently in multiple goroutines. -// -// However, as Encoder could be allocation heavy to initialize, a Reset method is provided -// so its state can be reused to decode new input streams repeatedly. -// This is the idiomatic way to use. 
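The removed doc comment above records the usage contract that still holds after this refactor: an Encoder is not safe for concurrent use, and Reset/ResetBytes exist so one (allocation-heavy) Encoder can be reused across many encodes. As a point of reference, a minimal reuse sketch against the public codec API (the handle choice and value names are illustrative, not taken from this diff):

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    func main() {
        var h codec.JsonHandle // any Handle is used the same way

        // Allocate the Encoder once, then reuse it for many values.
        var out []byte
        enc := codec.NewEncoderBytes(&out, &h)

        for _, v := range []interface{}{map[string]int{"a": 1}, []string{"x", "y"}} {
            out = out[:0]
            enc.ResetBytes(&out) // repoint the same Encoder at the reused buffer
            if err := enc.Encode(v); err != nil {
                panic(err)
            }
            fmt.Println(string(out))
        }
    }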
-type Encoder struct { - panicHdl - - e encDriver - - h *BasicHandle - - // hopefully, reduce derefencing cost by laying the encWriter inside the Encoder - encWr - - // ---- cpu cache line boundary - hh Handle - - blist bytesFreelist - err error - - // ---- cpu cache line boundary - - // ---- writable fields during execution --- *try* to keep in sep cache line - - // ci holds interfaces during an encoding (if CheckCircularRef=true) - // - // We considered using a []uintptr (slice of pointer addresses) retrievable via rv.UnsafeAddr. - // However, it is possible for the same pointer to point to 2 different types e.g. - // type T struct { tHelper } - // Here, for var v T; &v and &v.tHelper are the same pointer. - // Consequently, we need a tuple of type and pointer, which interface{} natively provides. - ci []interface{} // []uintptr - - perType encPerType - - slist sfiRvFreelist -} - -// NewEncoder returns an Encoder for encoding into an io.Writer. -// -// For efficiency, Users are encouraged to configure WriterBufferSize on the handle -// OR pass in a memory buffered writer (eg bufio.Writer, bytes.Buffer). -func NewEncoder(w io.Writer, h Handle) *Encoder { - e := h.newEncDriver().encoder() - if w != nil { - e.Reset(w) - } - return e -} - -// NewEncoderBytes returns an encoder for encoding directly and efficiently -// into a byte slice, using zero-copying to temporary slices. -// -// It will potentially replace the output byte slice pointed to. -// After encoding, the out parameter contains the encoded contents. -func NewEncoderBytes(out *[]byte, h Handle) *Encoder { - e := h.newEncDriver().encoder() - if out != nil { - e.ResetBytes(out) - } - return e -} - -func (e *Encoder) HandleName() string { - return e.hh.Name() -} - -func (e *Encoder) init(h Handle) { +func (e *encoder[T]) init(h Handle) { initHandle(h) - e.err = errEncoderNotInitialized - e.bytes = true + callMake(&e.e) e.hh = h e.h = h.getBasicHandle() - e.be = e.hh.isBinary() + // e.be = e.hh.isBinary() + e.err = errEncoderNotInitialized + + // e.fp = fastpathEList[T]() + e.fp = e.e.init(h, &e.encoderBase, e).(*fastpathEs[T]) + + if e.bytes { + e.rtidFn = &e.h.rtidFnsEncBytes + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtBytes + } else { + e.rtidFn = &e.h.rtidFnsEncIO + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtIO + } + + e.reset() } -func (e *Encoder) w() *encWr { - return &e.encWr -} - -func (e *Encoder) resetCommon() { +func (e *encoder[T]) reset() { e.e.reset() if e.ci != nil { e.ci = e.ci[:0] @@ -1012,26 +954,6 @@ func (e *Encoder) resetCommon() { e.err = nil } -// Reset resets the Encoder with a new output stream. -// -// This accommodates using the state of the Encoder, -// where it has "cached" information about sub-engines. -func (e *Encoder) Reset(w io.Writer) { - e.bytes = false - if e.wf == nil { - e.wf = new(bufioEncWriter) - } - e.wf.reset(w, e.h.WriterBufferSize, &e.blist) - e.resetCommon() -} - -// ResetBytes resets the Encoder with a new destination output []byte. -func (e *Encoder) ResetBytes(out *[]byte) { - e.bytes = true - e.wb.reset(encInBytes(out), out) - e.resetCommon() -} - // Encode writes an object into a stream. // // Encoding can be configured via the struct tag for the fields. @@ -1117,73 +1039,77 @@ func (e *Encoder) ResetBytes(out *[]byte) { // Note that struct field names and keys in map[string]XXX will be treated as symbols. // Some formats support symbols (e.g. binc) and will properly encode the string // only once in the stream, and use a tag to refer to it thereafter. 
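The Encode documentation above describes configuring encoding via struct tags: renaming keys, omitempty, skipping fields with "-", and toarray. A hedged illustration of those tags (the type and field names are invented for the example; the _struct convention for struct-wide options follows the package documentation, not this diff):

    package notes

    // Note uses the struct tags described in the Encode documentation above.
    // Putting `codec:",toarray"` on the _struct field instead would make the
    // whole struct encode as an array of values rather than a map.
    type Note struct {
        _struct bool   `codec:",omitempty"`     // struct-wide default: omit empty fields
        ID      uint64 `codec:"id"`             // encoded under the key "id"
        Body    string `codec:"body,omitempty"` // dropped entirely when empty
        Draft   bool   `codec:"-"`              // never encoded
    }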
-func (e *Encoder) Encode(v interface{}) (err error) { +// +// Note that an error from an Encode call will make the Encoder unusable moving forward. +// This is because the state of the Encoder, it's output stream, etc are no longer stable. +// Any subsequent calls to Encode will trigger the same error. +func (e *encoder[T]) Encode(v interface{}) (err error) { // tried to use closure, as runtime optimizes defer with no params. // This seemed to be causing weird issues (like circular reference found, unexpected panic, etc). // Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139 - if !debugging { - defer func() { - // if error occurred during encoding, return that error; - // else if error occurred on end'ing (i.e. during flush), return that error. - if x := recover(); x != nil { - panicValToErr(e, x, &e.err) - err = e.err - } - }() - } - - e.MustEncode(v) + defer panicValToErr(e, callRecoverSentinel, &e.err, &err, debugging) + e.mustEncode(v) return } // MustEncode is like Encode, but panics if unable to Encode. // // Note: This provides insight to the code location that triggered the error. -func (e *Encoder) MustEncode(v interface{}) { +// +// Note that an error from an Encode call will make the Encoder unusable moving forward. +// This is because the state of the Encoder, it's output stream, etc are no longer stable. +// Any subsequent calls to Encode will trigger the same error. +func (e *encoder[T]) MustEncode(v interface{}) { + defer panicValToErr(e, callRecoverSentinel, &e.err, nil, true) + e.mustEncode(v) + return +} + +func (e *encoder[T]) mustEncode(v interface{}) { halt.onerror(e.err) if e.hh == nil { halt.onerror(errNoFormatHandle) } e.calls++ - e.encode(v) + if !e.encodeBuiltin(v) { + e.encodeR(reflect.ValueOf(v)) + } + // e.encodeI(v) // MARKER inlined e.calls-- if e.calls == 0 { - e.atEndOfEncode() - e.w().end() + e.e.atEndOfEncode() + e.e.writerEnd() } } -// Release is a no-op. -// -// Deprecated: Pooled resources are not used with an Encoder. -// This method is kept for compatibility reasons only. -func (e *Encoder) Release() { +func (e *encoder[T]) encodeI(iv interface{}) { + if !e.encodeBuiltin(iv) { + e.encodeR(reflect.ValueOf(iv)) + } } -func (e *Encoder) encode(iv interface{}) { - // MARKER: a switch with only concrete types can be optimized. - // consequently, we deal with nil and interfaces outside the switch. 
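The MARKER comment above explains the motivation for encodeBuiltin (introduced just below): a type switch over concrete types is cheap for the common cases, and reflection is only the fallback. A standalone sketch of that split, in the same spirit but not the library's actual code:

    package sketch

    import (
        "fmt"
        "reflect"
    )

    // writeValue handles the common concrete types directly and only falls
    // back to reflection for everything else, mirroring the encodeBuiltin /
    // encodeR split in the hunk below.
    func writeValue(v interface{}) string {
        switch x := v.(type) {
        case nil:
            return "null"
        case string:
            return fmt.Sprintf("%q", x)
        case bool:
            return fmt.Sprintf("%t", x)
        case int, int8, int16, int32, int64,
            uint, uint8, uint16, uint32, uint64:
            return fmt.Sprintf("%d", x)
        case float32, float64:
            return fmt.Sprintf("%v", x)
        case []byte:
            return fmt.Sprintf("%x", x)
        default:
            // slow path: reflection-driven encoding
            return fmt.Sprintf("<%s via reflection>", reflect.TypeOf(v).Kind())
        }
    }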
- - if iv == nil { - e.e.EncodeNil() - return +func (e *encoder[T]) encodeIB(iv interface{}) { + if !e.encodeBuiltin(iv) { + // panic("invalid type passed to encodeBuiltin") + // halt.errorf("invalid type passed to encodeBuiltin: %T", iv) + // MARKER: calling halt.errorf pulls in fmt.Sprintf/Errorf which makes this non-inlineable + halt.errorStr("[should not happen] invalid type passed to encodeBuiltin") } +} - rv, ok := isNil(iv) - if ok { - e.e.EncodeNil() - return - } +func (e *encoder[T]) encodeR(base reflect.Value) { + e.encodeValue(base, nil) +} +func (e *encoder[T]) encodeBuiltin(iv interface{}) (ok bool) { + ok = true switch v := iv.(type) { - // case nil: + case nil: + e.e.EncodeNil() // case Selfer: case Raw: e.rawBytes(v) - case reflect.Value: - e.encodeValue(v, nil) - case string: e.e.EncodeString(v) case bool: @@ -1221,157 +1147,125 @@ func (e *Encoder) encode(iv interface{}) { case time.Time: e.e.EncodeTime(v) case []byte: - e.e.EncodeStringBytesRaw(v) - case *Raw: - e.rawBytes(*v) - case *string: - e.e.EncodeString(*v) - case *bool: - e.e.EncodeBool(*v) - case *int: - e.e.EncodeInt(int64(*v)) - case *int8: - e.e.EncodeInt(int64(*v)) - case *int16: - e.e.EncodeInt(int64(*v)) - case *int32: - e.e.EncodeInt(int64(*v)) - case *int64: - e.e.EncodeInt(*v) - case *uint: - e.e.EncodeUint(uint64(*v)) - case *uint8: - e.e.EncodeUint(uint64(*v)) - case *uint16: - e.e.EncodeUint(uint64(*v)) - case *uint32: - e.e.EncodeUint(uint64(*v)) - case *uint64: - e.e.EncodeUint(*v) - case *uintptr: - e.e.EncodeUint(uint64(*v)) - case *float32: - e.e.EncodeFloat32(*v) - case *float64: - e.e.EncodeFloat64(*v) - case *complex64: - e.encodeComplex64(*v) - case *complex128: - e.encodeComplex128(*v) - case *time.Time: - e.e.EncodeTime(*v) - case *[]byte: - if *v == nil { - e.e.EncodeNil() - } else { - e.e.EncodeStringBytesRaw(*v) - } + e.e.EncodeBytes(v) // e.e.EncodeStringBytesRaw(v) default: // we can't check non-predefined types, as they might be a Selfer or extension. - if skipFastpathTypeSwitchInDirectCall || !fastpathEncodeTypeSwitch(iv, e) { - e.encodeValue(rv, nil) - } + ok = !skipFastpathTypeSwitchInDirectCall && e.dh.fastpathEncodeTypeSwitch(iv, e) } + return } // encodeValue will encode a value. // // Note that encodeValue will handle nil in the stream early, so that the // subsequent calls i.e. kXXX methods, etc do not have to handle it themselves. -func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn) { +func (e *encoder[T]) encodeValue(rv reflect.Value, fn *encFn[T]) { + // MARKER: We check if value is nil here, so that the kXXX method do not have to. // if a valid fn is passed, it MUST BE for the dereferenced type of rv - // MARKER: We check if value is nil here, so that the kXXX method do not have to. 
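encodeValue (below) is where pointer and interface indirection is unwrapped and, when CheckCircularRef is enabled on the handle, where visited pointers are pushed (the ciPushes counter just below) so a self-referencing value fails fast instead of recursing without bound. A hedged usage sketch, assuming CheckCircularRef remains the public opt-in described in the option docs earlier in this diff (the Node type is illustrative):

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    type Node struct {
        Name string
        Next *Node // may point back at an ancestor
    }

    func main() {
        n := &Node{Name: "a"}
        n.Next = n // deliberate cycle

        var h codec.JsonHandle
        h.CheckCircularRef = true // opt in: detect the cycle instead of recursing forever

        var out []byte
        err := codec.NewEncoderBytes(&out, &h).Encode(n)
        fmt.Println(err) // expected: a "circular reference" style error, not a hang
    }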
+ var ciPushes int + // if e.h.CheckCircularRef { + // ciPushes = e.ci.pushRV(rv) + // } - var sptr interface{} var rvp reflect.Value var rvpValid bool -TOP: + +RV: switch rv.Kind() { case reflect.Ptr: if rvIsNil(rv) { e.e.EncodeNil() - return + goto END } rvpValid = true rvp = rv rv = rv.Elem() - goto TOP + // fn = nil // underlying type still same - no change + if e.h.CheckCircularRef && e.ci.canPushElemKind(rv.Kind()) { + e.ci.push(rv2i(rvp)) + ciPushes++ + } + goto RV case reflect.Interface: if rvIsNil(rv) { e.e.EncodeNil() - return + goto END } rvpValid = false rvp = reflect.Value{} rv = rv.Elem() - goto TOP - case reflect.Struct: - if rvpValid && e.h.CheckCircularRef { - sptr = rv2i(rvp) - for _, vv := range e.ci { - if eq4i(sptr, vv) { // error if sptr already seen - e.errorf("circular reference found: %p, %T", sptr, sptr) - } - } - e.ci = append(e.ci, sptr) - } - case reflect.Slice, reflect.Map, reflect.Chan: + fn = nil // underlying type may change, so prompt a reset + goto RV + case reflect.Map: if rvIsNil(rv) { - e.e.EncodeNil() - return + if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Slice, reflect.Chan: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } + goto END } case reflect.Invalid, reflect.Func: e.e.EncodeNil() - return + goto END } if fn == nil { - fn = e.h.fn(rv.Type()) + fn = e.fn(rv.Type()) } if !fn.i.addrE { // typically, addrE = false, so check it first // keep rv same } else if rvpValid { rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) } else { rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) } fn.fe(e, &fn.i, rv) - if sptr != nil { // remove sptr - e.ci = e.ci[:len(e.ci)-1] +END: + if ciPushes > 0 { + e.ci.pop(ciPushes) } } -// encodeValueNonNil can encode a number, bool, or string -// OR non-nil values of kind map, slice and chan. -func (e *Encoder) encodeValueNonNil(rv reflect.Value, fn *codecFn) { - if fn == nil { - fn = e.h.fn(rv.Type()) - } - +func (e *encoder[T]) encodeValueNonNil(rv reflect.Value, fn *encFn[T]) { + // only call this if a primitive (number, bool, string) OR + // a non-nil collection (map/slice/chan). 
+ // + // Expects fn to be non-nil if fn.i.addrE { // typically, addrE = false, so check it first - rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } } fn.fe(e, &fn.i, rv) } -// addrRV returns a addressable value which may be readonly -func (e *Encoder) addrRV(rv reflect.Value, typ, ptrType reflect.Type) (rva reflect.Value) { - if rv.CanAddr() { - return rvAddr(rv, ptrType) +func (e *encoder[T]) encodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + e.encodeValue(baseRV(v), e.fn(t)) + } else { + e.encodeValue(baseRV(v), e.fnNoExt(t)) } - if e.h.NoAddressableReadonly { - rva = reflect.New(typ) - rvSetDirect(rva.Elem(), rv) - return - } - return rvAddr(e.perType.AddressableRO(rv), ptrType) } -func (e *Encoder) marshalUtf8(bs []byte, fnerr error) { - e.onerror(fnerr) +func (e *encoder[T]) marshalUtf8(bs []byte, fnerr error) { + halt.onerror(fnerr) if bs == nil { e.e.EncodeNil() } else { @@ -1379,148 +1273,380 @@ func (e *Encoder) marshalUtf8(bs []byte, fnerr error) { } } -func (e *Encoder) marshalAsis(bs []byte, fnerr error) { - e.onerror(fnerr) +func (e *encoder[T]) marshalAsis(bs []byte, fnerr error) { + halt.onerror(fnerr) if bs == nil { e.e.EncodeNil() } else { - e.encWr.writeb(bs) // e.asis(bs) + e.e.writeBytesAsis(bs) // e.asis(bs) } } -func (e *Encoder) marshalRaw(bs []byte, fnerr error) { - e.onerror(fnerr) - if bs == nil { - e.e.EncodeNil() - } else { - e.e.EncodeStringBytesRaw(bs) - } +func (e *encoder[T]) marshalRaw(bs []byte, fnerr error) { + halt.onerror(fnerr) + e.e.EncodeBytes(bs) } -func (e *Encoder) rawBytes(vv Raw) { +func (e *encoder[T]) rawBytes(vv Raw) { v := []byte(vv) if !e.h.Raw { - e.errorf("Raw values cannot be encoded: %v", v) + halt.errorBytes("Raw values cannot be encoded: ", v) } - e.encWr.writeb(v) + e.e.writeBytesAsis(v) } -func (e *Encoder) wrapErr(v error, err *error) { - *err = wrapCodecErr(v, e.hh.Name(), 0, true) +func (e *encoder[T]) fn(t reflect.Type) *encFn[T] { + return e.dh.encFnViaBH(t, e.rtidFn, e.h, e.fp, false) +} + +func (e *encoder[T]) fnNoExt(t reflect.Type) *encFn[T] { + return e.dh.encFnViaBH(t, e.rtidFnNoExt, e.h, e.fp, true) } // ---- container tracker methods // Note: We update the .c after calling the callback. -// This way, the callback can know what the last status was. +// +// Callbacks ie Write(Map|Array)XXX should not use the containerState. +// It is there for post-callback use. +// Instead, callbacks have a parameter to tell if first time or not. +// +// Some code is commented out below, as they are manually inlined. +// Commented code is retained here for convernience. 
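The note above records the new convention for the container callbacks: Write(Map|Array)Elem* receive a firstTime flag instead of consulting the tracked container state, which is only updated afterwards for later inspection. A purely illustrative sketch (not the encDriver interface) of why handing the callback that flag is convenient for separator handling in a text format:

    package sketch

    import "strings"

    // writeArray shows the "firstTime" convention: the element callback decides
    // whether to emit a separator from the flag it is handed, instead of
    // inspecting shared container state.
    func writeArray(items []string) string {
        var b strings.Builder
        b.WriteByte('[')
        for i, it := range items {
            writeArrayElem(&b, i == 0) // firstTime == (i == 0), as in the hunks above
            b.WriteString(it)
        }
        b.WriteByte(']')
        return b.String()
    }

    func writeArrayElem(b *strings.Builder, firstTime bool) {
        if !firstTime {
            b.WriteByte(',')
        }
    }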
-func (e *Encoder) mapStart(length int) { +func (e *encoder[T]) mapStart(length int) { e.e.WriteMapStart(length) e.c = containerMapStart } -func (e *Encoder) mapElemKey() { - if e.js { - e.jsondriver().WriteMapElemKey() - } - e.c = containerMapKey -} +// func (e *encoder[T]) mapElemKey(firstTime bool) { +// e.e.WriteMapElemKey(firstTime) +// e.c = containerMapKey +// } -func (e *Encoder) mapElemValue() { - if e.js { - e.jsondriver().WriteMapElemValue() - } +func (e *encoder[T]) mapElemValue() { + e.e.WriteMapElemValue() e.c = containerMapValue } -func (e *Encoder) mapEnd() { - e.e.WriteMapEnd() - e.c = 0 -} +// func (e *encoder[T]) mapEnd() { +// e.e.WriteMapEnd() +// e.c = 0 +// } -func (e *Encoder) arrayStart(length int) { +func (e *encoder[T]) arrayStart(length int) { e.e.WriteArrayStart(length) e.c = containerArrayStart } -func (e *Encoder) arrayElem() { - if e.js { - e.jsondriver().WriteArrayElem() - } - e.c = containerArrayElem -} +// func (e *encoder[T]) arrayElem(firstTime bool) { +// e.e.WriteArrayElem(firstTime) +// e.c = containerArrayElem +// } -func (e *Encoder) arrayEnd() { - e.e.WriteArrayEnd() - e.c = 0 -} +// func (e *encoder[T]) arrayEnd() { +// e.e.WriteArrayEnd() +// e.c = 0 +// } // ---------- -func (e *Encoder) haltOnMbsOddLen(length int) { - if length&1 != 0 { // similar to &1==1 or %2 == 1 - e.errorf("mapBySlice requires even slice length, but got %v", length) +func (e *encoder[T]) writerEnd() { + e.e.writerEnd() +} + +func (e *encoder[T]) atEndOfEncode() { + e.e.atEndOfEncode() +} + +// Reset resets the Encoder with a new output stream. +// +// This accommodates using the state of the Encoder, +// where it has "cached" information about sub-engines. +func (e *encoder[T]) Reset(w io.Writer) { + if e.bytes { + halt.onerror(errEncNoResetBytesWithWriter) } -} - -func (e *Encoder) atEndOfEncode() { - // e.e.atEndOfEncode() - if e.js { - e.jsondriver().atEndOfEncode() + e.reset() + if w == nil { + w = io.Discard } + e.e.resetOutIO(w) } -func (e *Encoder) sideEncode(v interface{}, basetype reflect.Type, bs *[]byte) { - // rv := baseRV(v) - // e2 := NewEncoderBytes(bs, e.hh) - // e2.encodeValue(rv, e2.h.fnNoExt(basetype)) - // e2.atEndOfEncode() - // e2.w().end() - - defer func(wb bytesEncAppender, bytes bool, c containerState, state interface{}) { - e.wb = wb - e.bytes = bytes - e.c = c - e.e.restoreState(state) - }(e.wb, e.bytes, e.c, e.e.captureState()) - - e.wb = bytesEncAppender{encInBytes(bs)[:0], bs} - e.bytes = true - e.c = 0 - e.e.resetState() - - // must call using fnNoExt - rv := baseRV(v) - e.encodeValue(rv, e.h.fnNoExt(basetype)) - e.atEndOfEncode() - e.w().end() +// ResetBytes resets the Encoder with a new destination output []byte. 
+func (e *encoder[T]) ResetBytes(out *[]byte) { + if !e.bytes { + halt.onerror(errEncNoResetWriterWithBytes) + } + e.resetBytes(out) } -func encInBytes(out *[]byte) (in []byte) { - in = *out - if in == nil { - in = make([]byte, defEncByteBufSize) +// only call this iff you are sure it is a bytes encoder +func (e *encoder[T]) resetBytes(out *[]byte) { + e.reset() + if out == nil { + out = &bytesEncAppenderDefOut + } + e.e.resetOutBytes(out) +} + +// ---- + +func (helperEncDriver[T]) newEncoderBytes(out *[]byte, h Handle) *encoder[T] { + var c1 encoder[T] + c1.bytes = true + c1.init(h) + c1.ResetBytes(out) + return &c1 +} + +func (helperEncDriver[T]) newEncoderIO(out io.Writer, h Handle) *encoder[T] { + var c1 encoder[T] + c1.bytes = false + c1.init(h) + c1.Reset(out) + return &c1 +} + +func (helperEncDriver[T]) encFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathEs[T]) (f *fastpathE[T], u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt } return } -func encStructFieldKey(encName string, ee encDriver, w *encWr, - keyType valueType, encNameAsciiAlphaNum bool, js bool) { - // use if-else-if, not switch (which compiles to binary-search) - // since keyType is typically valueTypeString, branch prediction is pretty good. +// ---- - if keyType == valueTypeString { - if js && encNameAsciiAlphaNum { // keyType == valueTypeString - w.writeqstr(encName) - } else { // keyType == valueTypeString - ee.EncodeString(encName) +func (helperEncDriver[T]) encFindRtidFn(s []encRtidFn[T], rtid uintptr) (i uint, fn *encFn[T]) { + // binary search. Adapted from sort/search.go. Use goto (not for loop) to allow inlining. 
+ var h uint // var h, i uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h } - } else if keyType == valueTypeInt { - ee.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64))) - } else if keyType == valueTypeUint { - ee.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64))) - } else if keyType == valueTypeFloat { - ee.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64))) - } else { - halt.errorf("invalid struct key type: %v", keyType) + goto LOOP } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperEncDriver[T]) encFromRtidFnSlice(fns *atomicRtidFnSlice) (s []encRtidFn[T]) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]encRtidFn[T]](v)) + } + return +} + +func (dh helperEncDriver[T]) encFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, + x *BasicHandle, fp *fastpathEs[T], checkExt bool) (fn *encFn[T]) { + return dh.encFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperEncDriver[T]) encFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEs[T], + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFn[T]) { + rtid := rt2id(rt) + var sp []encRtidFn[T] = dh.encFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.encFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.encFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperEncDriver[T]) encFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEs[T], + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFn[T]) { + + fn = dh.encFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []encRtidFn[T] + mu.Lock() + sp = dh.encFromRtidFnSlice(fns) + // since this is an atomic load/store, we MUST use a different array each time, + // else we have a data race when a store is happening simultaneously with a encFindRtidFn call. + if sp == nil { + sp = []encRtidFn[T]{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.encFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]encRtidFn[T], len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = encRtidFn[T]{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperEncDriver[T]) encFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathEs[T], + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFn[T]) { + fn = new(encFn[T]) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + // anything can be an extension except the built-in ones: time, raw and rawext. + // ensure we check for these types, then if extension, before checking if + // it implementes one of the pre-declared interfaces. 
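encFnLoad (continued below) resolves the encode function in a fixed order: built-in time/Raw/RawExt handling, then registered extensions, then Selfer, then the Binary/JSON/Text marshaler interfaces depending on the format, requiring both the marshal and unmarshal sides. A hedged example of the most common external hook, encoding.TextMarshaler, which a JSON handle falls through to when no extension, Selfer, or MarshalJSON applies (the Level type is illustrative):

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    // Level implements both encoding.TextMarshaler and TextUnmarshaler; the
    // lookup below requires both sides before routing through textMarshal.
    type Level int

    func (l Level) MarshalText() ([]byte, error) {
        return []byte(fmt.Sprintf("level-%d", l)), nil
    }

    func (l *Level) UnmarshalText(b []byte) error {
        _, err := fmt.Sscanf(string(b), "level-%d", l)
        return err
    }

    func main() {
        var h codec.JsonHandle
        var out []byte
        if err := codec.NewEncoderBytes(&out, &h).Encode(Level(3)); err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // expected: "level-3" (the MarshalText form, quoted)
    }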
+ + // fi.addrEf = true + + if rtid == timeTypId && timeBuiltin { + fn.fe = (*encoder[T]).kTime + } else if rtid == rawTypId { + fn.fe = (*encoder[T]).raw + } else if rtid == rawExtTypId { + fn.fe = (*encoder[T]).rawExt + fi.addrE = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*encoder[T]).ext + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fe = (*encoder[T]).selferMarshal + fi.addrE = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fe = (*encoder[T]).binaryMarshal + fi.addrE = ti.flagBinaryMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + //If JSON, we should check JSONMarshal before textMarshal + fn.fe = (*encoder[T]).jsonMarshal + fi.addrE = ti.flagJsonMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fe = (*encoder[T]).textMarshal + fi.addrE = ti.flagTextMarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + // by default (without using unsafe), + // if an array is not addressable, converting from an array to a slice + // requires an allocation (see helper_not_unsafe.go: func rvGetSlice4Array). + // + // (Non-addressable arrays mostly occur as keys/values from a map). + // + // However, fastpath functions are mostly for slices of numbers or strings, + // which are small by definition and thus allocation should be fast/cheap in time. + // + // Consequently, the value of doing this quick allocation to elide the overhead cost of + // non-optimized (not-unsafe) reflection is a fair price. + var rtid2 uintptr + if !ti.flagHasPkgPath { // un-named type (slice or mpa or array) + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) // ti.key for arrays = reflect.SliceOf(ti.elem) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fe = fp[idx].encfn + } + } else { // named type (with underlying type of map or slice or array) + // try to use mapping for underlying type + xfe, xrt := dh.encFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf := xfe.encfn + fn.fe = func(e *encoder[T], xf *encFnInfo, xrv reflect.Value) { + xfnf(e, xf, rvConvert(xrv, xrt)) + } + } + } + } + if fn.fe == nil { + switch rk { + case reflect.Bool: + fn.fe = (*encoder[T]).kBool + case reflect.String: + // Do not use different functions based on StringToRaw option, as that will statically + // set the function for a string type, and if the Handle is modified thereafter, + // behaviour is non-deterministic + // i.e. 
DO NOT DO: + // if x.StringToRaw { + // fn.fe = (*encoder[T]).kStringToRaw + // } else { + // fn.fe = (*encoder[T]).kStringEnc + // } + + fn.fe = (*encoder[T]).kString + case reflect.Int: + fn.fe = (*encoder[T]).kInt + case reflect.Int8: + fn.fe = (*encoder[T]).kInt8 + case reflect.Int16: + fn.fe = (*encoder[T]).kInt16 + case reflect.Int32: + fn.fe = (*encoder[T]).kInt32 + case reflect.Int64: + fn.fe = (*encoder[T]).kInt64 + case reflect.Uint: + fn.fe = (*encoder[T]).kUint + case reflect.Uint8: + fn.fe = (*encoder[T]).kUint8 + case reflect.Uint16: + fn.fe = (*encoder[T]).kUint16 + case reflect.Uint32: + fn.fe = (*encoder[T]).kUint32 + case reflect.Uint64: + fn.fe = (*encoder[T]).kUint64 + case reflect.Uintptr: + fn.fe = (*encoder[T]).kUintptr + case reflect.Float32: + fn.fe = (*encoder[T]).kFloat32 + case reflect.Float64: + fn.fe = (*encoder[T]).kFloat64 + case reflect.Complex64: + fn.fe = (*encoder[T]).kComplex64 + case reflect.Complex128: + fn.fe = (*encoder[T]).kComplex128 + case reflect.Chan: + fn.fe = (*encoder[T]).kChan + case reflect.Slice: + fn.fe = (*encoder[T]).kSlice + case reflect.Array: + fn.fe = (*encoder[T]).kArray + case reflect.Struct: + if ti.simple { + fn.fe = (*encoder[T]).kStructSimple + } else { + fn.fe = (*encoder[T]).kStruct + } + case reflect.Map: + fn.fe = (*encoder[T]).kMap + case reflect.Interface: + // encode: reflect.Interface are handled already by preEncodeValue + fn.fe = (*encoder[T]).kErr + default: + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue + fn.fe = (*encoder[T]).kErr + } + } + } + return } diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go deleted file mode 100644 index 941ef798f..000000000 --- a/vendor/github.com/ugorji/go/codec/fast-path.generated.go +++ /dev/null @@ -1,6157 +0,0 @@ -//go:build !notfastpath && !codec.notfastpath -// +build !notfastpath,!codec.notfastpath - -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// Code generated from fast-path.go.tmpl - DO NOT EDIT. - -package codec - -// Fast path functions try to create a fast path encode or decode implementation -// for common maps and slices. -// -// We define the functions and register them in this single file -// so as not to pollute the encode.go and decode.go, and create a dependency in there. -// This file can be omitted without causing a build failure. -// -// The advantage of fast paths is: -// - Many calls bypass reflection altogether -// -// Currently support -// - slice of all builtin types (numeric, bool, string, []byte) -// - maps of builtin types to builtin or interface{} type, EXCEPT FOR -// keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{} -// AND values of type type int8/16/32, uint16/32 -// This should provide adequate "typical" implementations. -// -// Note that fast track decode functions must handle values for which an address cannot be obtained. -// For example: -// m2 := map[string]int{} -// p2 := []interface{}{m2} -// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. 
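The deleted fast-path file below (like encFindRtidFn earlier in this diff) keeps its registered type ids sorted and locates an entry with a goto-based binary search so the lookup stays small enough to inline. A generic sketch of that search over a sorted slice of ids, mirroring the pattern rather than reproducing the generated table:

    package sketch

    // findIndex returns the position of id in the sorted slice ids, or -1.
    // It mirrors the goto-based binary search used by fastpathAvIndex /
    // encFindRtidFn so the function remains inlineable.
    func findIndex(ids []uintptr, id uintptr) int {
        var i, h uint
        j := uint(len(ids))
    LOOP:
        if i < j {
            h = (i + j) >> 1 // midpoint without overflow
            if ids[h] < id {
                i = h + 1
            } else {
                j = h
            }
            goto LOOP
        }
        if i < uint(len(ids)) && ids[i] == id {
            return int(i)
        }
        return -1
    }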
-// - -import ( - "reflect" - "sort" -) - -const fastpathEnabled = true - -type fastpathT struct{} - -var fastpathTV fastpathT - -type fastpathE struct { - rt reflect.Type - encfn func(*Encoder, *codecFnInfo, reflect.Value) - decfn func(*Decoder, *codecFnInfo, reflect.Value) -} - -type fastpathA [56]fastpathE -type fastpathARtid [56]uintptr - -var fastpathAv fastpathA -var fastpathAvRtid fastpathARtid - -type fastpathAslice struct{} - -func (fastpathAslice) Len() int { return 56 } -func (fastpathAslice) Less(i, j int) bool { - return fastpathAvRtid[uint(i)] < fastpathAvRtid[uint(j)] -} -func (fastpathAslice) Swap(i, j int) { - fastpathAvRtid[uint(i)], fastpathAvRtid[uint(j)] = fastpathAvRtid[uint(j)], fastpathAvRtid[uint(i)] - fastpathAv[uint(i)], fastpathAv[uint(j)] = fastpathAv[uint(j)], fastpathAv[uint(i)] -} - -func fastpathAvIndex(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - // Note: we use goto (instead of for loop) so this can be inlined. - // h, i, j := 0, 0, 56 - var h, i uint - var j uint = 56 -LOOP: - if i < j { - h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2 - if fastpathAvRtid[h] < rtid { - i = h + 1 - } else { - j = h - } - goto LOOP - } - if i < 56 && fastpathAvRtid[i] == rtid { - return int(i) - } - return -1 -} - -// due to possible initialization loop error, make fastpath in an init() -func init() { - var i uint = 0 - fn := func(v interface{}, - fe func(*Encoder, *codecFnInfo, reflect.Value), - fd func(*Decoder, *codecFnInfo, reflect.Value)) { - xrt := reflect.TypeOf(v) - xptr := rt2id(xrt) - fastpathAvRtid[i] = xptr - fastpathAv[i] = fastpathE{xrt, fe, fd} - i++ - } - - fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR) - fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR) - fn([][]byte(nil), (*Encoder).fastpathEncSliceBytesR, (*Decoder).fastpathDecSliceBytesR) - fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R) - fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R) - fn([]uint8(nil), (*Encoder).fastpathEncSliceUint8R, (*Decoder).fastpathDecSliceUint8R) - fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R) - fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR) - fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R) - fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R) - fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR) - - fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR) - fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR) - fn(map[string][]byte(nil), (*Encoder).fastpathEncMapStringBytesR, (*Decoder).fastpathDecMapStringBytesR) - fn(map[string]uint8(nil), (*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R) - fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R) - fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR) - fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R) - fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R) - 
fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR) - fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR) - fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR) - fn(map[uint8][]byte(nil), (*Encoder).fastpathEncMapUint8BytesR, (*Decoder).fastpathDecMapUint8BytesR) - fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R) - fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R) - fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR) - fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R) - fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R) - fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR) - fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR) - fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR) - fn(map[uint64][]byte(nil), (*Encoder).fastpathEncMapUint64BytesR, (*Decoder).fastpathDecMapUint64BytesR) - fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R) - fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R) - fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR) - fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, (*Decoder).fastpathDecMapUint64Int32R) - fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R) - fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, (*Decoder).fastpathDecMapUint64BoolR) - fn(map[int]interface{}(nil), (*Encoder).fastpathEncMapIntIntfR, (*Decoder).fastpathDecMapIntIntfR) - fn(map[int]string(nil), (*Encoder).fastpathEncMapIntStringR, (*Decoder).fastpathDecMapIntStringR) - fn(map[int][]byte(nil), (*Encoder).fastpathEncMapIntBytesR, (*Decoder).fastpathDecMapIntBytesR) - fn(map[int]uint8(nil), (*Encoder).fastpathEncMapIntUint8R, (*Decoder).fastpathDecMapIntUint8R) - fn(map[int]uint64(nil), (*Encoder).fastpathEncMapIntUint64R, (*Decoder).fastpathDecMapIntUint64R) - fn(map[int]int(nil), (*Encoder).fastpathEncMapIntIntR, (*Decoder).fastpathDecMapIntIntR) - fn(map[int]int32(nil), (*Encoder).fastpathEncMapIntInt32R, (*Decoder).fastpathDecMapIntInt32R) - fn(map[int]float64(nil), (*Encoder).fastpathEncMapIntFloat64R, (*Decoder).fastpathDecMapIntFloat64R) - fn(map[int]bool(nil), (*Encoder).fastpathEncMapIntBoolR, (*Decoder).fastpathDecMapIntBoolR) - fn(map[int32]interface{}(nil), (*Encoder).fastpathEncMapInt32IntfR, (*Decoder).fastpathDecMapInt32IntfR) - fn(map[int32]string(nil), (*Encoder).fastpathEncMapInt32StringR, (*Decoder).fastpathDecMapInt32StringR) - fn(map[int32][]byte(nil), (*Encoder).fastpathEncMapInt32BytesR, (*Decoder).fastpathDecMapInt32BytesR) - fn(map[int32]uint8(nil), (*Encoder).fastpathEncMapInt32Uint8R, (*Decoder).fastpathDecMapInt32Uint8R) - fn(map[int32]uint64(nil), (*Encoder).fastpathEncMapInt32Uint64R, (*Decoder).fastpathDecMapInt32Uint64R) - fn(map[int32]int(nil), (*Encoder).fastpathEncMapInt32IntR, (*Decoder).fastpathDecMapInt32IntR) - fn(map[int32]int32(nil), 
(*Encoder).fastpathEncMapInt32Int32R, (*Decoder).fastpathDecMapInt32Int32R) - fn(map[int32]float64(nil), (*Encoder).fastpathEncMapInt32Float64R, (*Decoder).fastpathDecMapInt32Float64R) - fn(map[int32]bool(nil), (*Encoder).fastpathEncMapInt32BoolR, (*Decoder).fastpathDecMapInt32BoolR) - - sort.Sort(fastpathAslice{}) -} - -// -- encode - -// -- -- fast path type switch -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { - case []interface{}: - fastpathTV.EncSliceIntfV(v, e) - case *[]interface{}: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncSliceIntfV(*v, e) - } - case []string: - fastpathTV.EncSliceStringV(v, e) - case *[]string: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncSliceStringV(*v, e) - } - case [][]byte: - fastpathTV.EncSliceBytesV(v, e) - case *[][]byte: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncSliceBytesV(*v, e) - } - case []float32: - fastpathTV.EncSliceFloat32V(v, e) - case *[]float32: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncSliceFloat32V(*v, e) - } - case []float64: - fastpathTV.EncSliceFloat64V(v, e) - case *[]float64: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncSliceFloat64V(*v, e) - } - case []uint8: - fastpathTV.EncSliceUint8V(v, e) - case *[]uint8: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncSliceUint8V(*v, e) - } - case []uint64: - fastpathTV.EncSliceUint64V(v, e) - case *[]uint64: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncSliceUint64V(*v, e) - } - case []int: - fastpathTV.EncSliceIntV(v, e) - case *[]int: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncSliceIntV(*v, e) - } - case []int32: - fastpathTV.EncSliceInt32V(v, e) - case *[]int32: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncSliceInt32V(*v, e) - } - case []int64: - fastpathTV.EncSliceInt64V(v, e) - case *[]int64: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncSliceInt64V(*v, e) - } - case []bool: - fastpathTV.EncSliceBoolV(v, e) - case *[]bool: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncSliceBoolV(*v, e) - } - case map[string]interface{}: - fastpathTV.EncMapStringIntfV(v, e) - case *map[string]interface{}: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapStringIntfV(*v, e) - } - case map[string]string: - fastpathTV.EncMapStringStringV(v, e) - case *map[string]string: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapStringStringV(*v, e) - } - case map[string][]byte: - fastpathTV.EncMapStringBytesV(v, e) - case *map[string][]byte: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapStringBytesV(*v, e) - } - case map[string]uint8: - fastpathTV.EncMapStringUint8V(v, e) - case *map[string]uint8: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapStringUint8V(*v, e) - } - case map[string]uint64: - fastpathTV.EncMapStringUint64V(v, e) - case *map[string]uint64: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapStringUint64V(*v, e) - } - case map[string]int: - fastpathTV.EncMapStringIntV(v, e) - case *map[string]int: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapStringIntV(*v, e) - } - case map[string]int32: - fastpathTV.EncMapStringInt32V(v, e) - case *map[string]int32: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapStringInt32V(*v, e) - } - case map[string]float64: - fastpathTV.EncMapStringFloat64V(v, e) - case *map[string]float64: - if *v == nil { - e.e.EncodeNil() - } else 
{ - fastpathTV.EncMapStringFloat64V(*v, e) - } - case map[string]bool: - fastpathTV.EncMapStringBoolV(v, e) - case *map[string]bool: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapStringBoolV(*v, e) - } - case map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(v, e) - case *map[uint8]interface{}: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint8IntfV(*v, e) - } - case map[uint8]string: - fastpathTV.EncMapUint8StringV(v, e) - case *map[uint8]string: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint8StringV(*v, e) - } - case map[uint8][]byte: - fastpathTV.EncMapUint8BytesV(v, e) - case *map[uint8][]byte: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint8BytesV(*v, e) - } - case map[uint8]uint8: - fastpathTV.EncMapUint8Uint8V(v, e) - case *map[uint8]uint8: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint8Uint8V(*v, e) - } - case map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(v, e) - case *map[uint8]uint64: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint8Uint64V(*v, e) - } - case map[uint8]int: - fastpathTV.EncMapUint8IntV(v, e) - case *map[uint8]int: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint8IntV(*v, e) - } - case map[uint8]int32: - fastpathTV.EncMapUint8Int32V(v, e) - case *map[uint8]int32: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint8Int32V(*v, e) - } - case map[uint8]float64: - fastpathTV.EncMapUint8Float64V(v, e) - case *map[uint8]float64: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint8Float64V(*v, e) - } - case map[uint8]bool: - fastpathTV.EncMapUint8BoolV(v, e) - case *map[uint8]bool: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint8BoolV(*v, e) - } - case map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(v, e) - case *map[uint64]interface{}: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint64IntfV(*v, e) - } - case map[uint64]string: - fastpathTV.EncMapUint64StringV(v, e) - case *map[uint64]string: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint64StringV(*v, e) - } - case map[uint64][]byte: - fastpathTV.EncMapUint64BytesV(v, e) - case *map[uint64][]byte: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint64BytesV(*v, e) - } - case map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(v, e) - case *map[uint64]uint8: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint64Uint8V(*v, e) - } - case map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(v, e) - case *map[uint64]uint64: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint64Uint64V(*v, e) - } - case map[uint64]int: - fastpathTV.EncMapUint64IntV(v, e) - case *map[uint64]int: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint64IntV(*v, e) - } - case map[uint64]int32: - fastpathTV.EncMapUint64Int32V(v, e) - case *map[uint64]int32: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint64Int32V(*v, e) - } - case map[uint64]float64: - fastpathTV.EncMapUint64Float64V(v, e) - case *map[uint64]float64: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint64Float64V(*v, e) - } - case map[uint64]bool: - fastpathTV.EncMapUint64BoolV(v, e) - case *map[uint64]bool: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapUint64BoolV(*v, e) - } - case map[int]interface{}: - fastpathTV.EncMapIntIntfV(v, e) - case *map[int]interface{}: - if *v == nil { - e.e.EncodeNil() - } 
else { - fastpathTV.EncMapIntIntfV(*v, e) - } - case map[int]string: - fastpathTV.EncMapIntStringV(v, e) - case *map[int]string: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapIntStringV(*v, e) - } - case map[int][]byte: - fastpathTV.EncMapIntBytesV(v, e) - case *map[int][]byte: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapIntBytesV(*v, e) - } - case map[int]uint8: - fastpathTV.EncMapIntUint8V(v, e) - case *map[int]uint8: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapIntUint8V(*v, e) - } - case map[int]uint64: - fastpathTV.EncMapIntUint64V(v, e) - case *map[int]uint64: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapIntUint64V(*v, e) - } - case map[int]int: - fastpathTV.EncMapIntIntV(v, e) - case *map[int]int: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapIntIntV(*v, e) - } - case map[int]int32: - fastpathTV.EncMapIntInt32V(v, e) - case *map[int]int32: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapIntInt32V(*v, e) - } - case map[int]float64: - fastpathTV.EncMapIntFloat64V(v, e) - case *map[int]float64: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapIntFloat64V(*v, e) - } - case map[int]bool: - fastpathTV.EncMapIntBoolV(v, e) - case *map[int]bool: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapIntBoolV(*v, e) - } - case map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(v, e) - case *map[int32]interface{}: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapInt32IntfV(*v, e) - } - case map[int32]string: - fastpathTV.EncMapInt32StringV(v, e) - case *map[int32]string: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapInt32StringV(*v, e) - } - case map[int32][]byte: - fastpathTV.EncMapInt32BytesV(v, e) - case *map[int32][]byte: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapInt32BytesV(*v, e) - } - case map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(v, e) - case *map[int32]uint8: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapInt32Uint8V(*v, e) - } - case map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(v, e) - case *map[int32]uint64: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapInt32Uint64V(*v, e) - } - case map[int32]int: - fastpathTV.EncMapInt32IntV(v, e) - case *map[int32]int: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapInt32IntV(*v, e) - } - case map[int32]int32: - fastpathTV.EncMapInt32Int32V(v, e) - case *map[int32]int32: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapInt32Int32V(*v, e) - } - case map[int32]float64: - fastpathTV.EncMapInt32Float64V(v, e) - case *map[int32]float64: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapInt32Float64V(*v, e) - } - case map[int32]bool: - fastpathTV.EncMapInt32BoolV(v, e) - case *map[int32]bool: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.EncMapInt32BoolV(*v, e) - } - default: - _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 - return false - } - return true -} - -// -- -- fast path functions -func (e *Encoder) fastpathEncSliceIntfR(f *codecFnInfo, rv reflect.Value) { - var v []interface{} - if rv.Kind() == reflect.Array { - rvGetSlice4Array(rv, &v) - } else { - v = rv2i(rv).([]interface{}) - } - if f.ti.mbs { - fastpathTV.EncAsMapSliceIntfV(v, e) - } else { - fastpathTV.EncSliceIntfV(v, e) - } -} -func (fastpathT) EncSliceIntfV(v []interface{}, e *Encoder) { - e.arrayStart(len(v)) - for j := range v { - e.arrayElem() - 
e.encode(v[j]) - } - e.arrayEnd() -} -func (fastpathT) EncAsMapSliceIntfV(v []interface{}, e *Encoder) { - e.haltOnMbsOddLen(len(v)) - e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) - for j := range v { - if j&1 == 0 { // if j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - e.encode(v[j]) - } - e.mapEnd() -} -func (e *Encoder) fastpathEncSliceStringR(f *codecFnInfo, rv reflect.Value) { - var v []string - if rv.Kind() == reflect.Array { - rvGetSlice4Array(rv, &v) - } else { - v = rv2i(rv).([]string) - } - if f.ti.mbs { - fastpathTV.EncAsMapSliceStringV(v, e) - } else { - fastpathTV.EncSliceStringV(v, e) - } -} -func (fastpathT) EncSliceStringV(v []string, e *Encoder) { - e.arrayStart(len(v)) - for j := range v { - e.arrayElem() - e.e.EncodeString(v[j]) - } - e.arrayEnd() -} -func (fastpathT) EncAsMapSliceStringV(v []string, e *Encoder) { - e.haltOnMbsOddLen(len(v)) - e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) - for j := range v { - if j&1 == 0 { // if j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - e.e.EncodeString(v[j]) - } - e.mapEnd() -} -func (e *Encoder) fastpathEncSliceBytesR(f *codecFnInfo, rv reflect.Value) { - var v [][]byte - if rv.Kind() == reflect.Array { - rvGetSlice4Array(rv, &v) - } else { - v = rv2i(rv).([][]byte) - } - if f.ti.mbs { - fastpathTV.EncAsMapSliceBytesV(v, e) - } else { - fastpathTV.EncSliceBytesV(v, e) - } -} -func (fastpathT) EncSliceBytesV(v [][]byte, e *Encoder) { - e.arrayStart(len(v)) - for j := range v { - e.arrayElem() - e.e.EncodeStringBytesRaw(v[j]) - } - e.arrayEnd() -} -func (fastpathT) EncAsMapSliceBytesV(v [][]byte, e *Encoder) { - e.haltOnMbsOddLen(len(v)) - e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) - for j := range v { - if j&1 == 0 { // if j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - e.e.EncodeStringBytesRaw(v[j]) - } - e.mapEnd() -} -func (e *Encoder) fastpathEncSliceFloat32R(f *codecFnInfo, rv reflect.Value) { - var v []float32 - if rv.Kind() == reflect.Array { - rvGetSlice4Array(rv, &v) - } else { - v = rv2i(rv).([]float32) - } - if f.ti.mbs { - fastpathTV.EncAsMapSliceFloat32V(v, e) - } else { - fastpathTV.EncSliceFloat32V(v, e) - } -} -func (fastpathT) EncSliceFloat32V(v []float32, e *Encoder) { - e.arrayStart(len(v)) - for j := range v { - e.arrayElem() - e.e.EncodeFloat32(v[j]) - } - e.arrayEnd() -} -func (fastpathT) EncAsMapSliceFloat32V(v []float32, e *Encoder) { - e.haltOnMbsOddLen(len(v)) - e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) - for j := range v { - if j&1 == 0 { // if j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - e.e.EncodeFloat32(v[j]) - } - e.mapEnd() -} -func (e *Encoder) fastpathEncSliceFloat64R(f *codecFnInfo, rv reflect.Value) { - var v []float64 - if rv.Kind() == reflect.Array { - rvGetSlice4Array(rv, &v) - } else { - v = rv2i(rv).([]float64) - } - if f.ti.mbs { - fastpathTV.EncAsMapSliceFloat64V(v, e) - } else { - fastpathTV.EncSliceFloat64V(v, e) - } -} -func (fastpathT) EncSliceFloat64V(v []float64, e *Encoder) { - e.arrayStart(len(v)) - for j := range v { - e.arrayElem() - e.e.EncodeFloat64(v[j]) - } - e.arrayEnd() -} -func (fastpathT) EncAsMapSliceFloat64V(v []float64, e *Encoder) { - e.haltOnMbsOddLen(len(v)) - e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) - for j := range v { - if j&1 == 0 { // if j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - e.e.EncodeFloat64(v[j]) - } - e.mapEnd() -} -func (e *Encoder) fastpathEncSliceUint8R(f *codecFnInfo, rv reflect.Value) { - var v []uint8 - if rv.Kind() == 
reflect.Array { - rvGetSlice4Array(rv, &v) - } else { - v = rv2i(rv).([]uint8) - } - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint8V(v, e) - } else { - fastpathTV.EncSliceUint8V(v, e) - } -} -func (fastpathT) EncSliceUint8V(v []uint8, e *Encoder) { - e.e.EncodeStringBytesRaw(v) -} -func (fastpathT) EncAsMapSliceUint8V(v []uint8, e *Encoder) { - e.haltOnMbsOddLen(len(v)) - e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) - for j := range v { - if j&1 == 0 { // if j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - e.e.EncodeUint(uint64(v[j])) - } - e.mapEnd() -} -func (e *Encoder) fastpathEncSliceUint64R(f *codecFnInfo, rv reflect.Value) { - var v []uint64 - if rv.Kind() == reflect.Array { - rvGetSlice4Array(rv, &v) - } else { - v = rv2i(rv).([]uint64) - } - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint64V(v, e) - } else { - fastpathTV.EncSliceUint64V(v, e) - } -} -func (fastpathT) EncSliceUint64V(v []uint64, e *Encoder) { - e.arrayStart(len(v)) - for j := range v { - e.arrayElem() - e.e.EncodeUint(v[j]) - } - e.arrayEnd() -} -func (fastpathT) EncAsMapSliceUint64V(v []uint64, e *Encoder) { - e.haltOnMbsOddLen(len(v)) - e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) - for j := range v { - if j&1 == 0 { // if j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - e.e.EncodeUint(v[j]) - } - e.mapEnd() -} -func (e *Encoder) fastpathEncSliceIntR(f *codecFnInfo, rv reflect.Value) { - var v []int - if rv.Kind() == reflect.Array { - rvGetSlice4Array(rv, &v) - } else { - v = rv2i(rv).([]int) - } - if f.ti.mbs { - fastpathTV.EncAsMapSliceIntV(v, e) - } else { - fastpathTV.EncSliceIntV(v, e) - } -} -func (fastpathT) EncSliceIntV(v []int, e *Encoder) { - e.arrayStart(len(v)) - for j := range v { - e.arrayElem() - e.e.EncodeInt(int64(v[j])) - } - e.arrayEnd() -} -func (fastpathT) EncAsMapSliceIntV(v []int, e *Encoder) { - e.haltOnMbsOddLen(len(v)) - e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) - for j := range v { - if j&1 == 0 { // if j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - e.e.EncodeInt(int64(v[j])) - } - e.mapEnd() -} -func (e *Encoder) fastpathEncSliceInt32R(f *codecFnInfo, rv reflect.Value) { - var v []int32 - if rv.Kind() == reflect.Array { - rvGetSlice4Array(rv, &v) - } else { - v = rv2i(rv).([]int32) - } - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt32V(v, e) - } else { - fastpathTV.EncSliceInt32V(v, e) - } -} -func (fastpathT) EncSliceInt32V(v []int32, e *Encoder) { - e.arrayStart(len(v)) - for j := range v { - e.arrayElem() - e.e.EncodeInt(int64(v[j])) - } - e.arrayEnd() -} -func (fastpathT) EncAsMapSliceInt32V(v []int32, e *Encoder) { - e.haltOnMbsOddLen(len(v)) - e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) - for j := range v { - if j&1 == 0 { // if j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - e.e.EncodeInt(int64(v[j])) - } - e.mapEnd() -} -func (e *Encoder) fastpathEncSliceInt64R(f *codecFnInfo, rv reflect.Value) { - var v []int64 - if rv.Kind() == reflect.Array { - rvGetSlice4Array(rv, &v) - } else { - v = rv2i(rv).([]int64) - } - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt64V(v, e) - } else { - fastpathTV.EncSliceInt64V(v, e) - } -} -func (fastpathT) EncSliceInt64V(v []int64, e *Encoder) { - e.arrayStart(len(v)) - for j := range v { - e.arrayElem() - e.e.EncodeInt(v[j]) - } - e.arrayEnd() -} -func (fastpathT) EncAsMapSliceInt64V(v []int64, e *Encoder) { - e.haltOnMbsOddLen(len(v)) - e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) - for j := range v { - if j&1 == 0 { // if j%2 == 0 { - e.mapElemKey() - } else { - 
e.mapElemValue() - } - e.e.EncodeInt(v[j]) - } - e.mapEnd() -} -func (e *Encoder) fastpathEncSliceBoolR(f *codecFnInfo, rv reflect.Value) { - var v []bool - if rv.Kind() == reflect.Array { - rvGetSlice4Array(rv, &v) - } else { - v = rv2i(rv).([]bool) - } - if f.ti.mbs { - fastpathTV.EncAsMapSliceBoolV(v, e) - } else { - fastpathTV.EncSliceBoolV(v, e) - } -} -func (fastpathT) EncSliceBoolV(v []bool, e *Encoder) { - e.arrayStart(len(v)) - for j := range v { - e.arrayElem() - e.e.EncodeBool(v[j]) - } - e.arrayEnd() -} -func (fastpathT) EncAsMapSliceBoolV(v []bool, e *Encoder) { - e.haltOnMbsOddLen(len(v)) - e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) - for j := range v { - if j&1 == 0 { // if j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - e.e.EncodeBool(v[j]) - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapStringIntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) -} -func (fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]string, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.encode(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.encode(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapStringStringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringStringV(rv2i(rv).(map[string]string), e) -} -func (fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]string, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeString(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeString(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapStringBytesR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e) -} -func (fastpathT) EncMapStringBytesV(v map[string][]byte, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]string, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeStringBytesRaw(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeStringBytesRaw(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapStringUint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) -} -func (fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]string, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeUint(uint64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeUint(uint64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapStringUint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) -} -func (fastpathT) 
EncMapStringUint64V(v map[string]uint64, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]string, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeUint(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeUint(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapStringIntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringIntV(rv2i(rv).(map[string]int), e) -} -func (fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]string, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeInt(int64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeInt(int64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapStringInt32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) -} -func (fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]string, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeInt(int64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeInt(int64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) -} -func (fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]string, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeFloat64(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeFloat64(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapStringBoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) -} -func (fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]string, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(stringSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeBool(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeString(k2) - e.mapElemValue() - e.e.EncodeBool(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) -} -func (fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint8, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint8Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.encode(v[k2]) - } - } else { - for k2, v2 := range v { - 
e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.encode(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint8StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) -} -func (fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint8, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint8Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeString(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeString(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint8BytesR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e) -} -func (fastpathT) EncMapUint8BytesV(v map[uint8][]byte, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint8, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint8Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeStringBytesRaw(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeStringBytesRaw(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) -} -func (fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint8, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint8Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeUint(uint64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeUint(uint64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) -} -func (fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint8, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint8Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeUint(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeUint(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) -} -func (fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint8, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint8Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeInt(int64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeInt(int64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) -} -func (fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) 
{ - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint8, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint8Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeInt(int64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeInt(int64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) -} -func (fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint8, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint8Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeFloat64(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeFloat64(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) -} -func (fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint8, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint8Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeBool(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(uint64(k2)) - e.mapElemValue() - e.e.EncodeBool(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) -} -func (fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint64Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.encode(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.encode(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) -} -func (fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint64Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeString(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeString(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint64BytesR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e) -} -func (fastpathT) EncMapUint64BytesV(v map[uint64][]byte, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint64Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeStringBytesRaw(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - 
e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeStringBytesRaw(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) -} -func (fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint64Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeUint(uint64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeUint(uint64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) -} -func (fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint64Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeUint(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeUint(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) -} -func (fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint64Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeInt(int64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeInt(int64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) -} -func (fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint64Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeInt(int64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeInt(int64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) -} -func (fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint64Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeFloat64(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeFloat64(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) -} -func (fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := 
make([]uint64, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(uint64Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeBool(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeUint(k2) - e.mapElemValue() - e.e.EncodeBool(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapIntIntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) -} -func (fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.encode(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.encode(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapIntStringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntStringV(rv2i(rv).(map[int]string), e) -} -func (fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeString(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeString(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapIntBytesR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e) -} -func (fastpathT) EncMapIntBytesV(v map[int][]byte, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeStringBytesRaw(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeStringBytesRaw(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapIntUint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) -} -func (fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeUint(uint64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeUint(uint64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapIntUint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) -} -func (fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeUint(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeUint(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapIntIntR(f 
*codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntIntV(rv2i(rv).(map[int]int), e) -} -func (fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeInt(int64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeInt(int64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapIntInt32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) -} -func (fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeInt(int64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeInt(int64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) -} -func (fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeFloat64(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeFloat64(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapIntBoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) -} -func (fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeBool(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeBool(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) -} -func (fastpathT) EncMapInt32IntfV(v map[int32]interface{}, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int32, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(int32Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.encode(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.encode(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapInt32StringR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) -} -func (fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int32, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(int32Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - 
e.e.EncodeString(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeString(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapInt32BytesR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e) -} -func (fastpathT) EncMapInt32BytesV(v map[int32][]byte, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int32, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(int32Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeStringBytesRaw(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeStringBytesRaw(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) -} -func (fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int32, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(int32Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeUint(uint64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeUint(uint64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) -} -func (fastpathT) EncMapInt32Uint64V(v map[int32]uint64, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int32, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(int32Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeUint(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeUint(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapInt32IntR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) -} -func (fastpathT) EncMapInt32IntV(v map[int32]int, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int32, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(int32Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeInt(int64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeInt(int64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) -} -func (fastpathT) EncMapInt32Int32V(v map[int32]int32, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int32, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(int32Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeInt(int64(v[k2])) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeInt(int64(v2)) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) -} -func 
(fastpathT) EncMapInt32Float64V(v map[int32]float64, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int32, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(int32Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeFloat64(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeFloat64(v2) - } - } - e.mapEnd() -} -func (e *Encoder) fastpathEncMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { - fastpathTV.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) -} -func (fastpathT) EncMapInt32BoolV(v map[int32]bool, e *Encoder) { - e.mapStart(len(v)) - if e.h.Canonical { - v2 := make([]int32, len(v)) - var i uint - for k := range v { - v2[i] = k - i++ - } - sort.Sort(int32Slice(v2)) - for _, k2 := range v2 { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeBool(v[k2]) - } - } else { - for k2, v2 := range v { - e.mapElemKey() - e.e.EncodeInt(int64(k2)) - e.mapElemValue() - e.e.EncodeBool(v2) - } - } - e.mapEnd() -} - -// -- decode - -// -- -- fast path type switch -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - var changed bool - var containerLen int - switch v := iv.(type) { - case []interface{}: - fastpathTV.DecSliceIntfN(v, d) - case *[]interface{}: - var v2 []interface{} - if v2, changed = fastpathTV.DecSliceIntfY(*v, d); changed { - *v = v2 - } - case []string: - fastpathTV.DecSliceStringN(v, d) - case *[]string: - var v2 []string - if v2, changed = fastpathTV.DecSliceStringY(*v, d); changed { - *v = v2 - } - case [][]byte: - fastpathTV.DecSliceBytesN(v, d) - case *[][]byte: - var v2 [][]byte - if v2, changed = fastpathTV.DecSliceBytesY(*v, d); changed { - *v = v2 - } - case []float32: - fastpathTV.DecSliceFloat32N(v, d) - case *[]float32: - var v2 []float32 - if v2, changed = fastpathTV.DecSliceFloat32Y(*v, d); changed { - *v = v2 - } - case []float64: - fastpathTV.DecSliceFloat64N(v, d) - case *[]float64: - var v2 []float64 - if v2, changed = fastpathTV.DecSliceFloat64Y(*v, d); changed { - *v = v2 - } - case []uint8: - fastpathTV.DecSliceUint8N(v, d) - case *[]uint8: - var v2 []uint8 - if v2, changed = fastpathTV.DecSliceUint8Y(*v, d); changed { - *v = v2 - } - case []uint64: - fastpathTV.DecSliceUint64N(v, d) - case *[]uint64: - var v2 []uint64 - if v2, changed = fastpathTV.DecSliceUint64Y(*v, d); changed { - *v = v2 - } - case []int: - fastpathTV.DecSliceIntN(v, d) - case *[]int: - var v2 []int - if v2, changed = fastpathTV.DecSliceIntY(*v, d); changed { - *v = v2 - } - case []int32: - fastpathTV.DecSliceInt32N(v, d) - case *[]int32: - var v2 []int32 - if v2, changed = fastpathTV.DecSliceInt32Y(*v, d); changed { - *v = v2 - } - case []int64: - fastpathTV.DecSliceInt64N(v, d) - case *[]int64: - var v2 []int64 - if v2, changed = fastpathTV.DecSliceInt64Y(*v, d); changed { - *v = v2 - } - case []bool: - fastpathTV.DecSliceBoolN(v, d) - case *[]bool: - var v2 []bool - if v2, changed = fastpathTV.DecSliceBoolY(*v, d); changed { - *v = v2 - } - case map[string]interface{}: - containerLen = d.mapStart(d.d.ReadMapStart()) - if containerLen != containerLenNil { - if containerLen != 0 { - fastpathTV.DecMapStringIntfL(v, containerLen, d) - } - d.mapEnd() - } - case *map[string]interface{}: - fastpathTV.DecMapStringIntfX(v, d) - case map[string]string: - containerLen = d.mapStart(d.d.ReadMapStart()) - if containerLen != containerLenNil { - if containerLen != 0 { - 
- mk = d.d.DecodeUint64() - d.mapElemValue() - mv = d.d.DecodeUint64() - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[uint64]int) - if *vp == nil { - *vp = make(map[uint64]int, decInferLen(containerLen, d.h.MaxInitLen, 16)) - } - if containerLen != 0 { - fastpathTV.DecMapUint64IntL(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[uint64]int, decInferLen(containerLen, d.h.MaxInitLen, 16)) - } - if containerLen != 0 { - f.DecMapUint64IntL(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapUint64IntL(v map[uint64]int, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[uint64]int given stream length: %v", containerLen) - return - } - var mk uint64 - var mv int - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = d.d.DecodeUint64() - d.mapElemValue() - mv = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[uint64]int32) - if *vp == nil { - *vp = make(map[uint64]int32, decInferLen(containerLen, d.h.MaxInitLen, 12)) - } - if containerLen != 0 { - fastpathTV.DecMapUint64Int32L(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[uint64]int32, decInferLen(containerLen, d.h.MaxInitLen, 12)) - } - if containerLen != 0 { - f.DecMapUint64Int32L(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapUint64Int32L(v map[uint64]int32, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[uint64]int32 given stream length: %v", containerLen) - return - } - var mk uint64 - var mv int32 - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = d.d.DecodeUint64() - d.mapElemValue() - mv = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[uint64]float64) - if *vp == nil { - *vp = make(map[uint64]float64, decInferLen(containerLen, d.h.MaxInitLen, 16)) - } - if containerLen != 0 { - fastpathTV.DecMapUint64Float64L(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = 
make(map[uint64]float64, decInferLen(containerLen, d.h.MaxInitLen, 16)) - } - if containerLen != 0 { - f.DecMapUint64Float64L(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[uint64]float64 given stream length: %v", containerLen) - return - } - var mk uint64 - var mv float64 - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = d.d.DecodeUint64() - d.mapElemValue() - mv = d.d.DecodeFloat64() - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[uint64]bool) - if *vp == nil { - *vp = make(map[uint64]bool, decInferLen(containerLen, d.h.MaxInitLen, 9)) - } - if containerLen != 0 { - fastpathTV.DecMapUint64BoolL(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[uint64]bool, decInferLen(containerLen, d.h.MaxInitLen, 9)) - } - if containerLen != 0 { - f.DecMapUint64BoolL(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[uint64]bool given stream length: %v", containerLen) - return - } - var mk uint64 - var mv bool - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = d.d.DecodeUint64() - d.mapElemValue() - mv = d.d.DecodeBool() - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapIntIntfR(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int]interface{}) - if *vp == nil { - *vp = make(map[int]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 24)) - } - if containerLen != 0 { - fastpathTV.DecMapIntIntfL(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 24)) - } - if containerLen != 0 { - f.DecMapIntIntfL(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int]interface{} given stream length: %v", containerLen) - return - } - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk int - var mv interface{} - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - d.mapElemValue() - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapIntStringR(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if 
rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int]string) - if *vp == nil { - *vp = make(map[int]string, decInferLen(containerLen, d.h.MaxInitLen, 24)) - } - if containerLen != 0 { - fastpathTV.DecMapIntStringL(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapIntStringX(vp *map[int]string, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int]string, decInferLen(containerLen, d.h.MaxInitLen, 24)) - } - if containerLen != 0 { - f.DecMapIntStringL(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapIntStringL(v map[int]string, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int]string given stream length: %v", containerLen) - return - } - var mk int - var mv string - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - d.mapElemValue() - mv = d.stringZC(d.d.DecodeStringAsBytes()) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapIntBytesR(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int][]byte) - if *vp == nil { - *vp = make(map[int][]byte, decInferLen(containerLen, d.h.MaxInitLen, 32)) - } - if containerLen != 0 { - fastpathTV.DecMapIntBytesL(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapIntBytesX(vp *map[int][]byte, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int][]byte, decInferLen(containerLen, d.h.MaxInitLen, 32)) - } - if containerLen != 0 { - f.DecMapIntBytesL(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapIntBytesL(v map[int][]byte, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int][]byte given stream length: %v", containerLen) - return - } - mapGet := v != nil && !d.h.MapValueReset - var mk int - var mv []byte - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - d.mapElemValue() - if mapGet { - mv = v[mk] - } else { - mv = nil - } - mv = d.decodeBytesInto(mv) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapIntUint8R(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int]uint8) - if *vp == nil { - *vp = make(map[int]uint8, decInferLen(containerLen, d.h.MaxInitLen, 9)) - } - if containerLen != 0 { - fastpathTV.DecMapIntUint8L(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int]uint8, decInferLen(containerLen, d.h.MaxInitLen, 9)) - } - if containerLen != 0 { - f.DecMapIntUint8L(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapIntUint8L(v 
map[int]uint8, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int]uint8 given stream length: %v", containerLen) - return - } - var mk int - var mv uint8 - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - d.mapElemValue() - mv = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapIntUint64R(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int]uint64) - if *vp == nil { - *vp = make(map[int]uint64, decInferLen(containerLen, d.h.MaxInitLen, 16)) - } - if containerLen != 0 { - fastpathTV.DecMapIntUint64L(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int]uint64, decInferLen(containerLen, d.h.MaxInitLen, 16)) - } - if containerLen != 0 { - f.DecMapIntUint64L(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapIntUint64L(v map[int]uint64, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int]uint64 given stream length: %v", containerLen) - return - } - var mk int - var mv uint64 - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - d.mapElemValue() - mv = d.d.DecodeUint64() - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapIntIntR(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int]int) - if *vp == nil { - *vp = make(map[int]int, decInferLen(containerLen, d.h.MaxInitLen, 16)) - } - if containerLen != 0 { - fastpathTV.DecMapIntIntL(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapIntIntX(vp *map[int]int, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int]int, decInferLen(containerLen, d.h.MaxInitLen, 16)) - } - if containerLen != 0 { - f.DecMapIntIntL(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapIntIntL(v map[int]int, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int]int given stream length: %v", containerLen) - return - } - var mk int - var mv int - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - d.mapElemValue() - mv = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapIntInt32R(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int]int32) - if *vp == nil { - *vp = make(map[int]int32, decInferLen(containerLen, d.h.MaxInitLen, 12)) - } - if containerLen != 0 { - fastpathTV.DecMapIntInt32L(*vp, containerLen, d) - } - } else if containerLen != 0 { - 
fastpathTV.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int]int32, decInferLen(containerLen, d.h.MaxInitLen, 12)) - } - if containerLen != 0 { - f.DecMapIntInt32L(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapIntInt32L(v map[int]int32, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int]int32 given stream length: %v", containerLen) - return - } - var mk int - var mv int32 - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - d.mapElemValue() - mv = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int]float64) - if *vp == nil { - *vp = make(map[int]float64, decInferLen(containerLen, d.h.MaxInitLen, 16)) - } - if containerLen != 0 { - fastpathTV.DecMapIntFloat64L(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int]float64, decInferLen(containerLen, d.h.MaxInitLen, 16)) - } - if containerLen != 0 { - f.DecMapIntFloat64L(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapIntFloat64L(v map[int]float64, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int]float64 given stream length: %v", containerLen) - return - } - var mk int - var mv float64 - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - d.mapElemValue() - mv = d.d.DecodeFloat64() - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapIntBoolR(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int]bool) - if *vp == nil { - *vp = make(map[int]bool, decInferLen(containerLen, d.h.MaxInitLen, 9)) - } - if containerLen != 0 { - fastpathTV.DecMapIntBoolL(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int]bool, decInferLen(containerLen, d.h.MaxInitLen, 9)) - } - if containerLen != 0 { - f.DecMapIntBoolL(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapIntBoolL(v map[int]bool, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int]bool given stream length: %v", containerLen) - return - } - var mk int - var mv bool - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - d.mapElemValue() - 
mv = d.d.DecodeBool() - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int32]interface{}) - if *vp == nil { - *vp = make(map[int32]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 20)) - } - if containerLen != 0 { - fastpathTV.DecMapInt32IntfL(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int32]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 20)) - } - if containerLen != 0 { - f.DecMapInt32IntfL(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int32]interface{} given stream length: %v", containerLen) - return - } - mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - var mk int32 - var mv interface{} - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - d.mapElemValue() - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int32]string) - if *vp == nil { - *vp = make(map[int32]string, decInferLen(containerLen, d.h.MaxInitLen, 20)) - } - if containerLen != 0 { - fastpathTV.DecMapInt32StringL(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int32]string, decInferLen(containerLen, d.h.MaxInitLen, 20)) - } - if containerLen != 0 { - f.DecMapInt32StringL(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapInt32StringL(v map[int32]string, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int32]string given stream length: %v", containerLen) - return - } - var mk int32 - var mv string - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - d.mapElemValue() - mv = d.stringZC(d.d.DecodeStringAsBytes()) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapInt32BytesR(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int32][]byte) - if *vp == nil { - *vp = make(map[int32][]byte, decInferLen(containerLen, d.h.MaxInitLen, 28)) - } - if containerLen != 0 { - fastpathTV.DecMapInt32BytesL(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapInt32BytesX(vp *map[int32][]byte, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - 
if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int32][]byte, decInferLen(containerLen, d.h.MaxInitLen, 28)) - } - if containerLen != 0 { - f.DecMapInt32BytesL(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int32][]byte given stream length: %v", containerLen) - return - } - mapGet := v != nil && !d.h.MapValueReset - var mk int32 - var mv []byte - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - d.mapElemValue() - if mapGet { - mv = v[mk] - } else { - mv = nil - } - mv = d.decodeBytesInto(mv) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int32]uint8) - if *vp == nil { - *vp = make(map[int32]uint8, decInferLen(containerLen, d.h.MaxInitLen, 5)) - } - if containerLen != 0 { - fastpathTV.DecMapInt32Uint8L(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int32]uint8, decInferLen(containerLen, d.h.MaxInitLen, 5)) - } - if containerLen != 0 { - f.DecMapInt32Uint8L(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int32]uint8 given stream length: %v", containerLen) - return - } - var mk int32 - var mv uint8 - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - d.mapElemValue() - mv = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int32]uint64) - if *vp == nil { - *vp = make(map[int32]uint64, decInferLen(containerLen, d.h.MaxInitLen, 12)) - } - if containerLen != 0 { - fastpathTV.DecMapInt32Uint64L(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int32]uint64, decInferLen(containerLen, d.h.MaxInitLen, 12)) - } - if containerLen != 0 { - f.DecMapInt32Uint64L(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int32]uint64 given stream length: %v", containerLen) - return - } - var mk int32 - var mv uint64 - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - d.mapElemValue() - mv = d.d.DecodeUint64() - v[mk] = mv - } -} -func (d 
*Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int32]int) - if *vp == nil { - *vp = make(map[int32]int, decInferLen(containerLen, d.h.MaxInitLen, 12)) - } - if containerLen != 0 { - fastpathTV.DecMapInt32IntL(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int32]int, decInferLen(containerLen, d.h.MaxInitLen, 12)) - } - if containerLen != 0 { - f.DecMapInt32IntL(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapInt32IntL(v map[int32]int, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int32]int given stream length: %v", containerLen) - return - } - var mk int32 - var mv int - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - d.mapElemValue() - mv = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int32]int32) - if *vp == nil { - *vp = make(map[int32]int32, decInferLen(containerLen, d.h.MaxInitLen, 8)) - } - if containerLen != 0 { - fastpathTV.DecMapInt32Int32L(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int32]int32, decInferLen(containerLen, d.h.MaxInitLen, 8)) - } - if containerLen != 0 { - f.DecMapInt32Int32L(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapInt32Int32L(v map[int32]int32, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int32]int32 given stream length: %v", containerLen) - return - } - var mk int32 - var mv int32 - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - d.mapElemValue() - mv = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int32]float64) - if *vp == nil { - *vp = make(map[int32]float64, decInferLen(containerLen, d.h.MaxInitLen, 12)) - } - if containerLen != 0 { - fastpathTV.DecMapInt32Float64L(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int32]float64, decInferLen(containerLen, d.h.MaxInitLen, 12)) - } - if containerLen != 0 { - 
f.DecMapInt32Float64L(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int32]float64 given stream length: %v", containerLen) - return - } - var mk int32 - var mv float64 - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - d.mapElemValue() - mv = d.d.DecodeFloat64() - v[mk] = mv - } -} -func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[int32]bool) - if *vp == nil { - *vp = make(map[int32]bool, decInferLen(containerLen, d.h.MaxInitLen, 5)) - } - if containerLen != 0 { - fastpathTV.DecMapInt32BoolL(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[int32]bool, decInferLen(containerLen, d.h.MaxInitLen, 5)) - } - if containerLen != 0 { - f.DecMapInt32BoolL(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) DecMapInt32BoolL(v map[int32]bool, containerLen int, d *Decoder) { - if v == nil { - d.errorf("cannot decode into nil map[int32]bool given stream length: %v", containerLen) - return - } - var mk int32 - var mv bool - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) - d.mapElemValue() - mv = d.d.DecodeBool() - v[mk] = mv - } -} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl deleted file mode 100644 index 1a1cb95c6..000000000 --- a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl +++ /dev/null @@ -1,555 +0,0 @@ -// +build !notfastpath -// +build !codec.notfastpath - -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// Code generated from fast-path.go.tmpl - DO NOT EDIT. - -package codec - -// Fast path functions try to create a fast path encode or decode implementation -// for common maps and slices. -// -// We define the functions and register them in this single file -// so as not to pollute the encode.go and decode.go, and create a dependency in there. -// This file can be omitted without causing a build failure. -// -// The advantage of fast paths is: -// - Many calls bypass reflection altogether -// -// Currently support -// - slice of all builtin types (numeric, bool, string, []byte) -// - maps of builtin types to builtin or interface{} type, EXCEPT FOR -// keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{} -// AND values of type type int8/16/32, uint16/32 -// This should provide adequate "typical" implementations. -// -// Note that fast track decode functions must handle values for which an address cannot be obtained. -// For example: -// m2 := map[string]int{} -// p2 := []interface{}{m2} -// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. 
-// - -{{/* -fastpathEncMapStringUint64R (called by fastpath...switch) -EncMapStringUint64V (called by codecgen) - -fastpathEncSliceBoolR: (called by fastpath...switch) (checks f.ti.mbs and calls one of them below) -EncSliceBoolV (also called by codecgen) -EncAsMapSliceBoolV (delegate when mapbyslice=true) - -fastpathDecSliceIntfR (called by fastpath...switch) (calls Y or N below depending on if it can be updated) -DecSliceIntfX (called by codecgen) (calls Y below) -DecSliceIntfY (delegate when slice CAN be updated) -DecSliceIntfN (delegate when slice CANNOT be updated e.g. from array or non-addressable slice) - -fastpathDecMap...R (called by fastpath...switch) (calls L or X? below) -DecMap...X (called by codecgen) -DecMap...L (delegated to by both above) -*/ -}} - -import ( - "reflect" - "sort" -) - -const fastpathEnabled = true - -{{/* -const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v" -*/ -}} - -type fastpathT struct {} - -var fastpathTV fastpathT - -type fastpathE struct { - {{/* rtid uintptr */ -}} - rt reflect.Type - encfn func(*Encoder, *codecFnInfo, reflect.Value) - decfn func(*Decoder, *codecFnInfo, reflect.Value) -} - -type fastpathA [{{ .FastpathLen }}]fastpathE -type fastpathARtid [{{ .FastpathLen }}]uintptr - -var fastpathAv fastpathA -var fastpathAvRtid fastpathARtid - -type fastpathAslice struct{} - -func (fastpathAslice) Len() int { return {{ .FastpathLen }} } -func (fastpathAslice) Less(i, j int) bool { - return fastpathAvRtid[uint(i)] < fastpathAvRtid[uint(j)] -} -func (fastpathAslice) Swap(i, j int) { - fastpathAvRtid[uint(i)], fastpathAvRtid[uint(j)] = fastpathAvRtid[uint(j)], fastpathAvRtid[uint(i)] - fastpathAv[uint(i)], fastpathAv[uint(j)] = fastpathAv[uint(j)], fastpathAv[uint(i)] -} - -func fastpathAvIndex(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - // Note: we use goto (instead of for loop) so this can be inlined. 
- // h, i, j := 0, 0, {{ .FastpathLen }} - var h, i uint - var j uint = {{ .FastpathLen }} -LOOP: - if i < j { - h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2 - if fastpathAvRtid[h] < rtid { - i = h + 1 - } else { - j = h - } - goto LOOP - } - if i < {{ .FastpathLen }} && fastpathAvRtid[i] == rtid { - return int(i) - } - return -1 -} - - -// due to possible initialization loop error, make fastpath in an init() -func init() { - var i uint = 0 - fn := func(v interface{}, - fe func(*Encoder, *codecFnInfo, reflect.Value), - fd func(*Decoder, *codecFnInfo, reflect.Value)) { - xrt := reflect.TypeOf(v) - xptr := rt2id(xrt) - fastpathAvRtid[i] = xptr - fastpathAv[i] = fastpathE{xrt, fe, fd} - i++ - } - {{/* do not register []byte in fast-path */}} - {{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} - fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R) - {{end}}{{end}}{{end}} - - {{range .Values}}{{if not .Primitive}}{{if .MapKey -}} - fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R) - {{end}}{{end}}{{end}} - - sort.Sort(fastpathAslice{}) -} - -// -- encode - -// -- -- fast path type switch -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} - case []{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) - case *[]{{ .Elem }}: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) - } -{{end}}{{end}}{{end -}} - -{{range .Values}}{{if not .Primitive}}{{if .MapKey -}} - case map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) - case *map[{{ .MapKey }}]{{ .Elem }}: - if *v == nil { - e.e.EncodeNil() - } else { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) - } -{{end}}{{end}}{{end -}} - - default: - _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 - return false - } - return true -} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} -func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { - var v []{{ .Elem }} - if rv.Kind() == reflect.Array { - rvGetSlice4Array(rv, &v) - } else { - v = rv2i(rv).([]{{ .Elem }}) - } - if f.ti.mbs { - fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(v, e) - } else { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) - } -} -func (fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) { - {{/* if v == nil { e.e.EncodeNil(); return } */ -}} - {{ if eq .Elem "uint8" "byte" -}} - e.e.EncodeStringBytesRaw(v) - {{ else -}} - e.arrayStart(len(v)) - for j := range v { - e.arrayElem() - {{ encmd .Elem "v[j]"}} - } - e.arrayEnd() - {{ end -}} -} -func (fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) { - {{/* if v == nil { e.e.EncodeNil() } else */ -}} - e.haltOnMbsOddLen(len(v)) - {{/* - if len(v)&1 != 0 { // similar to &1==1 or %2 == 1 - e.errorf(fastpathMapBySliceErrMsg, len(v)) - } - */ -}} - e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) - for j := range v { - if j&1 == 0 { // if j%2 == 0 { - e.mapElemKey() - } else { - e.mapElemValue() - } - {{ encmd .Elem "v[j]"}} - } - e.mapEnd() -} -{{end}}{{end}}{{end -}} - -{{range .Values}}{{if not .Primitive}}{{if .MapKey -}} -func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" 
false }}R(f *codecFnInfo, rv reflect.Value) { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e) -} -func (fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) { - {{/* if v == nil { e.e.EncodeNil(); return } */ -}} - e.mapStart(len(v)) - if e.h.Canonical { {{/* need to figure out .NoCanonical */}} - {{if eq .MapKey "interface{}"}}{{/* out of band */ -}} - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesIntf, len(v)) - var i, l uint {{/* put loop variables outside. seems currently needed for better perf */}} - var vp *bytesIntf - for k2 := range v { - l = uint(len(mksv)) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesIntfSlice(v2)) - for j := range v2 { - e.mapElemKey() - e.asis(v2[j].v) - e.mapElemValue() - e.encode(v[v2[j].i]) - } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) - var i uint - for k := range v { - v2[i] = {{if eq $x .MapKey}}k{{else}}{{ $x }}(k){{end}} - i++ - } - sort.Sort({{ sorttype .MapKey false}}(v2)) - for _, k2 := range v2 { - e.mapElemKey() - {{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ $y := printf "%s(k2)" .MapKey }}{{if eq $x .MapKey }}{{ $y = "k2" }}{{end}}{{ encmd .MapKey $y }}{{end}} - e.mapElemValue() - {{ $y := printf "v[%s(k2)]" .MapKey }}{{if eq $x .MapKey }}{{ $y = "v[k2]" }}{{end}}{{ encmd .Elem $y }} - } {{end}} - } else { - for k2, v2 := range v { - e.mapElemKey() - {{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ encmd .MapKey "k2"}}{{end}} - e.mapElemValue() - {{ encmd .Elem "v2"}} - } - } - e.mapEnd() -} -{{end}}{{end}}{{end -}} - -// -- decode - -// -- -- fast path type switch -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - var changed bool - var containerLen int - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} - case []{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Dec" false }}N(v, d) - case *[]{{ .Elem }}: - var v2 []{{ .Elem }} - if v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}Y(*v, d); changed { - *v = v2 - } -{{end}}{{end}}{{end -}} -{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/* -// maps only change if nil, and in that case, there's no point copying -*/ -}} - case map[{{ .MapKey }}]{{ .Elem }}: - containerLen = d.mapStart(d.d.ReadMapStart()) - if containerLen != containerLenNil { - if containerLen != 0 { - fastpathTV.{{ .MethodNamePfx "Dec" false }}L(v, containerLen, d) - } - d.mapEnd() - } - case *map[{{ .MapKey }}]{{ .Elem }}: - {{/* - containerLen = d.mapStart(d.d.ReadMapStart()) - if containerLen == 0 { - d.mapEnd() - } else if containerLen == containerLenNil { - *v = nil - } else { - if *v == nil { - *v = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})) - } - fastpathTV.{{ .MethodNamePfx "Dec" false }}L(*v, containerLen, d) - } - // consider delegating fully to X - encoding *map is uncommon, so ok to pay small function call cost - */ -}} - fastpathTV.{{ .MethodNamePfx "Dec" false }}X(v, d) -{{end}}{{end}}{{end -}} - default: - _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 - return false - } - return true -} - -func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} - case *[]{{ .Elem }}: - *v = nil -{{end}}{{end}}{{end}} -{{range 
.Values}}{{if not .Primitive}}{{if .MapKey -}} - case *map[{{ .MapKey }}]{{ .Elem }}: - *v = nil -{{end}}{{end}}{{end}} - default: - _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 - return false - } - return true -} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} -{{/* -Slices can change if they -- did not come from an array -- are addressable (from a ptr) -- are settable (e.g. contained in an interface{}) -*/}} -func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { - {{/* - // seqTypeArray=true means that we are not getting a pointer, so no need to check that. - if f.seq != seqTypeArray && rv.Kind() == reflect.Ptr { - */ -}} - var v []{{ .Elem }} - switch rv.Kind() { - case reflect.Ptr: - vp := rv2i(rv).(*[]{{ .Elem }}) - var changed bool - if v, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}Y(*vp, d); changed { - *vp = v - } - case reflect.Array: - rvGetSlice4Array(rv, &v) - fastpathTV.{{ .MethodNamePfx "Dec" false }}N(v, d) - default: - fastpathTV.{{ .MethodNamePfx "Dec" false }}N(rv2i(rv).([]{{ .Elem }}), d) - } -} -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) { - if v, changed := f.{{ .MethodNamePfx "Dec" false }}Y(*vp, d); changed { *vp = v } -} -func (fastpathT) {{ .MethodNamePfx "Dec" false }}Y(v []{{ .Elem }}, d *Decoder) (v2 []{{ .Elem }}, changed bool) { - {{ if eq .Elem "uint8" "byte" -}} - switch d.d.ContainerType() { - case valueTypeNil, valueTypeMap: - break - default: - v2 = d.decodeBytesInto(v[:len(v):len(v)]) - changed = !(len(v2) > 0 && len(v2) == len(v) && &v2[0] == &v[0]) // not same slice - return - } - {{ end -}} - slh, containerLenS := d.decSliceHelperStart() - if slh.IsNil { - if v == nil { return } - return nil, true - } - if containerLenS == 0 { - if v == nil { v = []{{ .Elem }}{} } else if len(v) != 0 { v = v[:0] } - slh.End() - return v, true - } - hasLen := containerLenS > 0 - var xlen int - if hasLen { - if containerLenS > cap(v) { - xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) - if xlen <= cap(v) { - v = v[:uint(xlen)] - } else { - v = make([]{{ .Elem }}, uint(xlen)) - } - changed = true - } else if containerLenS != len(v) { - v = v[:containerLenS] - changed = true - } - } - var j int - for j = 0; d.containerNext(j, containerLenS, hasLen); j++ { - if j == 0 && len(v) == 0 { // means hasLen == false - xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) {{/* xlen = decDefSliceCap */}} - v = make([]{{ .Elem }}, uint(xlen)) - changed = true - } - {{/* // if indefinite, etc, then expand the slice if necessary */ -}} - if j >= len(v) { - v = append(v, {{ zerocmd .Elem }}) - changed = true - } - slh.ElemContainerState(j) - {{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem false }}{{ end }} - } - if j < len(v) { - v = v[:uint(j)] - changed = true - } else if j == 0 && v == nil { - v = []{{ .Elem }}{} - changed = true - } - slh.End() - return v, changed -} -func (fastpathT) {{ .MethodNamePfx "Dec" false }}N(v []{{ .Elem }}, d *Decoder) { - {{ if eq .Elem "uint8" "byte" -}} - switch d.d.ContainerType() { - case valueTypeNil, valueTypeMap: - break - default: - v2 := d.decodeBytesInto(v[:len(v):len(v)]) - if !(len(v2) > 0 && len(v2) == len(v) && &v2[0] == &v[0]) { // not same slice - copy(v, v2) - } - return - } - {{ end -}} - slh, containerLenS := d.decSliceHelperStart() - if slh.IsNil { - return - } - if containerLenS == 0 { - slh.End() - 
return - } - hasLen := containerLenS > 0 - for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { - {{/* // if indefinite, etc, then expand the slice if necessary */ -}} - if j >= len(v) { - slh.arrayCannotExpand(hasLen, len(v), j, containerLenS) - return - } - slh.ElemContainerState(j) - {{ if eq .Elem "interface{}" -}} - d.decode(&v[uint(j)]) - {{- else -}} - v[uint(j)] = {{ decmd .Elem false }} - {{- end }} - } - slh.End() -} -{{end}}{{end}}{{end -}} - -{{range .Values}}{{if not .Primitive}}{{if .MapKey -}} -{{/* -Maps can change if they are -- addressable (from a ptr) -- settable (e.g. contained in an interface{}) - -Also, these methods are called by decodeValue directly, after handling a TryNil. -Consequently, there's no need to check for containerLenNil here. -*/ -}} -func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { - containerLen := d.mapStart(d.d.ReadMapStart()) - {{/* - if containerLen == containerLenNil { - if rv.Kind() == reflect.Ptr { - *(rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})) = nil - } - return - } - */ -}} - if rv.Kind() == reflect.Ptr { - vp, _ := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }}) - if *vp == nil { - *vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})) - } - if containerLen != 0 { - fastpathTV.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d) - } - } else if containerLen != 0 { - fastpathTV.{{ .MethodNamePfx "Dec" false }}L(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), containerLen, d) - } - d.mapEnd() -} -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) { - containerLen := d.mapStart(d.d.ReadMapStart()) - if containerLen == containerLenNil { - *vp = nil - } else { - if *vp == nil { - *vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})) - } - if containerLen != 0 { - f.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d) - } - d.mapEnd() - } -} -func (fastpathT) {{ .MethodNamePfx "Dec" false }}L(v map[{{ .MapKey }}]{{ .Elem }}, containerLen int, d *Decoder) { - {{/* No need to check if containerLen == containerLenNil, as that is checked by R and L above */ -}} - if v == nil { - d.errorf("cannot decode into nil map[{{ .MapKey }}]{{ .Elem }} given stream length: %v", containerLen) - {{/* d.swallowMapContents(containerLen) */ -}} - return - } - {{if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset - {{else if eq .Elem "bytes" "[]byte" }}mapGet := v != nil && !d.h.MapValueReset - {{end -}} - var mk {{ .MapKey }} - var mv {{ .Elem }} - hasLen := containerLen > 0 - for j := 0; d.containerNext(j, containerLen, hasLen); j++ { - d.mapElemKey() - {{ if eq .MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.stringZC(bv) {{/* // maps cannot have []byte as key. switch to string. 
*/}} - }{{ else }}mk = {{ decmd .MapKey true }}{{ end }} - d.mapElemValue() - {{ if eq .Elem "interface{}" "[]byte" "bytes" -}} - if mapGet { mv = v[mk] } else { mv = nil } - {{ end -}} - {{ if eq .Elem "interface{}" -}} - d.decode(&mv) - {{ else if eq .Elem "[]byte" "bytes" -}} - mv = d.decodeBytesInto(mv) - {{ else -}} - mv = {{ decmd .Elem false }} - {{ end -}} - v[mk] = mv - } -} -{{end}}{{end}}{{end}} diff --git a/vendor/github.com/ugorji/go/codec/fastpath.go.tmpl b/vendor/github.com/ugorji/go/codec/fastpath.go.tmpl new file mode 100644 index 000000000..db60ea837 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/fastpath.go.tmpl @@ -0,0 +1,134 @@ +//go:build !notfastpath && !codec.notfastpath + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from fastpath.go.tmpl - DO NOT EDIT. + +package codec + +// Fast path functions try to create a fast path encode or decode implementation +// for common maps and slices. +// +// We define the functions and register them in this single file +// so as not to pollute the encode.go and decode.go, and create a dependency in there. +// This file can be omitted without causing a build failure. +// +// The advantage of fast paths is: +// - Many calls bypass reflection altogether +// +// Currently support +// - slice of all builtin types (numeric, bool, string, []byte) +// - maps of builtin types to builtin or interface{} type, EXCEPT FOR +// keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{} +// AND values of type type int8/16/32, uint16/32 +// This should provide adequate "typical" implementations. +// +// Note that fast track decode functions must handle values for which an address cannot be obtained. +// For example: +// m2 := map[string]int{} +// p2 := []interface{}{m2} +// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. +// + +{{/* +// ---------------- +fastpathEncMapR func (mapped to type id), routes to: +- ft.EncMapV + +fastpathEncSliceR func (mapped to type id), delegates to one of: +- ft.EncSliceV +- ft.EncAsMapSliceV (when mapbyslice ie f.ti.mbs=true) + +// ---------------- +fastpathDecSliceR func (mapped to type id), delegates to: +- ft.DecSliceIntfY (when slice CAN be updated) +- ft.DecSliceIntfN (when slice CANNOT be updated e.g. 
from array or non-addressable slice) + +fastpathDecMapR func (mapped to type id), routes to +- ft.DecMapL (handles ptr which is changeable, and non-pointer which cannot be made if nil) + +// ---------------- +NOTE: +- fastpath typeswitch directly calls the secondary methods for builtin maps/slices with appropriate nil handling: + - except EncAsMapSliceV which only applies to wrapper types not those in the switch +- fastpathEncXXX functions mapped to type ID MUST do nil-checks during encode + - they are only called by decodeValue/encodeValue or other code (same way kMap et al are called) +*/ -}} + +import ( + "reflect" + "sort" + "slices" +) + +const fastpathEnabled = true + +{{/* +const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v" +*/ -}} + +type fastpathARtid [{{ .FastpathLen }}]uintptr + +type fastpathRtRtid struct { + rtid uintptr + rt reflect.Type +} +type fastpathARtRtid [{{ .FastpathLen }}]fastpathRtRtid + +var ( + fastpathAvRtidArr fastpathARtid + fastpathAvRtRtidArr fastpathARtRtid + fastpathAvRtid = fastpathAvRtidArr[:] + fastpathAvRtRtid = fastpathAvRtRtidArr[:] +) + +func fastpathAvIndex(rtid uintptr) (i uint, ok bool) { + return searchRtids(fastpathAvRtid, rtid) +} + +func init() { + var i uint = 0 + fn := func(v interface{}) { + xrt := reflect.TypeOf(v) + xrtid := rt2id(xrt) + xptrtid := rt2id(reflect.PointerTo(xrt)) + {{- /* only the base slice/map rtid is put in fastpathAvIndex, since we only handle slices/map/array */}} + fastpathAvRtid[i] = xrtid + fastpathAvRtRtid[i] = fastpathRtRtid{ rtid: xrtid, rt: xrt } + {{- /* fastpath type switches however handle slices/map/array, and pointers to them */}} + encBuiltinRtids = append(encBuiltinRtids, xrtid, xptrtid) + decBuiltinRtids = append(decBuiltinRtids, xrtid, xptrtid) + i++ + } + {{/* do not register []byte in fastpath */}} + {{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} + fn([]{{ .Elem }}(nil)) + {{end}}{{end}}{{end}} + + {{range .Values}}{{if not .Primitive}}{{if .MapKey -}} + fn(map[{{ .MapKey }}]{{ .Elem }}(nil)) + {{end}}{{end}}{{end}} + + sort.Slice(fastpathAvRtid, func(i, j int) bool { return fastpathAvRtid[i] < fastpathAvRtid[j] }) + sort.Slice(fastpathAvRtRtid, func(i, j int) bool { return fastpathAvRtRtid[i].rtid < fastpathAvRtRtid[j].rtid }) + slices.Sort(encBuiltinRtids) + slices.Sort(decBuiltinRtids) +} + +func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} + case *[]{{ .Elem }}: + *v = nil +{{end}}{{end}}{{end}} +{{range .Values}}{{if not .Primitive}}{{if .MapKey -}} + case *map[{{ .MapKey }}]{{ .Elem }}: + *v = nil +{{end}}{{end}}{{end}} + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} diff --git a/vendor/github.com/ugorji/go/codec/fastpath.notmono.go.tmpl b/vendor/github.com/ugorji/go/codec/fastpath.notmono.go.tmpl new file mode 100644 index 000000000..a85a0d23a --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/fastpath.notmono.go.tmpl @@ -0,0 +1,525 @@ +//go:build !notfastpath && !codec.notfastpath && (notmono || codec.notmono) + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from fastpath.notmono.go.tmpl - DO NOT EDIT. 
+ +package codec + +import ( + "reflect" + "sort" + "slices" +) + +type fastpathE[T encDriver] struct { + rtid uintptr + rt reflect.Type + encfn func(*encoder[T], *encFnInfo, reflect.Value) +} +type fastpathD[T decDriver] struct { + rtid uintptr + rt reflect.Type + decfn func(*decoder[T], *decFnInfo, reflect.Value) +} +type fastpathEs[T encDriver] [{{ .FastpathLen }}]fastpathE[T] +type fastpathDs[T decDriver] [{{ .FastpathLen }}]fastpathD[T] + +type fastpathET[T encDriver] struct{} +type fastpathDT[T decDriver] struct{} + +func (helperEncDriver[T]) fastpathEList() *fastpathEs[T] { + var i uint = 0 + var s fastpathEs[T] + fn := func(v interface{}, fe func(*encoder[T], *encFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathE[T]{rt2id(xrt), xrt, fe} + i++ + } + {{/* do not register []byte in fastpath */}} + {{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} + fn([]{{ .Elem }}(nil), (*encoder[T]).{{ .MethodNamePfx "fastpathEnc" false }}R) + {{end}}{{end}}{{end}} + + {{range .Values}}{{if not .Primitive}}{{if .MapKey -}} + fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encoder[T]).{{ .MethodNamePfx "fastpathEnc" false }}R) + {{end}}{{end}}{{end}} + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperDecDriver[T]) fastpathDList() *fastpathDs[T] { + var i uint = 0 + var s fastpathDs[T] + fn := func(v interface{}, fd func(*decoder[T], *decFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathD[T]{rt2id(xrt), xrt, fd} + i++ + } + {{/* do not register []byte in fastpath */}} + {{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} + fn([]{{ .Elem }}(nil), (*decoder[T]).{{ .MethodNamePfx "fastpathDec" false }}R) + {{end}}{{end}}{{end}} + + {{range .Values}}{{if not .Primitive}}{{if .MapKey -}} + fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*decoder[T]).{{ .MethodNamePfx "fastpathDec" false }}R) + {{end}}{{end}}{{end}} + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +// -- encode + +// -- -- fast path type switch +func (helperEncDriver[T]) fastpathEncodeTypeSwitch(iv interface{}, e *encoder[T]) bool { + var ft fastpathET[T] + switch v := iv.(type) { + {{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} + case []{{ .Elem }}: + if v == nil { e.e.writeNilArray() } else { ft.{{ .MethodNamePfx "Enc" false }}V(v, e) } +{{end}}{{end}}{{end -}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey -}} + case map[{{ .MapKey }}]{{ .Elem }}: + if v == nil { e.e.writeNilMap() } else { ft.{{ .MethodNamePfx "Enc" false }}V(v, e) } +{{end}}{{end}}{{end -}} + + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +// -- -- fast path functions +{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} +func (e *encoder[T]) {{ .MethodNamePfx "fastpathEnc" false }}R(f *encFnInfo, rv reflect.Value) { + var ft fastpathET[T] + var v []{{ .Elem }} + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]{{ .Elem }}) + } + if f.ti.mbs { + ft.{{ .MethodNamePfx "EncAsMap" false }}V(v, e) + return + } + ft.{{ .MethodNamePfx "Enc" false }}V(v, e) +} +func (fastpathET[T]) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *encoder[T]) { + {{ if eq .Elem "uint8" "byte" -}} + e.e.EncodeStringBytesRaw(v) + {{ else -}} + if len(v) == 0 { + e.c = 0; e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem; e.e.WriteArrayElem(j == 0) + {{ encmd 
.Elem "v[j]"}} + } + e.c = 0; e.e.WriteArrayEnd() + {{ end -}} +} +func (fastpathET[T]) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *encoder[T]) { + if len(v) == 0 { + e.c = 0; e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2) + for j := range v { + if j&1 == 0 { // if j%2 == 0 { + e.c = containerMapKey; e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + {{ encmd .Elem "v[j]"}} + } + e.c = 0; e.e.WriteMapEnd() +} + +{{end}}{{end}}{{end -}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey -}} +func (e *encoder[T]) {{ .MethodNamePfx "fastpathEnc" false }}R(f *encFnInfo, rv reflect.Value) { + {{/* var ft fastpathET[T] + ft.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e) */ -}} + fastpathET[T]{}.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e) +} +func (fastpathET[T]) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *encoder[T]) { + {{/* if v == nil { e.e.EncodeNil(); return } */ -}} + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { {{/* need to figure out .NoCanonical */}} + {{if eq .MapKey "interface{}"}}{{/* out of band */ -}} + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesIntf, len(v)) + var l uint {{/* put loop variables outside. seems currently needed for better perf */}} + var vp *bytesIntf + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + slices.SortFunc(v2, cmpBytesIntf) + for j := range v2 { + e.c = containerMapKey; e.e.WriteMapElemKey(j == 0) + e.asis(v2[j].v) + e.mapElemValue() + e.encode(v[v2[j].i]) + } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) + for k := range v { + v2[i] = {{if eq $x .MapKey}}k{{else}}{{ $x }}(k){{end}} + i++ + } + slices.Sort(v2) + {{/* // sort.Sort({{ sorttype .MapKey false}}(v2)) */ -}} + for i, k2 := range v2 { + e.c = containerMapKey; e.e.WriteMapElemKey(i == 0) + {{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ $y := printf "%s(k2)" .MapKey }}{{if eq $x .MapKey }}{{ $y = "k2" }}{{end}}{{ encmd .MapKey $y }}{{end}} + e.mapElemValue() + {{ $y := printf "v[%s(k2)]" .MapKey }}{{if eq $x .MapKey }}{{ $y = "v[k2]" }}{{end}}{{ encmd .Elem $y }} + } {{end}} + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey; e.e.WriteMapElemKey(i == 0) + {{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ encmd .MapKey "k2"}}{{end}} + e.mapElemValue() + {{ encmd .Elem "v2"}} + i++ + } + } + e.c = 0; e.e.WriteMapEnd() +} +{{end}}{{end}}{{end -}} + +// -- decode + +// -- -- fast path type switch +func (helperDecDriver[T]) fastpathDecodeTypeSwitch(iv interface{}, d *decoder[T]) bool { + var ft fastpathDT[T] + var changed bool + var containerLen int + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} + case []{{ .Elem }}: + ft.{{ .MethodNamePfx "Dec" false }}N(v, d) + case *[]{{ .Elem }}: + var v2 []{{ .Elem }} + if v2, changed = ft.{{ .MethodNamePfx "Dec" false }}Y(*v, d); changed { + *v = v2 + } +{{end}}{{end}}{{end -}} +{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/* +// maps only change if nil, and in that case, there's no point copying +*/ -}} + case map[{{ .MapKey }}]{{ .Elem }}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if 
containerLen != 0 { + ft.{{ .MethodNamePfx "Dec" false }}L(v, containerLen, d) + } + d.mapEnd() + } + case *map[{{ .MapKey }}]{{ .Elem }}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.maxInitLen(), {{ .Size }})) + } + if containerLen != 0 { + ft.{{ .MethodNamePfx "Dec" false }}L(*v, containerLen, d) + } + d.mapEnd() + } +{{end}}{{end}}{{end -}} + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +// -- -- fast path functions +{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} +{{/* +Slices can change if they +- did not come from an array +- are addressable (from a ptr) +- are settable (e.g. contained in an interface{}) +*/}} +func (d *decoder[T]) {{ .MethodNamePfx "fastpathDec" false }}R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + {{/* + // seqTypeArray=true means that we are not getting a pointer, so no need to check that. + if f.seq != seqTypeArray && rv.Kind() == reflect.Ptr { + */ -}} + switch rv.Kind() { + case reflect.Ptr: {{- /* this block is called for types that wrap a fastpath type e.g. wrapSliceUint64 */}} + v := rv2i(rv).(*[]{{ .Elem }}) + if vv, changed := ft.{{ .MethodNamePfx "Dec" false }}Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []{{ .Elem }} + rvGetSlice4Array(rv, &v) + ft.{{ .MethodNamePfx "Dec" false }}N(v, d) + default: + ft.{{ .MethodNamePfx "Dec" false }}N(rv2i(rv).([]{{ .Elem }}), d) + } +} +func (fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}Y(v []{{ .Elem }}, d *decoder[T]) (v2 []{{ .Elem }}, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + {{ if eq .Elem "uint8" "byte" -}} + if ctyp != valueTypeMap { + var dbi dBytesIntoState + v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false) + return v2, dbi != dBytesIntoParamOut + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + {{ else -}} + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + {{ end -}} + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []{{ .Elem }}) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), {{ .Size }})); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]{{ .Elem }}, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + {{ if eq .Elem "uint8" "byte" }}{{ else -}} + if isArray { d.arrayElem(j == 0) } else + {{ end -}} + if j&1 == 0 { d.mapElemKey(j == 0) } else { d.mapElemValue() } + if j >= len(v) { {{- /* // if indefinite, json, etc, then expand the slice (if necessary) */}} + fnv(append(v, {{ zerocmd .Elem }})) + } + {{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem false }}{{ end }} + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]{{ .Elem }}{}) + } + {{ if eq .Elem "uint8" "byte" -}} + d.mapEnd() + {{ else -}} + if isArray { d.arrayEnd() } else { d.mapEnd() } + {{ end -}} + return v, 
changed +} +func (fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}N(v []{{ .Elem }}, d *decoder[T]) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + {{ if eq .Elem "uint8" "byte" -}} + if ctyp != valueTypeMap { + d.decodeBytesInto(v[:len(v):len(v)], true) + return + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + {{ else -}} + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + {{ end -}} + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + {{/* // if indefinite, etc, then expand the slice if necessary */ -}} + {{ if not (eq .Elem "uint8" "byte") -}} + if isArray { d.arrayElem(j == 0) } else + {{ end -}} + if j&1 == 0 { d.mapElemKey(j == 0) } else { d.mapElemValue() } + if j < len(v) { + {{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem false }}{{ end }} + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + {{ if eq .Elem "uint8" "byte" -}} + d.mapEnd() + {{ else -}} + if isArray { d.arrayEnd() } else { d.mapEnd() } + {{ end -}} +} +{{end}}{{end}}{{end -}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey -}} +{{/* +Maps can change if they are +- addressable (from a ptr) +- settable (e.g. contained in an interface{}) +*/ -}} +func (d *decoder[T]) {{ .MethodNamePfx "fastpathDec" false }}R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDT[T] + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { {{- /* this block is called for types that wrap a fastpath type e.g. wrapMapStringUint64 */}} + vp, _ := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }}) + if *vp == nil { + *vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.maxInitLen(), {{ .Size }})) + } + if containerLen != 0 { + ft.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.{{ .MethodNamePfx "Dec" false }}L(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}L(v map[{{ .MapKey }}]{{ .Elem }}, containerLen int, d *decoder[T]) { + if v == nil { + halt.errorInt("cannot decode into nil map[{{ .MapKey }}]{{ .Elem }} given stream length: ", int64(containerLen)) + {{/* d.swallowMapContents(containerLen); return */ -}} + } + {{if eq .MapKey "interface{}" -}} + var mk {{ .MapKey }} + {{end -}} + {{ if eq .Elem "interface{}" "[]byte" "bytes" -}} + var mv {{ .Elem }} + mapGet := !d.h.MapValueReset + {{- if eq .Elem "interface{}" -}} + && !d.h.InterfaceReset + {{- end}} + {{end -}} + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + {{ if eq .MapKey "interface{}" -}} + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.detach2Str(bv) {{/* // maps cannot have []byte as key. switch to string. 
*/}} + }{{ else }}mk := {{ decmd .MapKey true }}{{ end }} + d.mapElemValue() + {{ if eq .Elem "interface{}" "[]byte" "bytes" -}} + if mapGet { mv = v[mk] } else { mv = nil } + {{ end -}} + {{ if eq .Elem "interface{}" -}} + d.decode(&mv) + v[mk] = mv + {{ else if eq .Elem "[]byte" "bytes" -}} + v[mk], _ = d.decodeBytesInto(mv, false) + {{ else -}} + v[mk] = {{ decmd .Elem false }} + {{ end -}} + } +} +{{end}}{{end}}{{end}} + +{{- /* + +// -- -- fast path type switch +func (helperEncDriver[T]) fastpathEncodeTypeSwitch(iv interface{}, e *encoder[T]) bool { + var ft fastpathET[T] + switch v := iv.(type) { + {{range .Values}}{{if not .Primitive}}{{if not .MapKey -}} + case []{{ .Elem }}: + if v != nil { + ft.{{ .MethodNamePfx "Enc" false }}V(v, e) + } else if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } + case *[]{{ .Elem }}: + if *v != nil { + ft.{{ .MethodNamePfx "Enc" false }}V(*v, e) + } else if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } +{{end}}{{end}}{{end -}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey -}} + case map[{{ .MapKey }}]{{ .Elem }}: + if v != nil { + ft.{{ .MethodNamePfx "Enc" false }}V(v, e) + } else if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } + case *map[{{ .MapKey }}]{{ .Elem }}: + if *v != nil { + ft.{{ .MethodNamePfx "Enc" false }}V(*v, e) + } else if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } +{{end}}{{end}}{{end -}} + + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +// used within codecgen, which is no longer supported +func (f fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *decoder[T]) { + if v, changed := f.{{ .MethodNamePfx "Dec" false }}Y(*vp, d); changed { *vp = v } +} + +func (f fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *decoder[T]) { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == containerLenNil { + *vp = nil + return + } + if *vp == nil { + *vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.maxInitLen(), {{ .Size }})) + } + if containerLen != 0 { + f.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d) + } + d.mapEnd() +} + +*/ -}} diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl deleted file mode 100644 index 5e119e715..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl +++ /dev/null @@ -1,90 +0,0 @@ -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}} -{{if not isArray -}} -var {{var "c"}} bool {{/* // changed */}} -_ = {{var "c"}} -if {{var "h"}}.IsNil { - if {{var "v"}} != nil { - {{var "v"}} = nil - {{var "c"}} = true - } -} else {{end -}} -if {{var "l"}} == 0 { - {{if isSlice -}} - if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{else if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } - {{end -}} -} else { - {{var "hl"}} := {{var "l"}} > 0 - var {{var "rl"}} int - _ = {{var "rl"}} - {{if isSlice }} if {{var "hl"}} { - if {{var "l"}} > cap({{var "v"}}) { - {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, 
{{ .Size }}) - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = true - } else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } - } - {{end -}} - var {{var "j"}} int - {{/* // var {{var "dn"}} bool */ -}} - for {{var "j"}} = 0; z.DecContainerNext({{var "j"}}, {{var "l"}}, {{var "hl"}}); {{var "j"}}++ { - {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { - if {{var "hl"}} { - {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - } else { - {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} - } - {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) - {{var "c"}} = true - } - {{end -}} - {{var "h"}}.ElemContainerState({{var "j"}}) - {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */ -}} - {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} - {{ decLineVar $x -}} - {{var "v"}} <- {{ $x }} - {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */ -}} - var {{var "db"}} bool - if {{var "j"}} >= len({{var "v"}}) { - {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) - {{var "c"}} = true - {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true - {{end -}} - } - if {{var "db"}} { - z.DecSwallow() - } else { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x -}} - } - {{end -}} - } - {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } - {{end -}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -} -{{end -}} diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl deleted file mode 100644 index b32ade2e1..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl +++ /dev/null @@ -1,58 +0,0 @@ -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := z.DecReadMapStart() -if {{var "l"}} == codecSelferDecContainerLenNil{{xs}} { - *{{ .Varname }} = nil -} else { -if {{var "v"}} == nil { - {{var "rl"}} := z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -{{ $mk := var "mk" -}} -var {{ $mk }} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool -if z.DecBasicHandle().MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !z.DecBasicHandle().InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} != 0 { - {{var "hl"}} := {{var "l"}} > 0 - for {{var "j"}} := 0; z.DecContainerNext({{var "j"}}, {{var "l"}}, {{var "hl"}}); {{var "j"}}++ { - z.DecReadMapElemKey() - {{ if eq .KTyp "string" -}} - {{ decLineVarK $mk -}}{{- /* decLineVarKStrZC $mk */ -}} - {{ else -}} - {{ decLineVarK $mk -}} - {{ end -}} - {{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/ -}} - if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = z.DecStringZC({{var "bv"}}) - } - {{ end -}} - {{if decElemKindPtr -}} - {{var "ms"}} = true - {{end -}} - if {{var "mg"}} { - {{if decElemKindPtr -}} - {{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{ $mk }}] - if {{var "mok"}} { - {{var "ms"}} = false - } - {{else -}} - {{var "mv"}} = {{var "v"}}[{{ $mk }}] - {{end -}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecReadMapElemValue() - {{var "mdn"}} = false - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y -}} - if {{var "mdn"}} { - {{var "v"}}[{{ $mk }}] = {{decElemZero}} - } else {{if decElemKindPtr}} if {{var "ms"}} {{end}} { - {{var "v"}}[{{ $mk }}] = {{var "mv"}} - } -} -} // else len==0: leave as-is (do not clear map entries) -z.DecReadMapEnd() -} diff --git a/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl deleted file mode 100644 index 4249588a3..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl +++ /dev/null @@ -1,27 +0,0 @@ -{{.Label}}: -switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; { -case timeout{{.Sfx}} == 0: // only consume available - for { - select { - case b{{.Sfx}} := <-{{.Chan}}: - {{ .Slice }} = append({{.Slice}}, b{{.Sfx}}) - default: - break {{.Label}} - } - } -case timeout{{.Sfx}} > 0: // consume until timeout - tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}}) - for { - select { - case b{{.Sfx}} := <-{{.Chan}}: - {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) - case <-tt{{.Sfx}}.C: - // close(tt.C) - break {{.Label}} - } - } -default: // consume until close - for b{{.Sfx}} := range {{.Chan}} { - {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) - } -} diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go deleted file mode 100644 index 9a145f140..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go +++ /dev/null @@ -1,294 +0,0 @@ -// comment this out // + build ignore - -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// Code generated from gen-helper.go.tmpl - DO NOT EDIT. - -package codec - -import ( - "encoding" - "reflect" -) - -// GenVersion is the current version of codecgen. -const GenVersion = 28 - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continuously and without notice. - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// -// Library users: DO NOT USE IT DIRECTLY or INDIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelper() (g genHelper) { return } - -type genHelper struct{} - -func (genHelper) Encoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) { - ge = genHelperEncoder{e: e} - ee = genHelperEncDriver{encDriver: e.e} - return -} - -func (genHelper) Decoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) { - gd = genHelperDecoder{d: d} - dd = genHelperDecDriver{decDriver: d.d} - return -} - -type genHelperEncDriver struct { - encDriver -} - -type genHelperDecDriver struct { - decDriver -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -type genHelperEncoder struct { - M mustHdl - F fastpathT - e *Encoder -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperDecoder struct { - C checkOverflow - F fastpathT - d *Decoder -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWr() *encWr { - return f.e.w() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // f.e.encodeI(iv, false, false) - f.e.encodeValue(reflect.ValueOf(iv), nil) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshalUtf8(bs, fnerr) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshalAsis(bs, fnerr) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshalRaw(bs, fnerr) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) Extension(v interface{}) (xfn *extTypeTagFn) { - return f.e.h.getExtForI(v) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { - f.e.e.EncodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteMapStart(length int) { f.e.mapStart(length) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteMapEnd() { f.e.mapEnd() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteArrayStart(length int) { f.e.arrayStart(length) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteArrayEnd() { f.e.arrayEnd() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteArrayElem() { f.e.arrayElem() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteMapElemKey() { f.e.mapElemKey() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteMapElemValue() { f.e.mapElemValue() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncEncodeComplex64(v complex64) { f.e.encodeComplex64(v) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperEncoder) EncEncodeComplex128(v complex128) { f.e.encodeComplex128(v) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncEncode(v interface{}) { f.e.encode(v) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFnGivenAddr(v interface{}) *codecFn { - return f.e.h.fn(reflect.TypeOf(v).Elem()) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncEncodeNumBoolStrKindGivenAddr(v interface{}, encFn *codecFn) { - f.e.encodeValueNonNil(reflect.ValueOf(v).Elem(), encFn) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncEncodeMapNonNil(v interface{}) { - if skipFastpathTypeSwitchInDirectCall || !fastpathEncodeTypeSwitch(v, f.e) { - f.e.encodeValueNonNil(reflect.ValueOf(v), nil) - } -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { f.d.swallow() } - -// // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// func (f genHelperDecoder) DecScratchBuffer() []byte { -// return f.d.b[:] -// } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { - return &f.d.b -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - rv := reflect.ValueOf(iv) - if chkPtr { - if x, _ := isDecodeable(rv); !x { - f.d.haltAsNotDecodeable(rv) - } - } - f.d.decodeValue(rv, nil) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - halt.onerror(tm.UnmarshalText(f.d.d.DecodeStringAsBytes())) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - f.d.jsonUnmarshalV(tm) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - halt.onerror(bm.UnmarshalBinary(f.d.d.DecodeBytes(nil))) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) Extension(v interface{}) (xfn *extTypeTagFn) { - return f.d.h.getExtForI(v) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { - f.d.d.DecodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { - return decInferLen(clen, maxlen, unit) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadMapStart() int { return f.d.mapStart(f.d.d.ReadMapStart()) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadMapEnd() { f.d.mapEnd() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadArrayStart() int { return f.d.arrayStart(f.d.d.ReadArrayStart()) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadArrayEnd() { f.d.arrayEnd() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadArrayElem() { f.d.arrayElem() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadMapElemKey() { f.d.mapElemKey() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadMapElemValue() { f.d.mapElemValue() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecDecodeFloat32() float32 { return f.d.decodeFloat32() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStringZC(v []byte) string { return f.d.stringZC(v) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecodeBytesInto(v []byte) []byte { return f.d.decodeBytesInto(v) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerNext(j, containerLen int, hasLen bool) bool { - // return f.d.containerNext(j, containerLen, hasLen) - // rewriting so it can be inlined - if hasLen { - return j < containerLen - } - return !f.d.checkBreak() -} diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl deleted file mode 100644 index bf824ebdd..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl +++ /dev/null @@ -1,273 +0,0 @@ -// comment this out // + build ignore - -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// Code generated from gen-helper.go.tmpl - DO NOT EDIT. - -package codec - -import ( - "encoding" - "reflect" -) - -// GenVersion is the current version of codecgen. -const GenVersion = {{ .Version }} - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continuously and without notice. - -{{/* -// To help enforce this, we create an unexported type with exported members. 
-// The only way to get the type is via the one exported type that we control (somewhat). -// -// When static codecs are created for types, they will use this value -// to perform encoding or decoding of primitives or known slice or map types. -*/ -}} - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// -// Library users: DO NOT USE IT DIRECTLY or INDIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelper() (g genHelper) { return } - -type genHelper struct {} - -func (genHelper) Encoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) { - ge = genHelperEncoder{e: e} - ee = genHelperEncDriver{encDriver: e.e} - return -} - -func (genHelper) Decoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) { - gd = genHelperDecoder{d: d} - dd = genHelperDecDriver{decDriver: d.d} - return -} - -type genHelperEncDriver struct { - encDriver -} - -type genHelperDecDriver struct { - decDriver -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperEncoder struct { - M mustHdl - F fastpathT - e *Encoder -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperDecoder struct { - C checkOverflow - F fastpathT - d *Decoder -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWr() *encWr { - return f.e.w() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // f.e.encodeI(iv, false, false) - f.e.encodeValue(reflect.ValueOf(iv), nil) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshalUtf8(bs, fnerr) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshalAsis(bs, fnerr) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshalRaw(bs, fnerr) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) Extension(v interface{}) (xfn *extTypeTagFn) { - return f.e.h.getExtForI(v) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { - f.e.e.EncodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteMapStart(length int) { f.e.mapStart(length) } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperEncoder) EncWriteMapEnd() { f.e.mapEnd() } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteArrayStart(length int) { f.e.arrayStart(length) } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteArrayEnd() { f.e.arrayEnd() } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteArrayElem() { f.e.arrayElem() } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteMapElemKey() { f.e.mapElemKey() } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncWriteMapElemValue() { f.e.mapElemValue() } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncEncodeComplex64(v complex64) { f.e.encodeComplex64(v) } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncEncodeComplex128(v complex128) { f.e.encodeComplex128(v) } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncEncode(v interface{}) { f.e.encode(v) } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFnGivenAddr(v interface{}) *codecFn { return f.e.h.fn(reflect.TypeOf(v).Elem()) } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncEncodeNumBoolStrKindGivenAddr(v interface{}, encFn *codecFn) { - f.e.encodeValueNonNil(reflect.ValueOf(v).Elem(), encFn) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncEncodeMapNonNil(v interface{}) { - if skipFastpathTypeSwitchInDirectCall || !fastpathEncodeTypeSwitch(v, f.e) { - f.e.encodeValueNonNil(reflect.ValueOf(v), nil) - } -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { f.d.swallow() } - -// // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// func (f genHelperDecoder) DecScratchBuffer() []byte { -// return f.d.b[:] -// } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { - return &f.d.b -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - rv := reflect.ValueOf(iv) - if chkPtr { - if x, _ := isDecodeable(rv); !x { - f.d.haltAsNotDecodeable(rv) - } - } - f.d.decodeValue(rv, nil) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} -// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - halt.onerror(tm.UnmarshalText(f.d.d.DecodeStringAsBytes())) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - f.d.jsonUnmarshalV(tm) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - halt.onerror(bm.UnmarshalBinary(f.d.d.DecodeBytes(nil))) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) Extension(v interface{}) (xfn *extTypeTagFn) { - return f.d.h.getExtForI(v) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { - f.d.d.DecodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { - return decInferLen(clen, maxlen, unit) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadMapStart() int { return f.d.mapStart(f.d.d.ReadMapStart()) } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadMapEnd() { f.d.mapEnd() } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadArrayStart() int { return f.d.arrayStart(f.d.d.ReadArrayStart()) } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadArrayEnd() { f.d.arrayEnd() } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadArrayElem() { f.d.arrayElem() } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadMapElemKey() { f.d.mapElemKey() } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecReadMapElemValue() { f.d.mapElemValue() } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecDecodeFloat32() float32 { return f.d.decodeFloat32() } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStringZC(v []byte) string { return f.d.stringZC(v) } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecodeBytesInto(v []byte) []byte { return f.d.decodeBytesInto(v) } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecContainerNext(j, containerLen int, hasLen bool) bool { - // return f.d.containerNext(j, containerLen, hasLen) - // rewriting so it can be inlined - if hasLen { - return j < containerLen - } - return !f.d.checkBreak() -} - -{{/* -// MARKER: remove WriteStr, as it cannot be inlined as of 20230201. -// Instead, generated code calls (*encWr).WriteStr directly. - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -// func (f genHelperEncoder) WriteStr(s string) { -// f.e.encWr.writestr(s) -// } - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) I2Rtid(v interface{}) uintptr { - return i2rtid(v) -} - -*/ -}} diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go deleted file mode 100644 index 277180a01..000000000 --- a/vendor/github.com/ugorji/go/codec/gen.generated.go +++ /dev/null @@ -1,192 +0,0 @@ -// +build codecgen.exec - -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl - -const genDecMapTmpl = ` -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := z.DecReadMapStart() -if {{var "l"}} == codecSelferDecContainerLenNil{{xs}} { - *{{ .Varname }} = nil -} else { -if {{var "v"}} == nil { - {{var "rl"}} := z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -{{ $mk := var "mk" -}} -var {{ $mk }} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool -if z.DecBasicHandle().MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !z.DecBasicHandle().InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} != 0 { - {{var "hl"}} := {{var "l"}} > 0 - for {{var "j"}} := 0; z.DecContainerNext({{var "j"}}, {{var "l"}}, {{var "hl"}}); {{var "j"}}++ { - z.DecReadMapElemKey() - {{ if eq .KTyp "string" -}} - {{ decLineVarK $mk -}}{{- /* decLineVarKStrZC $mk */ -}} - {{ else -}} - {{ decLineVarK $mk -}} - {{ end -}} - {{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/ -}} - if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = z.DecStringZC({{var "bv"}}) - } - {{ end -}} - {{if decElemKindPtr -}} - {{var "ms"}} = true - {{end -}} - if {{var "mg"}} { - {{if decElemKindPtr -}} - {{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{ $mk }}] - if {{var "mok"}} { - {{var "ms"}} = false - } - {{else -}} - {{var "mv"}} = {{var "v"}}[{{ $mk }}] - {{end -}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecReadMapElemValue() - {{var "mdn"}} = false - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y -}} - if {{var "mdn"}} { - {{var "v"}}[{{ $mk }}] = {{decElemZero}} - } else {{if decElemKindPtr}} if {{var "ms"}} {{end}} { - {{var "v"}}[{{ $mk }}] = {{var "mv"}} - } -} -} // else len==0: leave as-is (do not clear map entries) -z.DecReadMapEnd() -} -` - -const genDecListTmpl = ` -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}} -{{if not isArray -}} -var {{var "c"}} bool {{/* // changed */}} -_ = {{var "c"}} -if {{var "h"}}.IsNil { - if {{var "v"}} != nil { - {{var "v"}} = nil - {{var "c"}} = true - } -} else {{end -}} -if {{var "l"}} == 0 { - {{if isSlice -}} - if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{else if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } - {{end -}} -} else { - {{var "hl"}} := {{var "l"}} > 0 - var {{var "rl"}} int - _ = {{var "rl"}} - {{if isSlice }} if {{var "hl"}} { - if {{var "l"}} > cap({{var "v"}}) { - {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = true - } else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } - } - {{end -}} - var {{var "j"}} int - {{/* // var {{var "dn"}} bool */ -}} - for {{var "j"}} = 0; z.DecContainerNext({{var "j"}}, {{var "l"}}, {{var "hl"}}); {{var "j"}}++ { - {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { - if {{var "hl"}} { - {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - } else { - {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} - } - {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) - {{var "c"}} = true - } - {{end -}} - {{var "h"}}.ElemContainerState({{var "j"}}) - {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */ -}} - {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} - {{ decLineVar $x -}} - {{var "v"}} <- {{ $x }} - {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */ -}} - var {{var "db"}} bool - if {{var "j"}} >= len({{var "v"}}) { - {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) - {{var "c"}} = true - {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true - {{end -}} - } - if {{var "db"}} { - z.DecSwallow() - } else { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x -}} - } - {{end -}} - } - {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { - {{var "v"}} = {{var 
"v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } - {{end -}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -} -{{end -}} -` - -const genEncChanTmpl = ` -{{.Label}}: -switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; { -case timeout{{.Sfx}} == 0: // only consume available - for { - select { - case b{{.Sfx}} := <-{{.Chan}}: - {{ .Slice }} = append({{.Slice}}, b{{.Sfx}}) - default: - break {{.Label}} - } - } -case timeout{{.Sfx}} > 0: // consume until timeout - tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}}) - for { - select { - case b{{.Sfx}} := <-{{.Chan}}: - {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) - case <-tt{{.Sfx}}.C: - // close(tt.C) - break {{.Label}} - } - } -default: // consume until close - for b{{.Sfx}} := range {{.Chan}} { - {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) - } -} -` diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go index 0026e3e1c..4e521adf8 100644 --- a/vendor/github.com/ugorji/go/codec/gen.go +++ b/vendor/github.com/ugorji/go/codec/gen.go @@ -1,8 +1,7 @@ // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. -//go:build codecgen.exec -// +build codecgen.exec +//go:build codec.build package codec @@ -13,2145 +12,64 @@ import ( "fmt" "go/format" "io" - "io/ioutil" - "math/rand" "os" - "reflect" "regexp" - "sort" - "strconv" "strings" "sync" "text/template" - "time" // "ugorji.net/zz" - "unicode" - "unicode/utf8" ) // --------------------------------------------------- -// codecgen supports the full cycle of reflection-based codec: -// - RawExt -// - Raw -// - Extensions -// - (Binary|Text|JSON)(Unm|M)arshal -// - generic by-kind -// -// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. -// In those areas, we try to only do reflection or interface-conversion when NECESSARY: -// - Extensions, only if Extensions are configured. -// -// However, note following codecgen caveats: -// - Canonical option. -// If Canonical=true, codecgen'ed code may delegate encoding maps to reflection-based code. -// This is due to the runtime work needed to marshal a map in canonical mode. -// However, if map key is a pre-defined/builtin numeric or string type, codecgen -// will try to write it out itself -// - CheckCircularRef option. -// When encoding a struct, a circular reference can lead to a stack overflow. -// If CheckCircularRef=true, codecgen'ed code will delegate encoding structs to reflection-based code. -// - MissingFielder implementation. -// If a type implements MissingFielder, a Selfer is not generated (with a warning message). -// Statically reproducing the runtime work needed to extract the missing fields and marshal them -// along with the struct fields, while handling the Canonical=true special case, was onerous to implement. -// -// During encode/decode, Selfer takes precedence. -// A type implementing Selfer will know how to encode/decode itself statically. -// -// The following field types are supported: -// array: [n]T -// slice: []T -// map: map[K]V -// primitive: [u]int[n], float(32|64), bool, string -// struct -// -// --------------------------------------------------- -// Note that a Selfer cannot call (e|d).(En|De)code on itself, -// as this will cause a circular reference, as (En|De)code will call Selfer methods. 
-// Any type that implements Selfer must implement completely and not fallback to (En|De)code. -// -// In addition, code in this file manages the generation of fast-path implementations of -// encode/decode of slices/maps of primitive keys/values. -// -// Users MUST re-generate their implementations whenever the code shape changes. -// The generated code will panic if it was generated with a version older than the supporting library. -// --------------------------------------------------- -// -// codec framework is very feature rich. -// When encoding or decoding into an interface, it depends on the runtime type of the interface. -// The type of the interface may be a named type, an extension, etc. -// Consequently, we fallback to runtime codec for encoding/decoding interfaces. -// In addition, we fallback for any value which cannot be guaranteed at runtime. -// This allows us support ANY value, including any named types, specifically those which -// do not implement our interfaces (e.g. Selfer). -// -// This explains some slowness compared to other code generation codecs (e.g. msgp). -// This reduction in speed is only seen when your refers to interfaces, -// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} } -// -// codecgen will panic if the file was generated with an old version of the library in use. -// -// Note: -// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. -// This way, there isn't a function call overhead just to see that we should not enter a block of code. -// -// Note: -// codecgen-generated code depends on the variables defined by fast-path.generated.go. -// consequently, you cannot run with tags "codecgen codec.notfastpath". -// -// Note: -// genInternalXXX functions are used for generating fast-path and other internally generated -// files, and not for use in codecgen. - -// Size of a struct or value is not portable across machines, especially across 32-bit vs 64-bit -// operating systems. This is due to types like int, uintptr, pointers, (and derived types like slice), etc -// which use the natural word size on those machines, which may be 4 bytes (on 32-bit) or 8 bytes (on 64-bit). -// -// Within decInferLen calls, we may generate an explicit size of the entry. -// We do this because decInferLen values are expected to be approximate, -// and serve as a good hint on the size of the elements or key+value entry. -// -// Since development is done on 64-bit machines, the sizes will be roughly correctly -// on 64-bit OS, and slightly larger than expected on 32-bit OS. -// This is ok. -// -// For reference, look for 'Size' in fast-path.go.tmpl, gen-dec-(array|map).go.tmpl and gen.go (this file). - -// GenVersion is the current version of codecgen. -// -// MARKER: Increment this value each time codecgen changes fundamentally. -// Also update codecgen/gen.go (minimumCodecVersion, genVersion, etc). -// Fundamental changes are: -// - helper methods change (signature change, new ones added, some removed, etc) -// - codecgen command line changes -// -// v1: Initial Version -// v2: - -// v3: For Kubernetes: changes in signature of some unpublished helper methods and codecgen cmdline arguments. -// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) -// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. 
-// v6: removed unsafe from gen, and now uses codecgen.exec tag -// v7: - -// v8: current - we now maintain compatibility with old generated code. -// v9: - skipped -// v10: modified encDriver and decDriver interfaces. -// v11: remove deprecated methods of encDriver and decDriver. -// v12: removed deprecated methods from genHelper and changed container tracking logic -// v13: 20190603 removed DecodeString - use DecodeStringAsBytes instead -// v14: 20190611 refactored nil handling: TryDecodeAsNil -> selective TryNil, etc -// v15: 20190626 encDriver.EncodeString handles StringToRaw flag inside handle -// v16: 20190629 refactoring for v1.1.6 -// v17: 20200911 reduce number of types for which we generate fast path functions (v1.1.8) -// v18: 20201004 changed definition of genHelper...Extension (to take interface{}) and eliminated I2Rtid method -// v19: 20201115 updated codecgen cmdline flags and optimized output -// v20: 20201120 refactored GenHelper to one exported function -// v21: 20210104 refactored generated code to honor ZeroCopy=true for more efficiency -// v22: 20210118 fixed issue in generated code when encoding a type which is also a codec.Selfer -// v23: 20210203 changed slice/map types for which we generate fast-path functions -// v24: 20210226 robust handling for Canonical|CheckCircularRef flags and MissingFielder implementations -// v25: 20210406 pass base reflect.Type to side(En|De)code and (En|De)codeExt calls -// v26: 20230201 genHelper changes for more inlining and consequent performance -// v27: 20230219 fix error decoding struct from array - due to misplaced counter increment -// v28: 20230224 fix decoding missing fields of struct from array, due to double counter increment -const genVersion = 28 const ( - genCodecPkg = "codec1978" // MARKER: keep in sync with codecgen/gen.go - genTempVarPfx = "yy" genTopLevelVarName = "x" - // ignore canBeNil parameter, and always set to true. - // This is because nil can appear anywhere, so we should always check. - genAnythingCanBeNil = true - - // genStructCanonical configures whether we generate 2 paths based on Canonical flag - // when encoding struct fields. - genStructCanonical = true - - // genFastpathCanonical configures whether we support Canonical in fast path. - // The savings is not much. + // genFastpathCanonical configures whether we support Canonical in fast path. Low savings. // - // MARKER: This MUST ALWAYS BE TRUE. fast-path.go.tmp doesn't handle it being false. + // MARKER: This MUST ALWAYS BE TRUE. fastpath.go.tmpl doesn't handle it being false. genFastpathCanonical = true // genFastpathTrimTypes configures whether we trim uncommon fastpath types. genFastpathTrimTypes = true ) -type genStringDecAsBytes string -type genStringDecZC string - -var genStringDecAsBytesTyp = reflect.TypeOf(genStringDecAsBytes("")) -var genStringDecZCTyp = reflect.TypeOf(genStringDecZC("")) var genFormats = []string{"Json", "Cbor", "Msgpack", "Binc", "Simple"} var ( errGenAllTypesSamePkg = errors.New("All types must be in the same package") errGenExpectArrayOrMap = errors.New("unexpected type - expecting array/map/slice") - errGenUnexpectedTypeFastpath = errors.New("fast-path: unexpected type - requires map or slice") + errGenUnexpectedTypeFastpath = errors.New("fastpath: unexpected type - requires map or slice") // don't use base64, only 63 characters allowed in valid go identifiers // ie ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_ // // don't use numbers, as a valid go identifer must start with a letter. 
- genTypenameEnc = base32.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef") - genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) + genTypenameEnc = base32.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef") + genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) ) -type genBuf struct { - buf []byte +// -------- + +func genCheckErr(err error) { + halt.onerror(err) } -func (x *genBuf) sIf(b bool, s, t string) *genBuf { - if b { - x.buf = append(x.buf, s...) - } else { - x.buf = append(x.buf, t...) - } - return x -} -func (x *genBuf) s(s string) *genBuf { x.buf = append(x.buf, s...); return x } -func (x *genBuf) b(s []byte) *genBuf { x.buf = append(x.buf, s...); return x } -func (x *genBuf) v() string { return string(x.buf) } -func (x *genBuf) f(s string, args ...interface{}) { x.s(fmt.Sprintf(s, args...)) } -func (x *genBuf) reset() { - if x.buf != nil { - x.buf = x.buf[:0] - } -} - -// genRunner holds some state used during a Gen run. -type genRunner struct { - w io.Writer // output - c uint64 // counter used for generating varsfx - f uint64 // counter used for saying false - - t []reflect.Type // list of types to run selfer on - tc reflect.Type // currently running selfer on this type - te map[uintptr]bool // types for which the encoder has been created - td map[uintptr]bool // types for which the decoder has been created - tz map[uintptr]bool // types for which GenIsZero has been created - - cp string // codec import path - - im map[string]reflect.Type // imports to add - imn map[string]string // package names of imports to add - imc uint64 // counter for import numbers - - is map[reflect.Type]struct{} // types seen during import search - bp string // base PkgPath, for which we are generating for - - cpfx string // codec package prefix - - ty map[reflect.Type]struct{} // types for which GenIsZero *should* be created - tm map[reflect.Type]struct{} // types for which enc/dec must be generated - ts []reflect.Type // types for which enc/dec must be generated - - xs string // top level variable/constant suffix - hn string // fn helper type name - - ti *TypeInfos - // rr *rand.Rand // random generator for file-specific types - - jsonOnlyWhen, toArrayWhen, omitEmptyWhen *bool - - nx bool // no extensions -} - -type genIfClause struct { - hasIf bool -} - -func (g *genIfClause) end(x *genRunner) { - if g.hasIf { - x.line("}") - } -} - -func (g *genIfClause) c(last bool) (v string) { - if last { - if g.hasIf { - v = " } else { " - } - } else if g.hasIf { - v = " } else if " - } else { - v = "if " - g.hasIf = true - } - return -} - -// Gen will write a complete go file containing Selfer implementations for each -// type passed. All the types must be in the same package. -// -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE. -func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool, - jsonOnlyWhen, toArrayWhen, omitEmptyWhen *bool, - ti *TypeInfos, types ...reflect.Type) (warnings []string) { - // All types passed to this method do not have a codec.Selfer method implemented directly. - // codecgen already checks the AST and skips any types that define the codec.Selfer methods. 
- // Consequently, there's no need to check and trim them if they implement codec.Selfer - - if len(types) == 0 { - return - } - x := genRunner{ - w: w, - t: types, - te: make(map[uintptr]bool), - td: make(map[uintptr]bool), - tz: make(map[uintptr]bool), - im: make(map[string]reflect.Type), - imn: make(map[string]string), - is: make(map[reflect.Type]struct{}), - tm: make(map[reflect.Type]struct{}), - ty: make(map[reflect.Type]struct{}), - ts: []reflect.Type{}, - bp: genImportPath(types[0]), - xs: uid, - ti: ti, - jsonOnlyWhen: jsonOnlyWhen, - toArrayWhen: toArrayWhen, - omitEmptyWhen: omitEmptyWhen, - - nx: noExtensions, - } - if x.ti == nil { - x.ti = defTypeInfos - } - if x.xs == "" { - rr := rand.New(rand.NewSource(time.Now().UnixNano())) - x.xs = strconv.FormatInt(rr.Int63n(9999), 10) - } - - // gather imports first: - x.cp = genImportPath(reflect.TypeOf(x)) - x.imn[x.cp] = genCodecPkg - - // iterate, check if all in same package, and remove any missingfielders - for i := 0; i < len(x.t); { - t := x.t[i] - // xdebugf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) - if genImportPath(t) != x.bp { - halt.onerror(errGenAllTypesSamePkg) - } - ti1 := x.ti.get(rt2id(t), t) - if ti1.flagMissingFielder || ti1.flagMissingFielderPtr { - // output diagnostic message - that nothing generated for this type - warnings = append(warnings, fmt.Sprintf("type: '%v' not generated; implements codec.MissingFielder", t)) - copy(x.t[i:], x.t[i+1:]) - x.t = x.t[:len(x.t)-1] - continue - } - x.genRefPkgs(t) - i++ - } - - x.line("// +build go1.6") - if buildTags != "" { - x.line("// +build " + buildTags) - } - x.line(` - -// Code generated by codecgen - DO NOT EDIT. - -`) - x.line("package " + pkgName) - x.line("") - x.line("import (") - if x.cp != x.bp { - x.cpfx = genCodecPkg + "." 
- x.linef("%s \"%s\"", genCodecPkg, x.cp) - } - // use a sorted set of im keys, so that we can get consistent output - imKeys := make([]string, 0, len(x.im)) - for k := range x.im { - imKeys = append(imKeys, k) - } - sort.Strings(imKeys) - for _, k := range imKeys { // for k, _ := range x.im { - if k == x.imn[k] { - x.linef("\"%s\"", k) - } else { - x.linef("%s \"%s\"", x.imn[k], k) - } - } - // add required packages - for _, k := range [...]string{"runtime", "errors", "strconv", "sort"} { // "reflect", "fmt" - if _, ok := x.im[k]; !ok { - x.line("\"" + k + "\"") - } - } - x.line(")") - x.line("") - - x.line("const (") - x.linef("// ----- content types ----") - x.linef("codecSelferCcUTF8%s = %v", x.xs, int64(cUTF8)) - x.linef("codecSelferCcRAW%s = %v", x.xs, int64(cRAW)) - x.linef("// ----- value types used ----") - for _, vt := range [...]valueType{ - valueTypeArray, valueTypeMap, valueTypeString, - valueTypeInt, valueTypeUint, valueTypeFloat, - valueTypeNil, - } { - x.linef("codecSelferValueType%s%s = %v", vt.String(), x.xs, int64(vt)) - } - - x.linef("codecSelferBitsize%s = uint8(32 << (^uint(0) >> 63))", x.xs) - x.linef("codecSelferDecContainerLenNil%s = %d", x.xs, int64(containerLenNil)) - x.line(")") - x.line("var (") - x.line("errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + " = " + "errors.New(`only encoded map or array can be decoded into a struct`)") - x.line("_ sort.Interface = nil") - x.line(")") - x.line("") - - x.hn = "codecSelfer" + x.xs - x.line("type " + x.hn + " struct{}") - x.line("") - x.linef("func %sFalse() bool { return false }", x.hn) - x.linef("func %sTrue() bool { return true }", x.hn) - x.line("") - - // add types for sorting canonical - for _, s := range []string{"string", "uint64", "int64", "float64"} { - x.linef("type %s%sSlice []%s", x.hn, s, s) - x.linef("func (p %s%sSlice) Len() int { return len(p) }", x.hn, s) - x.linef("func (p %s%sSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }", x.hn, s) - x.linef("func (p %s%sSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] }", x.hn, s) - } - - x.line("") - x.varsfxreset() - x.line("func init() {") - x.linef("if %sGenVersion != %v {", x.cpfx, genVersion) - x.line("_, file, _, _ := runtime.Caller(0)") - x.linef("ver := strconv.FormatInt(int64(%sGenVersion), 10)", x.cpfx) - x.outf(`panic(errors.New("codecgen version mismatch: current: %v, need " + ver + ". Re-generate file: " + file))`, genVersion) - x.linef("}") - if len(imKeys) > 0 { - x.line("if false { // reference the types, but skip this branch at build/run time") - for _, k := range imKeys { - t := x.im[k] - x.linef("var _ %s.%s", x.imn[k], t.Name()) - } - x.line("} ") // close if false - } - x.line("}") // close init - x.line("") - - // generate rest of type info - for _, t := range x.t { - x.tc = t - x.linef("func (%s) codecSelferViaCodecgen() {}", x.genTypeName(t)) - x.selfer(true) - x.selfer(false) - x.tryGenIsZero(t) - } - - for _, t := range x.ts { - rtid := rt2id(t) - // generate enc functions for all these slice/map types. - x.varsfxreset() - x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) - x.genRequiredMethodVars(true) - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - x.encListFallback("v", t) - case reflect.Map: - x.encMapFallback("v", t) - default: - halt.onerror(errGenExpectArrayOrMap) - } - x.line("}") - x.line("") - - // generate dec functions for all these slice/map types. 
- x.varsfxreset() - x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) - x.genRequiredMethodVars(false) - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - x.decListFallback("v", rtid, t) - case reflect.Map: - x.decMapFallback("v", rtid, t) - default: - halt.onerror(errGenExpectArrayOrMap) - } - x.line("}") - x.line("") - } - - for t := range x.ty { - x.tryGenIsZero(t) - x.line("") - } - - x.line("") - return -} - -func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { - // return varname != genTopLevelVarName && t != x.tc - // the only time we checkForSelfer is if we are not at the TOP of the generated code. - return varname != genTopLevelVarName -} - -func (x *genRunner) arr2str(t reflect.Type, s string) string { - if t.Kind() == reflect.Array { - return s - } - return "" -} - -func (x *genRunner) genRequiredMethodVars(encode bool) { - x.line("var h " + x.hn) - if encode { - x.line("z, r := " + x.cpfx + "GenHelper().Encoder(e)") - } else { - x.line("z, r := " + x.cpfx + "GenHelper().Decoder(d)") - } - x.line("_, _, _ = h, z, r") -} - -func (x *genRunner) genRefPkgs(t reflect.Type) { - if _, ok := x.is[t]; ok { - return - } - x.is[t] = struct{}{} - tpkg, tname := genImportPath(t), t.Name() - if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { - if _, ok := x.im[tpkg]; !ok { - x.im[tpkg] = t - if idx := strings.LastIndex(tpkg, "/"); idx < 0 { - x.imn[tpkg] = tpkg - } else { - x.imc++ - x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) - } - } - } - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: - x.genRefPkgs(t.Elem()) - case reflect.Map: - x.genRefPkgs(t.Elem()) - x.genRefPkgs(t.Key()) - case reflect.Struct: - for i := 0; i < t.NumField(); i++ { - if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { - x.genRefPkgs(t.Field(i).Type) - } - } - } -} - -// sayFalse will either say "false" or use a function call that returns false. -func (x *genRunner) sayFalse() string { - x.f++ - if x.f%2 == 0 { - return x.hn + "False()" - } - return "false" -} - -// sayFalse will either say "true" or use a function call that returns true. -func (x *genRunner) sayTrue() string { - x.f++ - if x.f%2 == 0 { - return x.hn + "True()" - } - return "true" -} - -func (x *genRunner) varsfx() string { - x.c++ - return strconv.FormatUint(x.c, 10) -} - -func (x *genRunner) varsfxreset() { - x.c = 0 -} - -func (x *genRunner) out(s string) { - _, err := io.WriteString(x.w, s) - genCheckErr(err) -} - -func (x *genRunner) outf(s string, params ...interface{}) { - _, err := fmt.Fprintf(x.w, s, params...) - genCheckErr(err) -} - -func (x *genRunner) line(s string) { - x.out(s) - if len(s) == 0 || s[len(s)-1] != '\n' { - x.out("\n") - } -} - -func (x *genRunner) lineIf(s string) { - if s != "" { - x.line(s) - } -} - -func (x *genRunner) linef(s string, params ...interface{}) { - x.outf(s, params...) - if len(s) == 0 || s[len(s)-1] != '\n' { - x.out("\n") - } -} - -func (x *genRunner) genTypeName(t reflect.Type) (n string) { - // if the type has a PkgPath, which doesn't match the current package, - // then include it. 
- // We cannot depend on t.String() because it includes current package, - // or t.PkgPath because it includes full import path, - // - var ptrPfx string - for t.Kind() == reflect.Ptr { - ptrPfx += "*" - t = t.Elem() - } - if tn := t.Name(); tn != "" { - return ptrPfx + x.genTypeNamePrim(t) - } - switch t.Kind() { - case reflect.Map: - return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) - case reflect.Slice: - return ptrPfx + "[]" + x.genTypeName(t.Elem()) - case reflect.Array: - return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) - case reflect.Chan: - return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) +func genTitleCaseName(s string) string { + switch s { + case "interface{}", "interface {}": + return "Intf" + case "[]byte", "[]uint8", "bytes": + return "Bytes" default: - if t == intfTyp { - return ptrPfx + "interface{}" - } else { - return ptrPfx + x.genTypeNamePrim(t) - } + return strings.ToUpper(s[0:1]) + s[1:] } } -func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { - if t.Name() == "" { - return t.String() - } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { - return t.Name() - } else { - return x.imn[genImportPath(t)] + "." + t.Name() - // return t.String() // best way to get the package name inclusive - } -} - -func (x *genRunner) genZeroValueR(t reflect.Type) string { - // if t is a named type, w - switch t.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, - reflect.Slice, reflect.Map, reflect.Invalid: - return "nil" - case reflect.Bool: - return "false" - case reflect.String: - return `""` - case reflect.Struct, reflect.Array: - return x.genTypeName(t) + "{}" - default: // all numbers - return "0" - } -} - -func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { - return genMethodNameT(t, x.tc) -} - -func (x *genRunner) tryGenIsZero(t reflect.Type) (done bool) { - if t.Kind() != reflect.Struct || t.Implements(isCodecEmptyerTyp) { - return - } - - rtid := rt2id(t) - - if _, ok := x.tz[rtid]; ok { - delete(x.ty, t) - return - } - - x.tz[rtid] = true - delete(x.ty, t) - - ti := x.ti.get(rtid, t) - tisfi := ti.sfi.source() // always use sequence from file. decStruct expects same thing. 
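The genTitleCaseName helper added above (relocated in this revision) turns a type string into the capitalized fragment used when composing fast-path method names (see MethodNamePfx further down). Illustrative inputs and outputs:

genTitleCaseName("interface{}")   // "Intf"
genTitleCaseName("[]byte")        // "Bytes"
genTitleCaseName("uint64")        // "Uint64"
genTitleCaseName("mapStringIntf") // "MapStringIntf"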
- varname := genTopLevelVarName - - x.linef("func (%s *%s) IsCodecEmpty() bool {", varname, x.genTypeName(t)) - - anonSeen := make(map[reflect.Type]bool) - var omitline genBuf - for _, si := range tisfi { - if si.path.parent != nil { - root := si.path.root() - if anonSeen[root.typ] { - continue - } - anonSeen[root.typ] = true - } - t2 := genOmitEmptyLinePreChecks(varname, t, si, &omitline, true) - // if Ptr, we already checked if nil above - if t2.Type.Kind() != reflect.Ptr { - x.doEncOmitEmptyLine(t2, varname, &omitline) - omitline.s(" || ") - } - } - omitline.s(" false") - x.linef("return !(%s)", omitline.v()) - - x.line("}") - x.line("") - return true -} - -func (x *genRunner) selfer(encode bool) { - t := x.tc - // ti := x.ti.get(rt2id(t), t) - t0 := t - // always make decode use a pointer receiver, - // and structs/arrays always use a ptr receiver (encode|decode) - isptr := !encode || t.Kind() == reflect.Array || (t.Kind() == reflect.Struct && t != timeTyp) - x.varsfxreset() - - fnSigPfx := "func (" + genTopLevelVarName + " " - if isptr { - fnSigPfx += "*" - } - fnSigPfx += x.genTypeName(t) - x.out(fnSigPfx) - - if isptr { - t = reflect.PtrTo(t) - } - if encode { - x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") - x.genRequiredMethodVars(true) - if t0.Kind() == reflect.Struct { - x.linef("if z.EncBasicHandle().CheckCircularRef { z.EncEncode(%s); return }", genTopLevelVarName) - } - x.encVar(genTopLevelVarName, t) - } else { - x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - // do not use decVar, as there is no need to check TryDecodeAsNil - // or way to elegantly handle that, and also setting it to a - // non-nil value doesn't affect the pointer passed. - // x.decVar(genTopLevelVarName, t, false) - x.dec(genTopLevelVarName, t0, true) - } - x.line("}") - x.line("") - - if encode || t0.Kind() != reflect.Struct { - return - } - - // write is containerMap - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0) - x.line("}") - x.line("") - - // write containerArray - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0) - x.line("}") - x.line("") - -} - -// used for chan, array, slice, map -func (x *genRunner) xtraSM(varname string, t reflect.Type, ti *typeInfo, encode, isptr bool) { - var ptrPfx, addrPfx string - if isptr { - ptrPfx = "*" - } else { - addrPfx = "&" - } - if encode { - x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), ptrPfx, x.genTypeName(t), varname) - } else { - x.linef("h.dec%s((*%s)(%s%s), d)", x.genMethodNameT(t), x.genTypeName(t), addrPfx, varname) - } - x.registerXtraT(t, ti) -} - -func (x *genRunner) registerXtraT(t reflect.Type, ti *typeInfo) { - // recursively register the types - tk := t.Kind() - if tk == reflect.Ptr { - x.registerXtraT(t.Elem(), nil) - return - } - if _, ok := x.tm[t]; ok { - return - } - - switch tk { - case reflect.Chan, reflect.Slice, reflect.Array, reflect.Map: - default: - return - } - // only register the type if it will not default to a fast-path - if ti == nil { - ti = x.ti.get(rt2id(t), t) - } - if _, rtidu := genFastpathUnderlying(t, ti.rtid, ti); fastpathAvIndex(rtidu) != -1 { - return - } - x.tm[t] = struct{}{} - x.ts = append(x.ts, t) - // check if this refers to any xtra types eg. 
a slice of array: add the array - x.registerXtraT(t.Elem(), nil) - if tk == reflect.Map { - x.registerXtraT(t.Key(), nil) - } -} - -// encVar will encode a variable. -// The parameter, t, is the reflect.Type of the variable itself -func (x *genRunner) encVar(varname string, t reflect.Type) { - var checkNil bool - // case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: - // do not include checkNil for slice and maps, as we already checkNil below it - switch t.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Chan: - checkNil = true - } - x.encVarChkNil(varname, t, checkNil) -} - -func (x *genRunner) encVarChkNil(varname string, t reflect.Type, checkNil bool) { - if checkNil { - x.linef("if %s == nil { r.EncodeNil() } else {", varname) - } - - switch t.Kind() { - case reflect.Ptr: - telem := t.Elem() - tek := telem.Kind() - if tek == reflect.Array || (tek == reflect.Struct && telem != timeTyp) { - x.enc(varname, genNonPtr(t), true) - break - } - i := x.varsfx() - x.line(genTempVarPfx + i + " := *" + varname) - x.enc(genTempVarPfx+i, genNonPtr(t), false) - case reflect.Struct, reflect.Array: - if t == timeTyp { - x.enc(varname, t, false) - break - } - i := x.varsfx() - x.line(genTempVarPfx + i + " := &" + varname) - x.enc(genTempVarPfx+i, t, true) - default: - x.enc(varname, t, false) - } - - if checkNil { - x.line("}") - } -} - -// enc will encode a variable (varname) of type t, where t represents T. -// if t is !time.Time and t is of kind reflect.Struct or reflect.Array, varname is of type *T -// (to prevent copying), -// else t is of type T -func (x *genRunner) enc(varname string, t reflect.Type, isptr bool) { - rtid := rt2id(t) - ti2 := x.ti.get(rtid, t) - // We call CodecEncodeSelf if one of the following are honored: - // - the type already implements Selfer, call that - // - the type has a Selfer implementation just created, use that - // - the type is in the list of the ones we will generate for, but it is not currently being generated - - mi := x.varsfx() - // tptr := reflect.PtrTo(t) - // tk := t.Kind() - - // check if - // - type is time.Time, RawExt, Raw - // - the type implements (Text|JSON|Binary)(Unm|M)arshal - - var hasIf genIfClause - defer hasIf.end(x) // end if block (if necessary) - - var ptrPfx, addrPfx string - if isptr { - ptrPfx = "*" - } else { - addrPfx = "&" - } - - if t == timeTyp { - x.linef("%s z.EncBasicHandle().TimeBuiltin() { r.EncodeTime(%s%s)", hasIf.c(false), ptrPfx, varname) - // return - } - if t == rawTyp { - x.linef("%s z.EncRaw(%s%s)", hasIf.c(true), ptrPfx, varname) - return - } - if t == rawExtTyp { - x.linef("%s r.EncodeRawExt(%s%s)", hasIf.c(true), addrPfx, varname) - return - } - // only check for extensions if extensions are configured, - // and the type is named, and has a packagePath, - // and this is not the CodecEncodeSelf or CodecDecodeSelf method (i.e. 
it is not a Selfer) - if !x.nx && varname != genTopLevelVarName && t != genStringDecAsBytesTyp && - t != genStringDecZCTyp && genImportPath(t) != "" && t.Name() != "" { - yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi) - x.linef("%s %s := z.Extension(%s); %s != nil { z.EncExtension(%s, %s) ", - hasIf.c(false), yy, varname, yy, varname, yy) - } - - if x.checkForSelfer(t, varname) { - if ti2.flagSelfer { - x.linef("%s %s.CodecEncodeSelf(e)", hasIf.c(true), varname) - return - } - if ti2.flagSelferPtr { - if isptr { - x.linef("%s %s.CodecEncodeSelf(e)", hasIf.c(true), varname) - } else { - x.linef("%s %ssf%s := &%s", hasIf.c(true), genTempVarPfx, mi, varname) - x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) - } - return - } - - if _, ok := x.te[rtid]; ok { - x.linef("%s %s.CodecEncodeSelf(e)", hasIf.c(true), varname) - return - } - } - - inlist := false - for _, t0 := range x.t { - if t == t0 { - inlist = true - if x.checkForSelfer(t, varname) { - x.linef("%s %s.CodecEncodeSelf(e)", hasIf.c(true), varname) - return - } - break - } - } - - var rtidAdded bool - if t == x.tc { - x.te[rtid] = true - rtidAdded = true - } - - if ti2.flagBinaryMarshaler { - x.linef("%s z.EncBinary() { z.EncBinaryMarshal(%s%v) ", hasIf.c(false), ptrPfx, varname) - } else if ti2.flagBinaryMarshalerPtr { - x.linef("%s z.EncBinary() { z.EncBinaryMarshal(%s%v) ", hasIf.c(false), addrPfx, varname) - } - - if ti2.flagJsonMarshaler { - x.linef("%s !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%s%v) ", hasIf.c(false), ptrPfx, varname) - } else if ti2.flagJsonMarshalerPtr { - x.linef("%s !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%s%v) ", hasIf.c(false), addrPfx, varname) - } else if ti2.flagTextMarshaler { - x.linef("%s !z.EncBinary() { z.EncTextMarshal(%s%v) ", hasIf.c(false), ptrPfx, varname) - } else if ti2.flagTextMarshalerPtr { - x.linef("%s !z.EncBinary() { z.EncTextMarshal(%s%v) ", hasIf.c(false), addrPfx, varname) - } - - x.lineIf(hasIf.c(true)) - - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x.line("r.EncodeInt(int64(" + varname + "))") - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x.line("r.EncodeUint(uint64(" + varname + "))") - case reflect.Float32: - x.line("r.EncodeFloat32(float32(" + varname + "))") - case reflect.Float64: - x.line("r.EncodeFloat64(float64(" + varname + "))") - case reflect.Complex64: - x.linef("z.EncEncodeComplex64(complex64(%s))", varname) - case reflect.Complex128: - x.linef("z.EncEncodeComplex128(complex128(%s))", varname) - case reflect.Bool: - x.line("r.EncodeBool(bool(" + varname + "))") - case reflect.String: - x.linef("r.EncodeString(string(%s))", varname) - case reflect.Chan: - x.xtraSM(varname, t, ti2, true, false) - // x.encListFallback(varname, rtid, t) - case reflect.Array: - _, rtidu := genFastpathUnderlying(t, rtid, ti2) - if fastpathAvIndex(rtidu) != -1 { - g := x.newFastpathGenV(ti2.key) - x.linef("z.F.%sV((%s)(%s[:]), e)", g.MethodNamePfx("Enc", false), x.genTypeName(ti2.key), varname) - } else { - x.xtraSM(varname, t, ti2, true, true) - } - case reflect.Slice: - // if nil, call dedicated function - // if a []byte, call dedicated function - // if a known fastpath slice, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. 
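Taken together, enc above emits an if/else-if chain (built via genIfClause) that tries extensions and marshalers before falling back to the kind switch. A reformatted sketch of the emitted text for a named integer type, assuming extensions are enabled, the type has no Selfer or marshaler implementations, and x.Count is the field being encoded (yyxt4 carries the per-call variable suffix):

if yyxt4 := z.Extension(x.Count); yyxt4 != nil {
	z.EncExtension(x.Count, yyxt4)
} else {
	r.EncodeInt(int64(x.Count))
}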
- - x.linef("if %s == nil { r.EncodeNil() } else {", varname) - if rtid == uint8SliceTypId { - x.line("r.EncodeStringBytesRaw([]byte(" + varname + "))") - } else { - tu, rtidu := genFastpathUnderlying(t, rtid, ti2) - if fastpathAvIndex(rtidu) != -1 { - g := x.newFastpathGenV(tu) - if rtid == rtidu { - x.linef("z.F.%sV(%s, e)", g.MethodNamePfx("Enc", false), varname) - } else { - x.linef("z.F.%sV((%s)(%s), e)", g.MethodNamePfx("Enc", false), x.genTypeName(tu), varname) - } - } else { - x.xtraSM(varname, t, ti2, true, false) - } - } - x.linef("} // end block: if %s slice == nil", varname) - case reflect.Map: - // if nil, call dedicated function - // if a known fastpath map, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - x.linef("if %s == nil { r.EncodeNil() } else {", varname) - tu, rtidu := genFastpathUnderlying(t, rtid, ti2) - if fastpathAvIndex(rtidu) != -1 { - g := x.newFastpathGenV(tu) - if rtid == rtidu { - x.linef("z.F.%sV(%s, e)", g.MethodNamePfx("Enc", false), varname) - } else { - x.linef("z.F.%sV((%s)(%s), e)", g.MethodNamePfx("Enc", false), x.genTypeName(tu), varname) - } - } else { - x.xtraSM(varname, t, ti2, true, false) - } - x.linef("} // end block: if %s map == nil", varname) - case reflect.Struct: - if !inlist { - delete(x.te, rtid) - x.line("z.EncFallback(" + varname + ")") - break - } - x.encStruct(varname, rtid, t) - default: - if rtidAdded { - delete(x.te, rtid) - } - x.line("z.EncFallback(" + varname + ")") - } -} - -func (x *genRunner) encZero(t reflect.Type) { - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x.line("r.EncodeInt(0)") - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x.line("r.EncodeUint(0)") - case reflect.Float32: - x.line("r.EncodeFloat32(0)") - case reflect.Float64: - x.line("r.EncodeFloat64(0)") - case reflect.Complex64: - x.line("z.EncEncodeComplex64(0)") - case reflect.Complex128: - x.line("z.EncEncodeComplex128(0)") - case reflect.Bool: - x.line("r.EncodeBool(false)") - case reflect.String: - x.linef(`r.EncodeString("")`) - default: - x.line("r.EncodeNil()") - } -} - -func genOmitEmptyLinePreChecks(varname string, t reflect.Type, si *structFieldInfo, omitline *genBuf, oneLevel bool) (t2 reflect.StructField) { - // xdebug2f("calling genOmitEmptyLinePreChecks on: %v", t) - t2typ := t - varname3 := varname - // go through the loop, record the t2 field explicitly, - // and gather the omit line if embedded in pointers. - fullpath := si.path.fullpath() - for i, path := range fullpath { - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(int(path.index)) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - // do not include actual field in the omit line. - // that is done subsequently (right after - below). - if i+1 < len(fullpath) && t2typ.Kind() == reflect.Ptr { - omitline.s(varname3).s(" != nil && ") - } - if oneLevel { - break - } - } - return -} - -func (x *genRunner) doEncOmitEmptyLine(t2 reflect.StructField, varname string, buf *genBuf) { - x.f = 0 - x.encOmitEmptyLine(t2, varname, buf) -} - -func (x *genRunner) encOmitEmptyLine(t2 reflect.StructField, varname string, buf *genBuf) { - // xdebugf("calling encOmitEmptyLine on: %v", t2.Type) - // smartly check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. 
- // also, for maps/slices, check if len ! 0 (not if == zero value) - varname2 := varname + "." + t2.Name - switch t2.Type.Kind() { - case reflect.Struct: - rtid2 := rt2id(t2.Type) - ti2 := x.ti.get(rtid2, t2.Type) - // xdebugf(">>>> structfield: omitempty: type: %s, field: %s\n", t2.Type.Name(), t2.Name) - if ti2.rtid == timeTypId { - buf.s("!(").s(varname2).s(".IsZero())") - break - } - if ti2.flagIsZeroerPtr || ti2.flagIsZeroer { - buf.s("!(").s(varname2).s(".IsZero())") - break - } - if t2.Type.Implements(isCodecEmptyerTyp) { - buf.s("!(").s(varname2).s(".IsCodecEmpty())") - break - } - _, ok := x.tz[rtid2] - if ok { - buf.s("!(").s(varname2).s(".IsCodecEmpty())") - break - } - // if we *should* create a IsCodecEmpty for it, but haven't yet, add it here - // _, ok = x.ty[rtid2] - if genImportPath(t2.Type) == x.bp { - x.ty[t2.Type] = struct{}{} - buf.s("!(").s(varname2).s(".IsCodecEmpty())") - break - } - if ti2.flagComparable { - buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) - break - } - // buf.s("(") - buf.s(x.sayFalse()) // buf.s("false") - var wrote bool - for i, n := 0, t2.Type.NumField(); i < n; i++ { - f := t2.Type.Field(i) - if f.PkgPath != "" { // unexported - continue - } - buf.s(" || ") - x.encOmitEmptyLine(f, varname2, buf) - wrote = true - } - if !wrote { - buf.s(" || ").s(x.sayTrue()) - } - //buf.s(")") - case reflect.Bool: - buf.s("bool(").s(varname2).s(")") - case reflect.Map, reflect.Slice, reflect.Chan: - buf.s("len(").s(varname2).s(") != 0") - case reflect.Array: - tlen := t2.Type.Len() - if tlen == 0 { - buf.s(x.sayFalse()) - } else if t2.Type.Comparable() { - buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) - } else { // then we cannot even compare the individual values - // TODO use playground to check if you can compare to a - // zero value of an array, even if array not comparable. - buf.s(x.sayTrue()) - } - default: - buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) - } -} - -func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { - // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) - // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it - - // if t === type currently running selfer on, do for all - ti := x.ti.get(rtid, t) - i := x.varsfx() - // sepVarname := genTempVarPfx + "sep" + i - numfieldsvar := genTempVarPfx + "q" + i - ti2arrayvar := genTempVarPfx + "r" + i - struct2arrvar := genTempVarPfx + "2arr" + i - - tisfi := ti.sfi.source() // always use sequence from file. decStruct expects same thing. - - type genFQN struct { - i string - fqname string - nilLine genBuf - nilVar string - canNil bool - sf reflect.StructField - } - - genFQNs := make([]genFQN, len(tisfi)) - si2Pos := make(map[*structFieldInfo]int) // stores position in sorted structFieldInfos - - for j, si := range tisfi { - si2Pos[si] = j - q := &genFQNs[j] - q.i = x.varsfx() - q.nilVar = genTempVarPfx + "n" + q.i - q.canNil = false - q.fqname = varname - { - t2typ := t - fullpath := si.path.fullpath() - for _, path := range fullpath { - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - q.sf = t2typ.Field(int(path.index)) - t2typ = q.sf.Type - q.fqname += "." 
+ q.sf.Name - if t2typ.Kind() == reflect.Ptr { - if !q.canNil { - q.nilLine.f("%s == nil", q.fqname) - q.canNil = true - } else { - q.nilLine.f(" || %s == nil", q.fqname) - } - } - } - } - } - - // x.line(sepVarname + " := !z.EncBinary()") - x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) - // x.linef("_, _ = %s, %s", sepVarname, struct2arrvar) - x.linef("_ = %s", struct2arrvar) - x.linef("const %s bool = %v // struct tag has 'toArray'", ti2arrayvar, ti.toArray) - - for j := range genFQNs { - q := &genFQNs[j] - if q.canNil { - x.linef("var %s bool = %s", q.nilVar, q.nilLine.v()) - } - } - - // var nn int - // due to omitEmpty, we need to calculate the - // number of non-empty things we write out first. - // This is required as we need to pre-determine the size of the container, - // to support length-prefixing. - omitEmptySometimes := x.omitEmptyWhen == nil - omitEmptyAlways := (x.omitEmptyWhen != nil && *(x.omitEmptyWhen)) - // omitEmptyNever := (x.omitEmptyWhen != nil && !*(x.omitEmptyWhen)) - - toArraySometimes := x.toArrayWhen == nil - toArrayAlways := (x.toArrayWhen != nil && *(x.toArrayWhen)) - toArrayNever := (x.toArrayWhen != nil && !(*(x.toArrayWhen))) - - if (omitEmptySometimes && ti.anyOmitEmpty) || omitEmptyAlways { - x.linef("var %s = [%v]bool{ // should field at this index be written?", numfieldsvar, len(tisfi)) - - for _, si := range tisfi { - if omitEmptySometimes && !si.path.omitEmpty { - x.linef("true, // %s", si.encName) // si.fieldName) - continue - } - var omitline genBuf - t2 := genOmitEmptyLinePreChecks(varname, t, si, &omitline, false) - x.doEncOmitEmptyLine(t2, varname, &omitline) - x.linef("%s, // %s", omitline.v(), si.encName) // si.fieldName) - } - x.line("}") - x.linef("_ = %s", numfieldsvar) - } - - if toArraySometimes { - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray - } - if toArraySometimes || toArrayAlways { - x.linef("z.EncWriteArrayStart(%d)", len(tisfi)) - - for j, si := range tisfi { - doOmitEmptyCheck := (omitEmptySometimes && si.path.omitEmpty) || omitEmptyAlways - q := &genFQNs[j] - // if the type of the field is a Selfer, or one of the ones - if q.canNil { - x.linef("if %s { z.EncWriteArrayElem(); r.EncodeNil() } else { ", q.nilVar) - } - x.linef("z.EncWriteArrayElem()") - if doOmitEmptyCheck { - x.linef("if %s[%v] {", numfieldsvar, j) - } - x.encVarChkNil(q.fqname, q.sf.Type, false) - if doOmitEmptyCheck { - x.linef("} else {") - x.encZero(q.sf.Type) - x.linef("}") - } - if q.canNil { - x.line("}") - } - } - - x.line("z.EncWriteArrayEnd()") - } - if toArraySometimes { - x.linef("} else {") // if not ti.toArray - } - if toArraySometimes || toArrayNever { - if (omitEmptySometimes && ti.anyOmitEmpty) || omitEmptyAlways { - x.linef("var %snn%s int", genTempVarPfx, i) - x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) - x.linef("z.EncWriteMapStart(%snn%s)", genTempVarPfx, i) - x.linef("%snn%s = %v", genTempVarPfx, i, 0) - } else { - x.linef("z.EncWriteMapStart(%d)", len(tisfi)) - } - - fn := func(tisfi []*structFieldInfo) { - // tisfi here may be source or sorted, so use the src position stored elsewhere - for _, si := range tisfi { - pos := si2Pos[si] - q := &genFQNs[pos] - doOmitEmptyCheck := (omitEmptySometimes && si.path.omitEmpty) || omitEmptyAlways - if doOmitEmptyCheck { - x.linef("if %s[%v] {", numfieldsvar, pos) - } - x.linef("z.EncWriteMapElemKey()") - - // emulate EncStructFieldKey - switch ti.keyType { - case valueTypeInt: - 
x.linef("r.EncodeInt(z.M.Int(strconv.ParseInt(`%s`, 10, 64)))", si.encName) - case valueTypeUint: - x.linef("r.EncodeUint(z.M.Uint(strconv.ParseUint(`%s`, 10, 64)))", si.encName) - case valueTypeFloat: - x.linef("r.EncodeFloat64(z.M.Float(strconv.ParseFloat(`%s`, 64)))", si.encName) - default: // string - if x.jsonOnlyWhen == nil { - if si.path.encNameAsciiAlphaNum { - x.linef(`if z.IsJSONHandle() { z.EncWr().WriteStr("\"%s\"") } else { `, si.encName) - } - x.linef("r.EncodeString(`%s`)", si.encName) - if si.path.encNameAsciiAlphaNum { - x.linef("}") - } - } else if *(x.jsonOnlyWhen) { - if si.path.encNameAsciiAlphaNum { - x.linef(`z.EncWr().WriteStr("\"%s\"")`, si.encName) - } else { - x.linef("r.EncodeString(`%s`)", si.encName) - } - } else { - x.linef("r.EncodeString(`%s`)", si.encName) - } - } - x.line("z.EncWriteMapElemValue()") - if q.canNil { - x.line("if " + q.nilVar + " { r.EncodeNil() } else { ") - x.encVarChkNil(q.fqname, q.sf.Type, false) - x.line("}") - } else { - x.encVarChkNil(q.fqname, q.sf.Type, false) - } - if doOmitEmptyCheck { - x.line("}") - } - } - } - - if genStructCanonical { - x.linef("if z.EncBasicHandle().Canonical {") // if Canonical block - fn(ti.sfi.sorted()) - x.linef("} else {") // else !Canonical block - fn(ti.sfi.source()) - x.linef("}") // end if Canonical block - } else { - fn(tisfi) - } - - x.line("z.EncWriteMapEnd()") - } - if toArraySometimes { - x.linef("} ") // end if/else ti.toArray - } -} - -func (x *genRunner) encListFallback(varname string, t reflect.Type) { - x.linef("if %s == nil { r.EncodeNil(); return }", varname) - elemBytes := t.Elem().Kind() == reflect.Uint8 - if t.AssignableTo(uint8SliceTyp) { - x.linef("r.EncodeStringBytesRaw([]byte(%s))", varname) - return - } - if t.Kind() == reflect.Array && elemBytes { - x.linef("r.EncodeStringBytesRaw(((*[%d]byte)(%s))[:])", t.Len(), varname) - return - } - i := x.varsfx() - if t.Kind() == reflect.Chan { - type ts struct { - Label, Chan, Slice, Sfx string - } - tm, err := template.New("").Parse(genEncChanTmpl) - genCheckErr(err) - x.linef("if %s == nil { r.EncodeNil() } else { ", varname) - x.linef("var sch%s []%s", i, x.genTypeName(t.Elem())) - err = tm.Execute(x.w, &ts{"Lsch" + i, varname, "sch" + i, i}) - genCheckErr(err) - if elemBytes { - x.linef("r.EncodeStringBytesRaw([]byte(%s))", "sch"+i) - x.line("}") - return - } - varname = "sch" + i - } - - x.line("z.EncWriteArrayStart(len(" + varname + "))") - - // x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) - // x.linef("z.EncWriteArrayElem()") - // x.encVar(genTempVarPfx+"v"+i, t.Elem()) - // x.line("}") - - x.linef("for %sv%s := range %s {", genTempVarPfx, i, varname) - x.linef("z.EncWriteArrayElem()") - x.encVar(fmt.Sprintf("%s[%sv%s]", varname, genTempVarPfx, i), t.Elem()) - x.line("}") - - x.line("z.EncWriteArrayEnd()") - if t.Kind() == reflect.Chan { - x.line("}") - } -} - -func (x *genRunner) encMapFallback(varname string, t reflect.Type) { - x.linef("if %s == nil { r.EncodeNil()", varname) - x.line("} else if z.EncBasicHandle().Canonical {") - - // Solve for easy case accomodated by sort package without reflection i.e. - // map keys of type: float, int, string (pre-defined/builtin types). 
- // - // To do this, we will get the keys into an array of uint64|float64|string, - // sort them, then write them out, and grab the value and encode it appropriately - tkey := t.Key() - tkind := tkey.Kind() - // tkeybase := tkey - // for tkeybase.Kind() == reflect.Ptr { - // tkeybase = tkeybase.Elem() - // } - // tikey := x.ti.get(rt2id(tkeybase), tkeybase) - - // pre-defined types have a name and no pkgpath and appropriate kind - predeclared := tkey.PkgPath() == "" && tkey.Name() != "" - - canonSortKind := reflect.Invalid - switch tkind { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - canonSortKind = reflect.Int64 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - canonSortKind = reflect.Uint64 - case reflect.Float32, reflect.Float64: - canonSortKind = reflect.Float64 - case reflect.String: - canonSortKind = reflect.String - } - - var i string = x.varsfx() - - fnCanonNumBoolStrKind := func() { - if !predeclared { - x.linef("var %svv%s %s", genTempVarPfx, i, x.genTypeName(tkey)) - x.linef("%sencfn%s := z.EncFnGivenAddr(&%svv%s)", genTempVarPfx, i, genTempVarPfx, i) - } - // get the type, get the slice type its mapped to, and complete the code - x.linef("%ss%s := make([]%s, 0, len(%s))", genTempVarPfx, i, canonSortKind, varname) - x.linef("for k, _ := range %s {", varname) - x.linef(" %ss%s = append(%ss%s, %s(k))", genTempVarPfx, i, genTempVarPfx, i, canonSortKind) - x.linef("}") - x.linef("sort.Sort(%s%sSlice(%ss%s))", x.hn, canonSortKind, genTempVarPfx, i) - x.linef("z.EncWriteMapStart(len(%s))", varname) - x.linef("for _, %sv%s := range %ss%s {", genTempVarPfx, i, genTempVarPfx, i) - x.linef(" z.EncWriteMapElemKey()") - if predeclared { - switch tkind { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32: - x.linef("r.EncodeInt(int64(%sv%s))", genTempVarPfx, i) - case reflect.Int64: - x.linef("r.EncodeInt(%sv%s)", genTempVarPfx, i) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: - x.linef("r.EncodeUint(%sv%s)", genTempVarPfx, i) - case reflect.Uint64: - x.linef("r.EncodeUint(uint64(%sv%s))", genTempVarPfx, i) - case reflect.Float32: - x.linef("r.EncodeFloat32(float32(%sv%s))", genTempVarPfx, i) - case reflect.Float64: - x.linef("r.EncodeFloat64(%sv%s)", genTempVarPfx, i) - case reflect.String: - x.linef("r.EncodeString(%sv%s)", genTempVarPfx, i) - } - } else { - x.linef("%svv%s = %s(%sv%s)", genTempVarPfx, i, x.genTypeName(tkey), genTempVarPfx, i) - x.linef("z.EncEncodeNumBoolStrKindGivenAddr(&%svv%s, %sencfn%s)", genTempVarPfx, i, genTempVarPfx, i) - } - x.linef(" z.EncWriteMapElemValue()") - vname := genTempVarPfx + "e" + i - if predeclared { - x.linef("%s := %s[%s(%sv%s)]", vname, varname, x.genTypeName(tkey), genTempVarPfx, i) - } else { - x.linef("%s := %s[%svv%s]", vname, varname, genTempVarPfx, i) - } - x.encVar(vname, t.Elem()) - x.linef("}") - - x.line("z.EncWriteMapEnd()") - - } - - // if canonSortKind != reflect.Invalid && !tikey.flagMarshalInterface { - // if predeclared { - // fnCanonNumBoolStrKind() - // } else { - // // handle if an extension - // x.linef("if z.Extension(%s(%s)) != nil { z.EncEncodeMapNonNil(%s) } else {", - // x.genTypeName(tkey), x.genZeroValueR(tkey), varname) - // fnCanonNumBoolStrKind() - // x.line("}") - // } - // } else { - // x.linef("z.EncEncodeMapNonNil(%s)", varname) - // } - - if canonSortKind != reflect.Invalid { - fnCanonNumBoolStrKind() - } else { - x.linef("z.EncEncodeMapNonNil(%s)", varname) - } - - x.line("} else {") - - 
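For the Canonical branch emitted just above, the generated code copies the keys into one of the preamble's sort-shim slices, sorts them, and then walks the map in key order. A reformatted sketch of the emitted text for a map[string]int value v, again assuming the file suffix 1234:

yys1 := make([]string, 0, len(v))
for k, _ := range v {
	yys1 = append(yys1, string(k))
}
sort.Sort(codecSelfer1234stringSlice(yys1))
z.EncWriteMapStart(len(v))
for _, yyv1 := range yys1 {
	z.EncWriteMapElemKey()
	r.EncodeString(yyv1)
	z.EncWriteMapElemValue()
	yye1 := v[string(yyv1)]
	r.EncodeInt(int64(yye1))
}
z.EncWriteMapEnd()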
x.linef("z.EncWriteMapStart(len(%s))", varname) - x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) - x.linef("z.EncWriteMapElemKey()") - x.encVar(genTempVarPfx+"k"+i, t.Key()) - x.line("z.EncWriteMapElemValue()") - x.encVar(genTempVarPfx+"v"+i, t.Elem()) - x.line("}") - x.line("z.EncWriteMapEnd()") - - x.line("}") -} - -func (x *genRunner) decVarInitPtr(varname, nilvar string, t reflect.Type, si *structFieldInfo, - newbuf, nilbuf *genBuf) (varname3 string, t2 reflect.StructField) { - //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. - // t2 = t.FieldByIndex(si.is) - varname3 = varname - t2typ := t - t2kind := t2typ.Kind() - var nilbufed bool - if si != nil { - fullpath := si.path.fullpath() - for _, path := range fullpath { - // only one-level pointers can be seen in a type - if t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(int(path.index)) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - t2kind = t2typ.Kind() - if t2kind != reflect.Ptr { - continue - } - if newbuf != nil { - if len(newbuf.buf) > 0 { - newbuf.s("\n") - } - newbuf.f("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) - } - if nilbuf != nil { - if !nilbufed { - nilbuf.s("if ").s(varname3).s(" != nil") - nilbufed = true - } else { - nilbuf.s(" && ").s(varname3).s(" != nil") - } - } - } - } - if nilbuf != nil { - if nilbufed { - nilbuf.s(" { ").s("// remove the if-true\n") - } - if nilvar != "" { - nilbuf.s(nilvar).s(" = true") - } else if tk := t2typ.Kind(); tk == reflect.Ptr { - if strings.IndexByte(varname3, '.') != -1 || strings.IndexByte(varname3, '[') != -1 { - nilbuf.s(varname3).s(" = nil") - } else { - nilbuf.s("*").s(varname3).s(" = ").s(x.genZeroValueR(t2typ.Elem())) - } - } else { - nilbuf.s(varname3).s(" = ").s(x.genZeroValueR(t2typ)) - } - if nilbufed { - nilbuf.s("}") - } - } - return -} - -// decVar takes a variable called varname, of type t -func (x *genRunner) decVarMain(varname, rand string, t reflect.Type, checkNotNil bool) { - // We only encode as nil if a nillable value. - // This removes some of the wasted checks for TryDecodeAsNil. - // We need to think about this more, to see what happens if omitempty, etc - // cause a nil value to be stored when something is expected. - // This could happen when decoding from a struct encoded as an array. - // For that, decVar should be called with canNil=true, to force true as its value. - var varname2 string - if t.Kind() != reflect.Ptr { - if t.PkgPath() != "" || !x.decTryAssignPrimitive(varname, t, false) { - x.dec(varname, t, false) - } - } else { - if checkNotNil { - x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) - } - // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). - // There's a chance of a **T in here which is nil. - var ptrPfx string - for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { - ptrPfx += "*" - if checkNotNil { - x.linef("if %s%s == nil { %s%s = new(%s)}", ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) - } - } - // Should we create temp var if a slice/map indexing? No. dec(...) can now handle it. 
- - if ptrPfx == "" { - x.dec(varname, t, true) - } else { - varname2 = genTempVarPfx + "z" + rand - x.line(varname2 + " := " + ptrPfx + varname) - x.dec(varname2, t, true) - } - } -} - -// decVar takes a variable called varname, of type t -func (x *genRunner) decVar(varname, nilvar string, t reflect.Type, canBeNil, checkNotNil bool) { - - // We only encode as nil if a nillable value. - // This removes some of the wasted checks for TryDecodeAsNil. - // We need to think about this more, to see what happens if omitempty, etc - // cause a nil value to be stored when something is expected. - // This could happen when decoding from a struct encoded as an array. - // For that, decVar should be called with canNil=true, to force true as its value. - - i := x.varsfx() - if t.Kind() == reflect.Ptr { - var buf genBuf - x.decVarInitPtr(varname, nilvar, t, nil, nil, &buf) - x.linef("if r.TryNil() { %s } else {", buf.buf) - x.decVarMain(varname, i, t, checkNotNil) - x.line("} ") - } else { - x.decVarMain(varname, i, t, checkNotNil) - } -} - -// dec will decode a variable (varname) of type t or ptrTo(t) if isptr==true. -func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) { - // assumptions: - // - the varname is to a pointer already. No need to take address of it - // - t is always a baseType T (not a *T, etc). - rtid := rt2id(t) - ti2 := x.ti.get(rtid, t) - - // check if - // - type is time.Time, Raw, RawExt - // - the type implements (Text|JSON|Binary)(Unm|M)arshal - - mi := x.varsfx() - - var hasIf genIfClause - defer hasIf.end(x) - - var ptrPfx, addrPfx string - if isptr { - ptrPfx = "*" - } else { - addrPfx = "&" - } - if t == timeTyp { - x.linef("%s z.DecBasicHandle().TimeBuiltin() { %s%v = r.DecodeTime()", hasIf.c(false), ptrPfx, varname) - // return - } - if t == rawTyp { - x.linef("%s %s%v = z.DecRaw()", hasIf.c(true), ptrPfx, varname) - return - } - - if t == rawExtTyp { - x.linef("%s r.DecodeExt(%s%v, 0, nil)", hasIf.c(true), addrPfx, varname) - return - } - - // only check for extensions if extensions are configured, - // and the type is named, and has a packagePath, - // and this is not the CodecEncodeSelf or CodecDecodeSelf method (i.e. 
it is not a Selfer) - // xdebugf("genRunner.dec: varname: %v, t: %v, genImportPath: %v, t.Name: %v", varname, t, genImportPath(t), t.Name()) - if !x.nx && varname != genTopLevelVarName && t != genStringDecAsBytesTyp && - t != genStringDecZCTyp && genImportPath(t) != "" && t.Name() != "" { - // first check if extensions are configued, before doing the interface conversion - yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi) - x.linef("%s %s := z.Extension(%s); %s != nil { z.DecExtension(%s%s, %s) ", hasIf.c(false), yy, varname, yy, addrPfx, varname, yy) - } - - if x.checkForSelfer(t, varname) { - if ti2.flagSelfer { - x.linef("%s %s.CodecDecodeSelf(d)", hasIf.c(true), varname) - return - } - if ti2.flagSelferPtr { - x.linef("%s %s.CodecDecodeSelf(d)", hasIf.c(true), varname) - return - } - if _, ok := x.td[rtid]; ok { - x.linef("%s %s.CodecDecodeSelf(d)", hasIf.c(true), varname) - return - } - } - - inlist := false - for _, t0 := range x.t { - if t == t0 { - inlist = true - if x.checkForSelfer(t, varname) { - x.linef("%s %s.CodecDecodeSelf(d)", hasIf.c(true), varname) - return - } - break - } - } - - var rtidAdded bool - if t == x.tc { - x.td[rtid] = true - rtidAdded = true - } - - if ti2.flagBinaryUnmarshaler { - x.linef("%s z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", hasIf.c(false), ptrPfx, varname) - } else if ti2.flagBinaryUnmarshalerPtr { - x.linef("%s z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", hasIf.c(false), addrPfx, varname) - } - if ti2.flagJsonUnmarshaler { - x.linef("%s !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", hasIf.c(false), ptrPfx, varname) - } else if ti2.flagJsonUnmarshalerPtr { - x.linef("%s !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", hasIf.c(false), addrPfx, varname) - } else if ti2.flagTextUnmarshaler { - x.linef("%s !z.DecBinary() { z.DecTextUnmarshal(%s%v)", hasIf.c(false), ptrPfx, varname) - } else if ti2.flagTextUnmarshalerPtr { - x.linef("%s !z.DecBinary() { z.DecTextUnmarshal(%s%v)", hasIf.c(false), addrPfx, varname) - } - - x.lineIf(hasIf.c(true)) - - if x.decTryAssignPrimitive(varname, t, isptr) { - return - } - - switch t.Kind() { - case reflect.Chan: - x.xtraSM(varname, t, ti2, false, isptr) - case reflect.Array: - _, rtidu := genFastpathUnderlying(t, rtid, ti2) - if fastpathAvIndex(rtidu) != -1 { - g := x.newFastpathGenV(ti2.key) - x.linef("z.F.%sN((%s)(%s[:]), d)", g.MethodNamePfx("Dec", false), x.genTypeName(ti2.key), varname) - } else { - x.xtraSM(varname, t, ti2, false, isptr) - } - case reflect.Slice: - // if a []byte, call dedicated function - // if a known fastpath slice, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - - if rtid == uint8SliceTypId { - x.linef("%s%s = z.DecodeBytesInto(%s(%s[]byte)(%s))", ptrPfx, varname, ptrPfx, ptrPfx, varname) - } else { - tu, rtidu := genFastpathUnderlying(t, rtid, ti2) - if fastpathAvIndex(rtidu) != -1 { - g := x.newFastpathGenV(tu) - if rtid == rtidu { - x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname) - } else { - x.linef("z.F.%sX((*%s)(%s%s), d)", g.MethodNamePfx("Dec", false), x.genTypeName(tu), addrPfx, varname) - } - } else { - x.xtraSM(varname, t, ti2, false, isptr) - // x.decListFallback(varname, rtid, false, t) - } - } - case reflect.Map: - // if a known fastpath map, call dedicated function - // else write encode function in-line. 
- // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - - tu, rtidu := genFastpathUnderlying(t, rtid, ti2) - if fastpathAvIndex(rtidu) != -1 { - g := x.newFastpathGenV(tu) - if rtid == rtidu { - x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname) - } else { - x.linef("z.F.%sX((*%s)(%s%s), d)", g.MethodNamePfx("Dec", false), x.genTypeName(tu), addrPfx, varname) - } - } else { - x.xtraSM(varname, t, ti2, false, isptr) - } - case reflect.Struct: - if inlist { - // no need to create temp variable if isptr, or x.F or x[F] - if isptr || strings.IndexByte(varname, '.') != -1 || strings.IndexByte(varname, '[') != -1 { - x.decStruct(varname, rtid, t) - } else { - varname2 := genTempVarPfx + "j" + mi - x.line(varname2 + " := &" + varname) - x.decStruct(varname2, rtid, t) - } - } else { - // delete(x.td, rtid) - x.line("z.DecFallback(" + addrPfx + varname + ", false)") - } - default: - if rtidAdded { - delete(x.te, rtid) - } - x.line("z.DecFallback(" + addrPfx + varname + ", true)") - } -} - -func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type, isptr bool) (done bool) { - // This should only be used for exact primitives (ie un-named types). - // Named types may be implementations of Selfer, Unmarshaler, etc. - // They should be handled by dec(...) - - var ptr string - if isptr { - ptr = "*" - } - switch t.Kind() { - case reflect.Int: - x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) - case reflect.Int8: - x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 8))", ptr, varname, x.genTypeName(t)) - case reflect.Int16: - x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 16))", ptr, varname, x.genTypeName(t)) - case reflect.Int32: - x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 32))", ptr, varname, x.genTypeName(t)) - case reflect.Int64: - x.linef("%s%s = (%s)(r.DecodeInt64())", ptr, varname, x.genTypeName(t)) - - case reflect.Uint: - x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) - case reflect.Uint8: - x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 8))", ptr, varname, x.genTypeName(t)) - case reflect.Uint16: - x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 16))", ptr, varname, x.genTypeName(t)) - case reflect.Uint32: - x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 32))", ptr, varname, x.genTypeName(t)) - case reflect.Uint64: - x.linef("%s%s = (%s)(r.DecodeUint64())", ptr, varname, x.genTypeName(t)) - case reflect.Uintptr: - x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) - - case reflect.Float32: - x.linef("%s%s = (%s)(z.DecDecodeFloat32())", ptr, varname, x.genTypeName(t)) - case reflect.Float64: - x.linef("%s%s = (%s)(r.DecodeFloat64())", ptr, varname, x.genTypeName(t)) - - case reflect.Complex64: - x.linef("%s%s = (%s)(complex(z.DecDecodeFloat32(), 0))", ptr, varname, x.genTypeName(t)) - case reflect.Complex128: - x.linef("%s%s = (%s)(complex(r.DecodeFloat64(), 0))", ptr, varname, x.genTypeName(t)) - - case reflect.Bool: - x.linef("%s%s = (%s)(r.DecodeBool())", ptr, varname, x.genTypeName(t)) - case reflect.String: - if t == genStringDecAsBytesTyp { - x.linef("%s%s = r.DecodeStringAsBytes()", ptr, varname) - } else if t == genStringDecZCTyp { - x.linef("%s%s = (string)(z.DecStringZC(r.DecodeStringAsBytes()))", ptr, varname) - } else { - x.linef("%s%s = 
(%s)(z.DecStringZC(r.DecodeStringAsBytes()))", ptr, varname, x.genTypeName(t)) - } - default: - return false - } - return true -} - -func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { - if t.AssignableTo(uint8SliceTyp) { - x.line("*" + varname + " = z.DecodeBytesInto(*((*[]byte)(" + varname + ")))") - return - } - if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { - x.linef("r.DecodeBytes( ((*[%d]byte)(%s))[:])", t.Len(), varname) - return - } - type tstruc struct { - TempVar string - Sfx string - Rand string - Varname string - CTyp string - Typ string - Immutable bool - Size int - } - telem := t.Elem() - ts := tstruc{genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} - - funcs := make(template.FuncMap) - - funcs["decLineVar"] = func(varname string) string { - x.decVar(varname, "", telem, false, true) - return "" - } - funcs["var"] = func(s string) string { - return ts.TempVar + s + ts.Rand - } - funcs["xs"] = func() string { - return ts.Sfx - } - funcs["zero"] = func() string { - return x.genZeroValueR(telem) - } - funcs["isArray"] = func() bool { - return t.Kind() == reflect.Array - } - funcs["isSlice"] = func() bool { - return t.Kind() == reflect.Slice - } - funcs["isChan"] = func() bool { - return t.Kind() == reflect.Chan - } - tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) - genCheckErr(err) - genCheckErr(tm.Execute(x.w, &ts)) -} - -func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { - type tstruc struct { - TempVar string - Sfx string - Rand string - Varname string - KTyp string - Typ string - Size int - } - telem := t.Elem() - tkey := t.Key() - ts := tstruc{ - genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), - x.genTypeName(telem), int(telem.Size() + tkey.Size()), - } - - funcs := make(template.FuncMap) - funcs["decElemZero"] = func() string { - return x.genZeroValueR(telem) - } - funcs["decElemKindImmutable"] = func() bool { - return genIsImmutable(telem) - } - funcs["decElemKindPtr"] = func() bool { - return telem.Kind() == reflect.Ptr - } - funcs["decElemKindIntf"] = func() bool { - return telem.Kind() == reflect.Interface - } - funcs["decLineVarKStrBytes"] = func(varname string) string { - x.decVar(varname, "", genStringDecAsBytesTyp, false, true) - return "" - } - funcs["decLineVarKStrZC"] = func(varname string) string { - x.decVar(varname, "", genStringDecZCTyp, false, true) - return "" - } - funcs["decLineVarK"] = func(varname string) string { - x.decVar(varname, "", tkey, false, true) - return "" - } - funcs["decLineVar"] = func(varname, decodedNilVarname string) string { - x.decVar(varname, decodedNilVarname, telem, false, true) - return "" - } - funcs["var"] = func(s string) string { - return ts.TempVar + s + ts.Rand - } - funcs["xs"] = func() string { - return ts.Sfx - } - - tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) - genCheckErr(err) - genCheckErr(tm.Execute(x.w, &ts)) -} - -func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { - ti := x.ti.get(rtid, t) - tisfi := ti.sfi.source() // always use sequence from file. decStruct expects same thing. 
- x.line("switch string(" + kName + ") {") - var newbuf, nilbuf genBuf - for _, si := range tisfi { - x.line("case \"" + si.encName + "\":") - newbuf.reset() - nilbuf.reset() - varname3, t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) - if len(newbuf.buf) > 0 { - x.linef("if r.TryNil() { %s } else { %s", nilbuf.buf, newbuf.buf) - } - x.decVarMain(varname3, x.varsfx(), t2.Type, false) - if len(newbuf.buf) > 0 { - x.line("}") - } - } - x.line("default:") - // pass the slice here, so that the string will not escape, and maybe save allocation - x.linef("z.DecStructFieldNotFound(-1, string(%s))", kName) - x.linef("} // end switch %s", kName) -} - -func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type) { - tpfx := genTempVarPfx - ti := x.ti.get(rtid, t) - i := x.varsfx() - kName := tpfx + "s" + i - - x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length - x.linef("for %sj%s := 0; z.DecContainerNext(%sj%s, %s, %shl%s); %sj%s++ {", - tpfx, i, tpfx, i, lenvarname, tpfx, i, tpfx, i) - - x.line("z.DecReadMapElemKey()") - - // emulate decstructfieldkey - switch ti.keyType { - case valueTypeInt: - x.linef("%s := strconv.AppendInt(z.DecScratchArrayBuffer()[:0], r.DecodeInt64(), 10)", kName) - case valueTypeUint: - x.linef("%s := strconv.AppendUint(z.DecScratchArrayBuffer()[:0], r.DecodeUint64(), 10)", kName) - case valueTypeFloat: - x.linef("%s := strconv.AppendFloat(z.DecScratchArrayBuffer()[:0], r.DecodeFloat64(), 'f', -1, 64)", kName) - default: // string - x.linef("%s := r.DecodeStringAsBytes()", kName) - } - - x.line("z.DecReadMapElemValue()") - x.decStructMapSwitch(kName, varname, rtid, t) - - x.line("} // end for " + tpfx + "j" + i) -} - -func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { - tpfx := genTempVarPfx - i := x.varsfx() - ti := x.ti.get(rtid, t) - tisfi := ti.sfi.source() // always use sequence from file. decStruct expects same thing. - x.linef("var %sj%s int", tpfx, i) - x.linef("var %sb%s bool", tpfx, i) // break - x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length - var newbuf, nilbuf genBuf - for _, si := range tisfi { - x.linef("%sb%s = !z.DecContainerNext(%sj%s, %s, %shl%s)", tpfx, i, tpfx, i, lenvarname, tpfx, i) - x.linef("if %sb%s { z.DecReadArrayEnd(); %s }", tpfx, i, breakString) - x.line("z.DecReadArrayElem()") - newbuf.reset() - nilbuf.reset() - varname3, t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) - if len(newbuf.buf) > 0 { - x.linef("if r.TryNil() { %s } else { %s", nilbuf.buf, newbuf.buf) - } - x.decVarMain(varname3, x.varsfx(), t2.Type, false) - if len(newbuf.buf) > 0 { - x.line("}") - } - x.linef("%sj%s++", tpfx, i) - } - // read remaining values and throw away. - x.linef("for ; z.DecContainerNext(%sj%s, %s, %shl%s); %sj%s++ {", - tpfx, i, lenvarname, tpfx, i, tpfx, i) - x.line("z.DecReadArrayElem()") - x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) - x.line("}") -} - -func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { - // varname MUST be a ptr, or a struct field or a slice element. 
- i := x.varsfx() - x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) - x.linef("if %sct%s == codecSelferValueTypeNil%s {", genTempVarPfx, i, x.xs) - x.linef("*(%s) = %s{}", varname, x.genTypeName(t)) - x.linef("} else if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) - x.line(genTempVarPfx + "l" + i + " := z.DecReadMapStart()") - x.linef("if %sl%s == 0 {", genTempVarPfx, i) - - x.line("} else { ") - x.linef("%s.codecDecodeSelfFromMap(%sl%s, d)", varname, genTempVarPfx, i) - - x.line("}") - x.line("z.DecReadMapEnd()") - - // else if container is array - x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) - x.line(genTempVarPfx + "l" + i + " := z.DecReadArrayStart()") - x.linef("if %sl%s != 0 {", genTempVarPfx, i) - x.linef("%s.codecDecodeSelfFromArray(%sl%s, d)", varname, genTempVarPfx, i) - x.line("}") - x.line("z.DecReadArrayEnd()") - // else panic - x.line("} else { ") - x.line("panic(errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + ")") - x.line("} ") -} - // -------- -type fastpathGenV struct { - // fastpathGenV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice +type genFastpathV struct { + // genFastpathV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice MapKey string Elem string Primitive string @@ -2159,26 +77,7 @@ type fastpathGenV struct { NoCanonical bool } -func (x *genRunner) newFastpathGenV(t reflect.Type) (v fastpathGenV) { - v.NoCanonical = !genFastpathCanonical - switch t.Kind() { - case reflect.Slice, reflect.Array: - te := t.Elem() - v.Elem = x.genTypeName(te) - v.Size = int(te.Size()) - case reflect.Map: - te := t.Elem() - tk := t.Key() - v.Elem = x.genTypeName(te) - v.MapKey = x.genTypeName(tk) - v.Size = int(te.Size() + tk.Size()) - default: - halt.onerror(errGenUnexpectedTypeFastpath) - } - return -} - -func (x *fastpathGenV) MethodNamePfx(prefix string, prim bool) string { +func (x *genFastpathV) MethodNamePfx(prefix string, prim bool) string { var name []byte if prefix != "" { name = append(name, prefix...) @@ -2197,153 +96,14 @@ func (x *fastpathGenV) MethodNamePfx(prefix string, prim bool) string { return string(name) } -// genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise. -// -// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, -// where PkgPath returns the full path, including the vendoring pre-fix that should have been stripped. -// We strip it here. -func genImportPath(t reflect.Type) (s string) { - s = t.PkgPath() - if genCheckVendor { - // HACK: always handle vendoring. It should be typically on in go 1.6, 1.7 - s = genStripVendor(s) - } - return -} +// -------- -// A go identifier is (letter|_)[letter|number|_]* -func genGoIdentifier(s string, checkFirstChar bool) string { - b := make([]byte, 0, len(s)) - t := make([]byte, 4) - var n int - for i, r := range s { - if checkFirstChar && i == 0 && !unicode.IsLetter(r) { - b = append(b, '_') - } - // r must be unicode_letter, unicode_digit or _ - if unicode.IsLetter(r) || unicode.IsDigit(r) { - n = utf8.EncodeRune(t, r) - b = append(b, t[:n]...) 
- } else { - b = append(b, '_') - } - } - return string(b) -} - -func genNonPtr(t reflect.Type) reflect.Type { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t -} - -func genFastpathUnderlying(t reflect.Type, rtid uintptr, ti *typeInfo) (tu reflect.Type, rtidu uintptr) { - tu = t - rtidu = rtid - if ti.flagHasPkgPath { - tu = ti.fastpathUnderlying - rtidu = rt2id(tu) - } - return -} - -func genTitleCaseName(s string) string { - switch s { - case "interface{}", "interface {}": - return "Intf" - case "[]byte", "[]uint8", "bytes": - return "Bytes" - default: - return strings.ToUpper(s[0:1]) + s[1:] - } -} - -func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { - var ptrPfx string - for t.Kind() == reflect.Ptr { - ptrPfx += "Ptrto" - t = t.Elem() - } - tstr := t.String() - if tn := t.Name(); tn != "" { - if tRef != nil && genImportPath(t) == genImportPath(tRef) { - return ptrPfx + tn - } else { - if genQNameRegex.MatchString(tstr) { - return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } - } - switch t.Kind() { - case reflect.Map: - return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef) - case reflect.Slice: - return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef) - case reflect.Array: - return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef) - case reflect.Chan: - var cx string - switch t.ChanDir() { - case reflect.SendDir: - cx = "ChanSend" - case reflect.RecvDir: - cx = "ChanRecv" - default: - cx = "Chan" - } - return ptrPfx + cx + genMethodNameT(t.Elem(), tRef) - default: - if t == intfTyp { - return ptrPfx + "Interface" - } else { - if tRef != nil && genImportPath(t) == genImportPath(tRef) { - if t.Name() != "" { - return ptrPfx + t.Name() - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } else { - // best way to get the package name inclusive - if t.Name() != "" && genQNameRegex.MatchString(tstr) { - return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } - } - } -} - -// genCustomNameForType base32 encodes the t.String() value in such a way -// that it can be used within a function name. 
-func genCustomTypeName(tstr string) string { - len2 := genTypenameEnc.EncodedLen(len(tstr)) - bufx := make([]byte, len2) - genTypenameEnc.Encode(bufx, []byte(tstr)) - for i := len2 - 1; i >= 0; i-- { - if bufx[i] == '=' { - len2-- - } else { - break - } - } - return string(bufx[:len2]) -} - -func genIsImmutable(t reflect.Type) (v bool) { - return scalarBitset.isset(byte(t.Kind())) -} - -type genInternal struct { - Version int - Values []fastpathGenV +type genTmpl struct { + Values []genFastpathV Formats []string } -func (x genInternal) FastpathLen() (l int) { +func (x genTmpl) FastpathLen() (l int) { for _, v := range x.Values { // if v.Primitive == "" && !(v.MapKey == "" && v.Elem == "uint8") { if v.Primitive == "" { @@ -2353,7 +113,7 @@ func (x genInternal) FastpathLen() (l int) { return } -func genInternalZeroValue(s string) string { +func genTmplZeroValue(s string) string { switch s { case "interface{}", "interface {}": return "nil" @@ -2368,8 +128,8 @@ func genInternalZeroValue(s string) string { } } -var genInternalNonZeroValueIdx [6]uint64 -var genInternalNonZeroValueStrs = [...][6]string{ +var genTmplNonZeroValueIdx [6]uint64 +var genTmplNonZeroValueStrs = [...][6]string{ {`"string-is-an-interface-1"`, "true", `"some-string-1"`, `[]byte("some-string-1")`, "11.1", "111"}, {`"string-is-an-interface-2"`, "false", `"some-string-2"`, `[]byte("some-string-2")`, "22.2", "77"}, {`"string-is-an-interface-3"`, "true", `"some-string-3"`, `[]byte("some-string-3")`, "33.3e3", "127"}, @@ -2377,7 +137,7 @@ var genInternalNonZeroValueStrs = [...][6]string{ // Note: last numbers must be in range: 0-127 (as they may be put into a int8, uint8, etc) -func genInternalNonZeroValue(s string) string { +func genTmplNonZeroValue(s string) string { var i int switch s { case "interface{}", "interface {}": @@ -2393,14 +153,14 @@ func genInternalNonZeroValue(s string) string { default: i = 5 } - genInternalNonZeroValueIdx[i]++ - idx := genInternalNonZeroValueIdx[i] - slen := uint64(len(genInternalNonZeroValueStrs)) - return genInternalNonZeroValueStrs[idx%slen][i] // return string, to remove ambiguity + genTmplNonZeroValueIdx[i]++ + idx := genTmplNonZeroValueIdx[i] + slen := uint64(len(genTmplNonZeroValueStrs)) + return genTmplNonZeroValueStrs[idx%slen][i] // return string, to remove ambiguity } // Note: used for fastpath only -func genInternalEncCommandAsString(s string, vname string) string { +func genTmplEncCommandAsString(s string, vname string) string { switch s { case "uint64": return "e.e.EncodeUint(" + vname + ")" @@ -2411,7 +171,12 @@ func genInternalEncCommandAsString(s string, vname string) string { case "int", "int8", "int16", "int32": return "e.e.EncodeInt(int64(" + vname + "))" case "[]byte", "[]uint8", "bytes": - return "e.e.EncodeStringBytesRaw(" + vname + ")" + // return fmt.Sprintf( + // "if %s != nil { e.e.EncodeStringBytesRaw(%s) } "+ + // "else if e.h.NilCollectionToZeroLength { e.e.WriteArrayEmpty() } "+ + // "else { e.e.EncodeNil() }", vname, vname) + // return "e.e.EncodeStringBytesRaw(" + vname + ")" + return "e.e.EncodeBytes(" + vname + ")" case "string": return "e.e.EncodeString(" + vname + ")" case "float32": @@ -2423,12 +188,13 @@ func genInternalEncCommandAsString(s string, vname string) string { // case "symbol": // return "e.e.EncodeSymbol(" + vname + ")" default: - return "e.encode(" + vname + ")" + return fmt.Sprintf("if !e.encodeBuiltin(%s) { e.encodeR(reflect.ValueOf(%s)) }", vname, vname) + // return "e.encodeI(" + vname + ")" } } // Note: used for fastpath only -func 
genInternalDecCommandAsString(s string, mapkey bool) string { +func genTmplDecCommandAsString(s string, mapkey bool) string { switch s { case "uint": return "uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))" @@ -2458,77 +224,39 @@ func genInternalDecCommandAsString(s string, mapkey bool) string { // return "d.stringZC(d.d.DecodeStringAsBytes())" // } // return "string(d.d.DecodeStringAsBytes())" - return "d.stringZC(d.d.DecodeStringAsBytes())" + return "d.detach2Str(d.d.DecodeStringAsBytes())" case "[]byte", "[]uint8", "bytes": - return "d.d.DecodeBytes([]byte{})" + // return "bytesOk(d.d.DecodeBytes())" + return "bytesOKdbi(d.decodeBytesInto(v[uint(j)], false))" case "float32": - return "float32(d.decodeFloat32())" + return "float32(d.d.DecodeFloat32())" case "float64": return "d.d.DecodeFloat64()" case "complex64": - return "complex(d.decodeFloat32(), 0)" + return "complex(d.d.DecodeFloat32(), 0)" case "complex128": return "complex(d.d.DecodeFloat64(), 0)" case "bool": return "d.d.DecodeBool()" default: - halt.onerror(errors.New("gen internal: unknown type for decode: " + s)) + halt.error(errors.New("gen internal: unknown type for decode: " + s)) } return "" } -// func genInternalSortType(s string, elem bool) string { -// for _, v := range [...]string{ -// "int", -// "uint", -// "float", -// "bool", -// "string", -// "bytes", "[]uint8", "[]byte", -// } { -// if v == "[]byte" || v == "[]uint8" { -// v = "bytes" -// } -// if strings.HasPrefix(s, v) { -// if v == "int" || v == "uint" || v == "float" { -// v += "64" -// } -// if elem { -// return v -// } -// return v + "Slice" -// } -// } -// halt.onerror(errors.New("sorttype: unexpected type: " + s)) -// } - -func genInternalSortType(s string, elem bool) string { +func genTmplSortType(s string, elem bool) string { if elem { return s } return s + "Slice" } -// MARKER: keep in sync with codecgen/gen.go -func genStripVendor(s string) string { - // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. - // if s contains /vendor/ OR startsWith vendor/, then return everything after it. - const vendorStart = "vendor/" - const vendorInline = "/vendor/" - if i := strings.LastIndex(s, vendorInline); i >= 0 { - s = s[i+len(vendorInline):] - } else if strings.HasPrefix(s, vendorStart) { - s = s[len(vendorStart):] - } - return s -} +// var genTmplMu sync.Mutex +var genTmplV = genTmpl{} +var genTmplFuncs template.FuncMap +var genTmplOnce sync.Once -// var genInternalMu sync.Mutex -var genInternalV = genInternal{Version: genVersion} -var genInternalTmplFuncs template.FuncMap -var genInternalOnce sync.Once - -func genInternalInit() { +func genTmplInit() { wordSizeBytes := int(intBitsize) / 8 typesizes := map[string]int{ @@ -2579,78 +307,88 @@ func genInternalInit() { var primitivetypes, slicetypes, mapkeytypes, mapvaltypes []string primitivetypes = types[:] + slicetypes = types[:] mapkeytypes = types[:] mapvaltypes = types[:] if genFastpathTrimTypes { - // Note: we only create fast-paths for commonly used types. + // Note: we only create fastpaths for commonly used types. // Consequently, things like int8, uint16, uint, etc are commented out. 
- - slicetypes = genInternalFastpathSliceTypes() - mapkeytypes = genInternalFastpathMapKeyTypes() - mapvaltypes = genInternalFastpathMapValueTypes() + slicetypes = []string{ + "interface{}", + "string", + "[]byte", + "float32", + "float64", + "uint8", // keep fastpath, so it doesn't have to go through reflection + "uint64", + "int", + "int32", // rune + "int64", + "bool", + } + mapkeytypes = []string{ + "string", + "uint8", // byte + "uint64", // used for keys + "int", // default number key + "int32", // rune + } + mapvaltypes = []string{ + "interface{}", + "string", + "[]byte", + "uint8", // byte + "uint64", // used for keys, etc + "int", // default number + "int32", // rune (mostly used for unicode) + "float64", + "bool", + } } - // var mapkeytypes [len(&types) - 1]string // skip bool - // copy(mapkeytypes[:], types[:]) + var gt = genTmpl{Formats: genFormats} - // var mb []byte - // mb = append(mb, '|') - // for _, s := range mapkeytypes { - // mb = append(mb, s...) - // mb = append(mb, '|') - // } - // var mapkeytypestr = string(mb) - - var gt = genInternal{Version: genVersion, Formats: genFormats} - - // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function + // For each slice or map type, there must be a (symmetrical) Encode and Decode fastpath function for _, s := range primitivetypes { gt.Values = append(gt.Values, - fastpathGenV{Primitive: s, Size: typesizes[s], NoCanonical: !genFastpathCanonical}) + genFastpathV{Primitive: s, Size: typesizes[s], NoCanonical: !genFastpathCanonical}) } for _, s := range slicetypes { - // if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. - // gt.Values = append(gt.Values, fastpathGenV{Elem: s, Size: typesizes[s]}) - // } gt.Values = append(gt.Values, - fastpathGenV{Elem: s, Size: typesizes[s], NoCanonical: !genFastpathCanonical}) + genFastpathV{Elem: s, Size: typesizes[s], NoCanonical: !genFastpathCanonical}) } for _, s := range mapkeytypes { - // if _, ok := typesizes[s]; !ok { - // if strings.Contains(mapkeytypestr, "|"+s+"|") { - // gt.Values = append(gt.Values, fastpathGenV{MapKey: s, Elem: s, Size: 2 * typesizes[s]}) - // } for _, ms := range mapvaltypes { gt.Values = append(gt.Values, - fastpathGenV{MapKey: s, Elem: ms, Size: typesizes[s] + typesizes[ms], NoCanonical: !genFastpathCanonical}) + genFastpathV{MapKey: s, Elem: ms, Size: typesizes[s] + typesizes[ms], NoCanonical: !genFastpathCanonical}) } } funcs := make(template.FuncMap) // funcs["haspfx"] = strings.HasPrefix - funcs["encmd"] = genInternalEncCommandAsString - funcs["decmd"] = genInternalDecCommandAsString - funcs["zerocmd"] = genInternalZeroValue - funcs["nonzerocmd"] = genInternalNonZeroValue + funcs["encmd"] = genTmplEncCommandAsString + funcs["decmd"] = genTmplDecCommandAsString + funcs["zerocmd"] = genTmplZeroValue + funcs["nonzerocmd"] = genTmplNonZeroValue funcs["hasprefix"] = strings.HasPrefix - funcs["sorttype"] = genInternalSortType + funcs["sorttype"] = genTmplSortType - genInternalV = gt - genInternalTmplFuncs = funcs + genTmplV = gt + genTmplFuncs = funcs } -// genInternalGoFile is used to generate source files from templates. -func genInternalGoFile(r io.Reader, w io.Writer) (err error) { - genInternalOnce.Do(genInternalInit) +// genTmplGoFile is used to generate source files from templates. 
+func genTmplGoFile(r io.Reader, w io.Writer) (err error) { + genTmplOnce.Do(genTmplInit) - gt := genInternalV + gt := genTmplV - t := template.New("").Funcs(genInternalTmplFuncs) + t := template.New("").Funcs(genTmplFuncs) - tmplstr, err := ioutil.ReadAll(r) + tmplstr, err := io.ReadAll(r) if err != nil { return } @@ -2675,175 +413,7 @@ func genInternalGoFile(r io.Reader, w io.Writer) (err error) { return } -func genInternalFastpathSliceTypes() []string { - return []string{ - "interface{}", - "string", - "[]byte", - "float32", - "float64", - // "uint", - // "uint8", // no need for fastpath of []uint8, as it is handled specially - "uint8", // keep fast-path, so it doesn't have to go through reflection - // "uint16", - // "uint32", - "uint64", - // "uintptr", - "int", - // "int8", - // "int16", - "int32", // rune - "int64", - "bool", - } -} - -func genInternalFastpathMapKeyTypes() []string { - return []string{ - // "interface{}", - "string", - // "[]byte", - // "float32", - // "float64", - // "uint", - "uint8", // byte - // "uint16", - // "uint32", - "uint64", // used for keys - // "uintptr", - "int", // default number key - // "int8", - // "int16", - "int32", // rune - // "int64", - // "bool", - } -} - -func genInternalFastpathMapValueTypes() []string { - return []string{ - "interface{}", - "string", - "[]byte", - // "uint", - "uint8", // byte - // "uint16", - // "uint32", - "uint64", // used for keys, etc - // "uintptr", - "int", // default number - //"int8", - // "int16", - "int32", // rune (mostly used for unicode) - // "int64", - // "float32", - "float64", - "bool", - } -} - -// sort-slice ... -// generates sort implementations for -// various slice types and combination slice+reflect.Value types. -// -// The combination slice+reflect.Value types are used -// during canonical encode, and the others are used during fast-path -// encoding of map keys. - -// genInternalSortableTypes returns the types -// that are used for fast-path canonical's encoding of maps. -// -// For now, we only support the highest sizes for -// int64, uint64, float64, bool, string, bytes. -func genInternalSortableTypes() []string { - return genInternalFastpathMapKeyTypes() -} - -// genInternalSortablePlusTypes returns the types -// that are used for reflection-based canonical's encoding of maps. -// -// For now, we only support the highest sizes for -// int64, uint64, float64, string, bytes. 
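// A minimal illustrative sketch (not part of the vendored patch) of the
// FuncMap-driven template generation that genTmplInit/genTmplGoFile perform.
// The miniature template, the data struct, and the stand-in "encmd" helper
// (playing the role of genTmplEncCommandAsString) are all made up here.
package main

import (
	"os"
	"text/template"
)

// encmd maps a type name to the encode expression the generated code should emit.
func encmd(typ, vname string) string {
	switch typ {
	case "string":
		return "e.e.EncodeString(" + vname + ")"
	case "[]byte":
		return "e.e.EncodeBytes(" + vname + ")"
	default:
		return "e.encode(" + vname + ")"
	}
}

const sliceTmpl = `func fastpathEncSlice{{.Name}}(v []{{.Typ}}) {
	for _, x := range v {
		{{encmd .Typ "x"}}
	}
}
`

func main() {
	t := template.Must(template.New("").
		Funcs(template.FuncMap{"encmd": encmd}).
		Parse(sliceTmpl))
	// Executing the template writes generated Go-like source to stdout.
	if err := t.Execute(os.Stdout, struct{ Name, Typ string }{"String", "string"}); err != nil {
		panic(err)
	}
}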
-func genInternalSortablePlusTypes() []string { - return []string{ - "string", - "float64", - "uint64", - // "uintptr", - "int64", - // "bool", - "time", - "bytes", - } -} - -func genTypeForShortName(s string) string { - switch s { - case "time": - return "time.Time" - case "bytes": - return "[]byte" - } - return s -} - -func genArgs(args ...interface{}) map[string]interface{} { - m := make(map[string]interface{}, len(args)/2) - for i := 0; i < len(args); { - m[args[i].(string)] = args[i+1] - i += 2 - } - return m -} - -func genEndsWith(s0 string, sn ...string) bool { - for _, s := range sn { - if strings.HasSuffix(s0, s) { - return true - } - } - return false -} - -func genCheckErr(err error) { - halt.onerror(err) -} - -func genRunSortTmpl2Go(fnameIn, fnameOut string) { - var err error - - funcs := make(template.FuncMap) - funcs["sortables"] = genInternalSortableTypes - funcs["sortablesplus"] = genInternalSortablePlusTypes - funcs["tshort"] = genTypeForShortName - funcs["endswith"] = genEndsWith - funcs["args"] = genArgs - - t := template.New("").Funcs(funcs) - fin, err := os.Open(fnameIn) - genCheckErr(err) - defer fin.Close() - fout, err := os.Create(fnameOut) - genCheckErr(err) - defer fout.Close() - tmplstr, err := ioutil.ReadAll(fin) - genCheckErr(err) - t, err = t.Parse(string(tmplstr)) - genCheckErr(err) - var out bytes.Buffer - err = t.Execute(&out, 0) - genCheckErr(err) - bout, err := format.Source(out.Bytes()) - if err != nil { - fout.Write(out.Bytes()) // write out if error, so we can still see. - } - genCheckErr(err) - // write out if error, as much as possible, so we can still see. - _, err = fout.Write(bout) - genCheckErr(err) -} - -func genRunTmpl2Go(fnameIn, fnameOut string) { +func genTmplRun2Go(fnameIn, fnameOut string) { // println("____ " + fnameIn + " --> " + fnameOut + " ______") fin, err := os.Open(fnameIn) genCheckErr(err) @@ -2851,30 +421,6 @@ func genRunTmpl2Go(fnameIn, fnameOut string) { fout, err := os.Create(fnameOut) genCheckErr(err) defer fout.Close() - err = genInternalGoFile(fin, fout) + err = genTmplGoFile(fin, fout) genCheckErr(err) } - -// --- some methods here for other types, which are only used in codecgen - -// depth returns number of valid nodes in the hierachy -func (path *structFieldInfoPathNode) root() *structFieldInfoPathNode { -TOP: - if path.parent != nil { - path = path.parent - goto TOP - } - return path -} - -func (path *structFieldInfoPathNode) fullpath() (p []*structFieldInfoPathNode) { - // this method is mostly called by a command-line tool - it's not optimized, and that's ok. - // it shouldn't be used in typical runtime use - as it does unnecessary allocation. - d := path.depth() - p = make([]*structFieldInfoPathNode, d) - for d--; d >= 0; d-- { - p[d] = path - path = path.parent - } - return -} diff --git a/vendor/github.com/ugorji/go/codec/gen_mono.go b/vendor/github.com/ugorji/go/codec/gen_mono.go new file mode 100644 index 000000000..841147863 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_mono.go @@ -0,0 +1,586 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +//go:build codec.build + +package codec + +import ( + "go/ast" + "go/format" + "go/parser" + "go/token" + "os" + "slices" + "strings" +) + +// This tool will monomorphize types scoped to a specific format. +// +// This tool only monomorphized the type Name, and not a function Name. 
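// A minimal illustrative sketch (not part of the vendored patch) of the go/ast
// inspection this tool builds on: find type declarations with a single type
// parameter named T, which is roughly the shape genMonoTypeParamsOk accepts
// before a type is monomorphized. The input source string is made up.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package codec

type encFn[T encDriver] struct{ e T }
type notGeneric struct{}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, parser.SkipObjectResolution)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		ts, ok := n.(*ast.TypeSpec)
		if !ok || ts.TypeParams == nil || len(ts.TypeParams.List) != 1 {
			return true
		}
		p := ts.TypeParams.List[0]
		if len(p.Names) == 1 && p.Names[0].Name == "T" {
			// a candidate for monomorphization, e.g. encFn -> encFnJsonBytes
			fmt.Println("generic type found:", ts.Name.Name)
		}
		return true
	})
}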
+// Explicitly, generic functions are not supported, as they cannot be monomorphized +// to a specific format without a corresponding name change. +// +// However, for types constrained to encWriter or decReader, +// which are shared across formats, there's no place to put them without duplication. + +const genMonoParserMode = parser.AllErrors | parser.SkipObjectResolution + +var genMonoSpecialFieldTypes = []string{"helperDecReader"} + +// These functions should take the address of first param when monomorphized +var genMonoSpecialFunc4Addr = []string{} // {"decByteSlice"} + +var genMonoImportsToSkip = []string{`"errors"`, `"fmt"`, `"net/rpc"`} + +var genMonoRefImportsVia_ = [][2]string{ + // {"errors", "New"}, +} + +var genMonoCallsToSkip = []string{"callMake"} + +type genMonoFieldState uint + +const ( + genMonoFieldRecv genMonoFieldState = iota << 1 + genMonoFieldParamsResult + genMonoFieldStruct +) + +type genMonoImports struct { + set map[string]struct{} + specs []*ast.ImportSpec +} + +type genMono struct { + files map[string][]byte + typParam map[string]*ast.Field + typParamTransient map[string]*ast.Field +} + +func (x *genMono) init() { + x.files = make(map[string][]byte) + x.typParam = make(map[string]*ast.Field) + x.typParamTransient = make(map[string]*ast.Field) +} + +func (x *genMono) reset() { + clear(x.typParam) + clear(x.typParamTransient) +} + +func (m *genMono) hdl(hname string) { + m.reset() + m.do(hname, []string{"encode.go", "decode.go", hname + ".go"}, []string{"base.notfastpath.go", "base.notfastpath.notmono.go"}, "", "") + m.do(hname, []string{"base.notfastpath.notmono.go"}, nil, ".notfastpath", ` && (notfastpath || codec.notfastpath)`) + m.do(hname, []string{"base.fastpath.notmono.generated.go"}, []string{"base.fastpath.generated.go"}, ".fastpath", ` && !notfastpath && !codec.notfastpath`) +} + +func (m *genMono) do(hname string, fnames, tnames []string, fnameInfx string, buildTagsSfx string) { + // keep m.typParams across whole call, as all others use it + const fnameSfx = ".mono.generated.go" + fname := hname + fnameInfx + fnameSfx + + var imports = genMonoImports{set: make(map[string]struct{})} + + r1, fset := m.merge(fnames, tnames, &imports) + m.trFile(r1, hname, true) + + r2, fset := m.merge(fnames, tnames, &imports) + m.trFile(r2, hname, false) + + r0 := genMonoOutInit(imports.specs, fname) + r0.Decls = append(r0.Decls, r1.Decls...) + r0.Decls = append(r0.Decls, r2.Decls...) + + // output r1 to a file + f, err := os.Create(fname) + halt.onerror(err) + defer f.Close() + + var s genMonoStrBuilder + s.s(`//go:build !notmono && !codec.notmono `).s(buildTagsSfx).s(` + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +`) + _, err = f.Write(s.v) + halt.onerror(err) + err = format.Node(f, fset, r0) + halt.onerror(err) +} + +func (x *genMono) file(fname string) (b []byte) { + b = x.files[fname] + if b == nil { + var err error + b, err = os.ReadFile(fname) + halt.onerror(err) + x.files[fname] = b + } + return +} + +func (x *genMono) merge(fNames, tNames []string, imports *genMonoImports) (dst *ast.File, fset *token.FileSet) { + // typParams used in fnLoadTyps + var typParams map[string]*ast.Field + var loadTyps bool + fnLoadTyps := func(node ast.Node) bool { + var ok bool + switch n := node.(type) { + case *ast.GenDecl: + if n.Tok == token.TYPE { + for _, v := range n.Specs { + nn := v.(*ast.TypeSpec) + ok = genMonoTypeParamsOk(nn.TypeParams) + if ok { + // each decl will have only 1 var/type + typParams[nn.Name.Name] = nn.TypeParams.List[0] + if loadTyps { + dst.Decls = append(dst.Decls, &ast.GenDecl{Tok: n.Tok, Specs: []ast.Spec{v}}) + } + } + } + } + return false + } + return true + } + + // we only merge top-level methods and types + fnIdX := func(n *ast.FuncDecl, n2 *ast.IndexExpr) (ok bool) { + n9, ok9 := n2.Index.(*ast.Ident) + n3, ok := n2.X.(*ast.Ident) // n3 = type name + ok = ok && ok9 && n9.Name == "T" + if ok { + _, ok = x.typParam[n3.Name] + } + return + } + + fnLoadMethodsAndImports := func(node ast.Node) bool { + var ok bool + switch n := node.(type) { + case *ast.FuncDecl: + // TypeParams is nil for methods, as it is defined at the type node + // instead, look at the name, and + // if IndexExpr.Index=T, and IndexExpr.X matches a type name seen already + // then ok = true + if n.Recv == nil || len(n.Recv.List) != 1 { + return false + } + ok = false + switch nn := n.Recv.List[0].Type.(type) { + case *ast.IndexExpr: + ok = fnIdX(n, nn) + case *ast.StarExpr: + switch nn2 := nn.X.(type) { + case *ast.IndexExpr: + ok = fnIdX(n, nn2) + } + } + if ok { + dst.Decls = append(dst.Decls, n) + } + return false + case *ast.GenDecl: + if n.Tok == token.IMPORT { + for _, v := range n.Specs { + nn := v.(*ast.ImportSpec) + if slices.Contains(genMonoImportsToSkip, nn.Path.Value) { + continue + } + if _, ok = imports.set[nn.Path.Value]; !ok { + imports.specs = append(imports.specs, nn) + imports.set[nn.Path.Value] = struct{}{} + } + } + } + return false + } + return true + } + + fset = token.NewFileSet() + fnLoadAsts := func(names []string) (asts []*ast.File) { + for _, fname := range names { + fsrc := x.file(fname) + f, err := parser.ParseFile(fset, fname, fsrc, genMonoParserMode) + halt.onerror(err) + asts = append(asts, f) + } + return + } + + clear(x.typParamTransient) + + dst = &ast.File{ + Name: &ast.Ident{Name: "codec"}, + } + + fs := fnLoadAsts(fNames) + ts := fnLoadAsts(tNames) + + loadTyps = true + typParams = x.typParam + for _, v := range fs { + ast.Inspect(v, fnLoadTyps) + } + loadTyps = false + typParams = x.typParamTransient + for _, v := range ts { + ast.Inspect(v, fnLoadTyps) + } + typParams = nil + for _, v := range fs { + ast.Inspect(v, fnLoadMethodsAndImports) + } + + return +} + +func (x *genMono) trFile(r *ast.File, hname string, isbytes bool) { + fn := func(node ast.Node) bool { + switch n := node.(type) { + case *ast.TypeSpec: + // type x[T encDriver] struct { ... 
} + if !genMonoTypeParamsOk(n.TypeParams) { + return false + } + x.trType(n, hname, isbytes) + return false + case *ast.FuncDecl: + if n.Recv == nil || len(n.Recv.List) != 1 { + return false + } + if _, ok := n.Recv.List[0].Type.(*ast.Ident); ok { + return false + } + tp := x.trMethodSign(n, hname, isbytes) // receiver, params, results + // handle the body + x.trMethodBody(n.Body, tp, hname, isbytes) + return false + } + return true + } + ast.Inspect(r, fn) + + // set type params to nil, and Pos to NoPos + fn = func(node ast.Node) bool { + switch n := node.(type) { + case *ast.FuncType: + if genMonoTypeParamsOk(n.TypeParams) { + n.TypeParams = nil + } + case *ast.TypeSpec: // for type ... + if genMonoTypeParamsOk(n.TypeParams) { + n.TypeParams = nil + } + } + return true + } + ast.Inspect(r, fn) +} + +func (x *genMono) trType(n *ast.TypeSpec, hname string, isbytes bool) { + sfx, _, _, hnameUp := genMonoIsBytesVals(hname, isbytes) + tp := n.TypeParams.List[0] + switch tp.Type.(*ast.Ident).Name { + case "encDriver", "decDriver": + n.Name.Name += hnameUp + sfx + case "encWriter", "decReader": + n.Name.Name += sfx + } + + // handle the Struct and Array types + switch nn := n.Type.(type) { + case *ast.StructType: + x.trStruct(nn, tp, hname, isbytes) + case *ast.ArrayType: + x.trArray(nn, tp, hname, isbytes) + } +} + +func (x *genMono) trMethodSign(n *ast.FuncDecl, hname string, isbytes bool) (tp *ast.Field) { + // check if recv type is not parameterized + tp = x.trField(n.Recv.List[0], nil, hname, isbytes, genMonoFieldRecv) + // handle params and results + x.trMethodSignNonRecv(n.Type.Params, tp, hname, isbytes) + x.trMethodSignNonRecv(n.Type.Results, tp, hname, isbytes) + return +} + +func (x *genMono) trMethodSignNonRecv(r *ast.FieldList, tp *ast.Field, hname string, isbytes bool) { + if r == nil || len(r.List) == 0 { + return + } + for _, v := range r.List { + x.trField(v, tp, hname, isbytes, genMonoFieldParamsResult) + } +} + +func (x *genMono) trStruct(r *ast.StructType, tp *ast.Field, hname string, isbytes bool) { + // search for fields, and update accordingly + // type x[T encDriver] struct { w T } + // var x *A[T] + // A[T] + if r == nil || r.Fields == nil || len(r.Fields.List) == 0 { + return + } + for _, v := range r.Fields.List { + x.trField(v, tp, hname, isbytes, genMonoFieldStruct) + } +} + +func (x *genMono) trArray(n *ast.ArrayType, tp *ast.Field, hname string, isbytes bool) { + sfx, _, _, hnameUp := genMonoIsBytesVals(hname, isbytes) + // type fastpathEs[T encDriver] [56]fastpathE[T] + // p := tp.Names[0].Name + switch elt := n.Elt.(type) { + // case *ast.InterfaceType: + case *ast.IndexExpr: + if elt.Index.(*ast.Ident).Name == "T" { // generic + n.Elt = ast.NewIdent(elt.X.(*ast.Ident).Name + hnameUp + sfx) + } + } +} + +func (x *genMono) trMethodBody(r *ast.BlockStmt, tp *ast.Field, hname string, isbytes bool) { + // find the parent node for an indexExpr, or a T/*T, and set the value back in there + + fn := func(pnode ast.Node) bool { + var pn *ast.Ident + fnUp := func() { + x.updateIdentForT(pn, hname, tp, isbytes, false) + } + switch n := pnode.(type) { + // case *ast.SelectorExpr: + // case *ast.TypeAssertExpr: + // case *ast.IndexExpr: + case *ast.StarExpr: + if genMonoUpdateIndexExprT(&pn, n.X) { + n.X = pn + fnUp() + } + case *ast.CallExpr: + for i4, n4 := range n.Args { + if genMonoUpdateIndexExprT(&pn, n4) { + n.Args[i4] = pn + fnUp() + } + } + if n4, ok4 := n.Fun.(*ast.Ident); ok4 && slices.Contains(genMonoSpecialFunc4Addr, n4.Name) { + n.Args[0] = &ast.UnaryExpr{Op: 
token.AND, X: n.Args[0].(*ast.SelectorExpr)} + } + case *ast.CompositeLit: + if genMonoUpdateIndexExprT(&pn, n.Type) { + n.Type = pn + fnUp() + } + case *ast.ArrayType: + if genMonoUpdateIndexExprT(&pn, n.Elt) { + n.Elt = pn + fnUp() + } + case *ast.ValueSpec: + for i2, n2 := range n.Values { + if genMonoUpdateIndexExprT(&pn, n2) { + n.Values[i2] = pn + fnUp() + } + } + if genMonoUpdateIndexExprT(&pn, n.Type) { + n.Type = pn + fnUp() + } + case *ast.BinaryExpr: + // early return here, since the 2 things can apply + if genMonoUpdateIndexExprT(&pn, n.X) { + n.X = pn + fnUp() + } + if genMonoUpdateIndexExprT(&pn, n.Y) { + n.Y = pn + fnUp() + } + return true + } + return true + } + ast.Inspect(r, fn) +} + +func (x *genMono) trField(f *ast.Field, tpt *ast.Field, hname string, isbytes bool, state genMonoFieldState) (tp *ast.Field) { + var pn *ast.Ident + switch nn := f.Type.(type) { + case *ast.IndexExpr: + if genMonoUpdateIndexExprT(&pn, nn) { + f.Type = pn + } + case *ast.StarExpr: + if genMonoUpdateIndexExprT(&pn, nn.X) { + nn.X = pn + } + case *ast.FuncType: + x.trMethodSignNonRecv(nn.Params, tpt, hname, isbytes) + x.trMethodSignNonRecv(nn.Results, tpt, hname, isbytes) + return + case *ast.ArrayType: + x.trArray(nn, tpt, hname, isbytes) + return + case *ast.Ident: + if state == genMonoFieldRecv || nn.Name != "T" { + return + } + pn = nn // "T" + if state == genMonoFieldParamsResult { + f.Type = &ast.StarExpr{X: pn} + } + } + if pn == nil { + return + } + + tp = x.updateIdentForT(pn, hname, tpt, isbytes, true) + return +} + +func (x *genMono) updateIdentForT(pn *ast.Ident, hname string, tp *ast.Field, + isbytes bool, lookupTP bool) (tp2 *ast.Field) { + sfx, writer, reader, hnameUp := genMonoIsBytesVals(hname, isbytes) + // handle special ones e.g. helperDecReader et al + if slices.Contains(genMonoSpecialFieldTypes, pn.Name) { + pn.Name += sfx + return + } + + if pn.Name != "T" && lookupTP { + tp = x.typParam[pn.Name] + if tp == nil { + tp = x.typParamTransient[pn.Name] + } + } + + paramtyp := tp.Type.(*ast.Ident).Name + if pn.Name == "T" { + switch paramtyp { + case "encDriver", "decDriver": + pn.Name = hname + genMonoTitleCase(paramtyp) + sfx + case "encWriter": + pn.Name = writer + case "decReader": + pn.Name = reader + } + } else { + switch paramtyp { + case "encDriver", "decDriver": + pn.Name += hnameUp + sfx + case "encWriter", "decReader": + pn.Name += sfx + } + } + return tp +} + +func genMonoUpdateIndexExprT(pn **ast.Ident, node ast.Node) (pnok bool) { + *pn = nil + if n2, ok := node.(*ast.IndexExpr); ok { + n9, ok9 := n2.Index.(*ast.Ident) + n3, ok := n2.X.(*ast.Ident) + if ok && ok9 && n9.Name == "T" { + *pn, pnok = ast.NewIdent(n3.Name), true + } + } + return +} + +func genMonoTitleCase(s string) string { + return strings.ToUpper(s[:1]) + s[1:] +} + +func genMonoIsBytesVals(hName string, isbytes bool) (suffix, writer, reader, hNameUp string) { + hNameUp = genMonoTitleCase(hName) + if isbytes { + return "Bytes", "bytesEncAppender", "bytesDecReader", hNameUp + } + return "IO", "bufioEncWriter", "ioDecReader", hNameUp +} + +func genMonoTypeParamsOk(v *ast.FieldList) (ok bool) { + if v == nil || v.List == nil || len(v.List) != 1 { + return false + } + pn := v.List[0] + if len(pn.Names) != 1 { + return false + } + pnName := pn.Names[0].Name + if pnName != "T" { + return false + } + // ignore any nodes which are not idents e.g. 
cmp.orderedRv + vv, ok := pn.Type.(*ast.Ident) + if !ok { + return false + } + switch vv.Name { + case "encDriver", "decDriver", "encWriter", "decReader": + return true + } + return false +} + +func genMonoCopy(src *ast.File) (dst *ast.File) { + dst = &ast.File{ + Name: &ast.Ident{Name: "codec"}, + } + dst.Decls = append(dst.Decls, src.Decls...) + return +} + +type genMonoStrBuilder struct { + v []byte +} + +func (x *genMonoStrBuilder) s(v string) *genMonoStrBuilder { + x.v = append(x.v, v...) + return x +} + +func genMonoOutInit(importSpecs []*ast.ImportSpec, fname string) (f *ast.File) { + // ParseFile seems to skip the //go:build stanza + // it should be written directly into the file + var s genMonoStrBuilder + s.s(` +package codec + +import ( +`) + for _, v := range importSpecs { + s.s("\t").s(v.Path.Value).s("\n") + } + s.s(")\n") + for _, v := range genMonoRefImportsVia_ { + s.s("var _ = ").s(v[0]).s(".").s(v[1]).s("\n") + } + f, err := parser.ParseFile(token.NewFileSet(), fname, s.v, genMonoParserMode) + halt.onerror(err) + return +} + +func genMonoAll() { + // hdls := []Handle{ + // (*SimpleHandle)(nil), + // (*JsonHandle)(nil), + // (*CborHandle)(nil), + // (*BincHandle)(nil), + // (*MsgpackHandle)(nil), + // } + hdls := []string{"simple", "json", "cbor", "binc", "msgpack"} + var m genMono + m.init() + for _, v := range hdls { + m.hdl(v) + } +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go deleted file mode 100644 index 25c5b0208..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build go1.5 -// +build go1.5 - -package codec - -import "reflect" - -const reflectArrayOfSupported = true - -func reflectArrayOf(count int, elem reflect.Type) reflect.Type { - return reflect.ArrayOf(count, elem) -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go deleted file mode 100644 index a32dfd7de..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build !go1.5 -// +build !go1.5 - -package codec - -import ( - "errors" - "reflect" -) - -const reflectArrayOfSupported = false - -var errNoReflectArrayOf = errors.New("codec: reflect.ArrayOf unsupported by this go version") - -func reflectArrayOf(count int, elem reflect.Type) reflect.Type { - panic(errNoReflectArrayOf) -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_check_supported.go b/vendor/github.com/ugorji/go/codec/goversion_check_supported.go new file mode 100644 index 000000000..067c5e9ce --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_check_supported.go @@ -0,0 +1,20 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +//go:build !go1.21 + +package codec + +import "errors" + +// Moving forward, this codec package will support at least the last 4 major Go releases. +// +// As of early summer 2025, codec will support go 1.21, 1.22, 1.23, 1.24 releases of go. 
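// A minimal illustrative sketch (not part of the vendored patch) of the
// merge-and-render approach gen_mono.go uses (genMonoOutInit plus format.Node):
// declarations parsed from one source are appended to a freshly parsed output
// skeleton, then printed with go/format so the result is gofmt-clean. The file
// names and the helloJsonBytes function here are made up.
package main

import (
	"go/format"
	"go/parser"
	"go/token"
	"os"
)

func main() {
	fset := token.NewFileSet()
	mode := parser.AllErrors | parser.SkipObjectResolution

	// Parse a bare skeleton that will receive the merged declarations.
	skel, err := parser.ParseFile(fset, "out.go", "package codec\n", mode)
	if err != nil {
		panic(err)
	}

	// Parse an input "file" and move its top-level declarations across.
	src := "package codec\n\nfunc helloJsonBytes() string { return \"hi\" }\n"
	in, err := parser.ParseFile(fset, "in.go", src, mode)
	if err != nil {
		panic(err)
	}
	skel.Decls = append(skel.Decls, in.Decls...)

	// Render the merged AST as formatted Go source.
	if err := format.Node(os.Stdout, fset, skel); err != nil {
		panic(err)
	}
}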
+// This allows use of the followin: +// - stabilized generics +// - min/max/clear +// - slice->array conversion + +func init() { + panic(errors.New("codec: supports go 1.21 and above only")) +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_fmt_time_gte_go15.go b/vendor/github.com/ugorji/go/codec/goversion_fmt_time_gte_go15.go deleted file mode 100644 index 688d6b62d..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_fmt_time_gte_go15.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build go1.5 -// +build go1.5 - -package codec - -import "time" - -func fmtTime(t time.Time, fmt string, b []byte) []byte { - return t.AppendFormat(b, fmt) -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_fmt_time_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_fmt_time_lt_go15.go deleted file mode 100644 index a1b8b973e..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_fmt_time_lt_go15.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build !go1.5 -// +build !go1.5 - -package codec - -import "time" - -func fmtTime(t time.Time, fmt string, b []byte) []byte { - s := t.Format(fmt) - b = b[:len(s)] - copy(b, s) - return b -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_growslice_unsafe_gte_go120.go b/vendor/github.com/ugorji/go/codec/goversion_growslice_unsafe_gte_go120.go deleted file mode 100644 index d5fed78e2..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_growslice_unsafe_gte_go120.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build go1.20 && !safe && !codec.safe && !appengine -// +build go1.20,!safe,!codec.safe,!appengine - -package codec - -import ( - _ "reflect" // needed for go linkname(s) - "unsafe" -) - -func growslice(typ unsafe.Pointer, old unsafeSlice, num int) (s unsafeSlice) { - // culled from GOROOT/runtime/slice.go - num -= old.Cap - old.Len - s = rtgrowslice(old.Data, old.Cap+num, old.Cap, num, typ) - s.Len = old.Len - return -} - -//go:linkname rtgrowslice runtime.growslice -//go:noescape -func rtgrowslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, typ unsafe.Pointer) unsafeSlice - -// //go:linkname growslice reflect.growslice -// //go:noescape -// func growslice(typ unsafe.Pointer, old unsafeSlice, cap int) unsafeSlice diff --git a/vendor/github.com/ugorji/go/codec/goversion_growslice_unsafe_lt_go120.go b/vendor/github.com/ugorji/go/codec/goversion_growslice_unsafe_lt_go120.go deleted file mode 100644 index 550c5d9e0..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_growslice_unsafe_lt_go120.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -//go:build go1.9 && !go1.20 && !safe && !codec.safe && !appengine -// +build go1.9,!go1.20,!safe,!codec.safe,!appengine - -package codec - -import ( - _ "runtime" // needed for go linkname(s) - "unsafe" -) - -//go:linkname growslice runtime.growslice -//go:noescape -func growslice(typ unsafe.Pointer, old unsafeSlice, num int) unsafeSlice diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go110.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go110.go deleted file mode 100644 index 805303172..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go110.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build !go1.10 -// +build !go1.10 - -package codec - -import "reflect" - -func makeMapReflect(t reflect.Type, size int) reflect.Value { - return reflect.MakeMap(t) -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_not_unsafe_gte_go110.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_not_unsafe_gte_go110.go deleted file mode 100644 index 46f787db3..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_makemap_not_unsafe_gte_go110.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build go1.10 && (safe || codec.safe || appengine) -// +build go1.10 -// +build safe codec.safe appengine - -package codec - -import "reflect" - -func makeMapReflect(t reflect.Type, size int) reflect.Value { - return reflect.MakeMapWithSize(t, size) -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_unsafe_gte_go110.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_unsafe_gte_go110.go deleted file mode 100644 index 03c069f0f..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_makemap_unsafe_gte_go110.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build go1.10 && !safe && !codec.safe && !appengine -// +build go1.10,!safe,!codec.safe,!appengine - -package codec - -import ( - "reflect" - "unsafe" -) - -func makeMapReflect(typ reflect.Type, size int) (rv reflect.Value) { - t := (*unsafeIntf)(unsafe.Pointer(&typ)).ptr - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - urv.typ = t - urv.flag = uintptr(reflect.Map) - urv.ptr = makemap(t, size, nil) - return -} - -//go:linkname makemap runtime.makemap -//go:noescape -func makemap(typ unsafe.Pointer, size int, h unsafe.Pointer) unsafe.Pointer diff --git a/vendor/github.com/ugorji/go/codec/goversion_maprange_gte_go112.go b/vendor/github.com/ugorji/go/codec/goversion_maprange_gte_go112.go deleted file mode 100644 index 16c8921ba..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_maprange_gte_go112.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -//go:build go1.12 && (safe || codec.safe || appengine) -// +build go1.12 -// +build safe codec.safe appengine - -package codec - -import "reflect" - -type mapIter struct { - t *reflect.MapIter - m reflect.Value - values bool -} - -func (t *mapIter) Next() (r bool) { - return t.t.Next() -} - -func (t *mapIter) Key() reflect.Value { - return t.t.Key() -} - -func (t *mapIter) Value() (r reflect.Value) { - if t.values { - return t.t.Value() - } - return -} - -func (t *mapIter) Done() {} - -func mapRange(t *mapIter, m, k, v reflect.Value, values bool) { - *t = mapIter{ - m: m, - t: m.MapRange(), - values: values, - } -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_maprange_lt_go112.go b/vendor/github.com/ugorji/go/codec/goversion_maprange_lt_go112.go deleted file mode 100644 index 85c8ea72f..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_maprange_lt_go112.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build go1.7 && !go1.12 && (safe || codec.safe || appengine) -// +build go1.7 -// +build !go1.12 -// +build safe codec.safe appengine - -package codec - -import "reflect" - -type mapIter struct { - m reflect.Value - keys []reflect.Value - j int - values bool -} - -func (t *mapIter) Next() (r bool) { - t.j++ - return t.j < len(t.keys) -} - -func (t *mapIter) Key() reflect.Value { - return t.keys[t.j] -} - -func (t *mapIter) Value() (r reflect.Value) { - if t.values { - return t.m.MapIndex(t.keys[t.j]) - } - return -} - -func (t *mapIter) Done() {} - -func mapRange(t *mapIter, m, k, v reflect.Value, values bool) { - *t = mapIter{ - m: m, - keys: m.MapKeys(), - values: values, - j: -1, - } -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_noswissmap_unsafe.go b/vendor/github.com/ugorji/go/codec/goversion_noswissmap_unsafe.go new file mode 100644 index 000000000..ca61f6f08 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_noswissmap_unsafe.go @@ -0,0 +1,16 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +//go:build !safe && !codec.safe && !appengine && !go1.24 + +package codec + +import "unsafe" + +// retrofited from hIter struct + +type unsafeMapIterPadding struct { + _ [6]unsafe.Pointer // padding: *maptype, *hmap, buckets, *bmap, overflow, oldoverflow, + _ [4]uintptr // padding: uintptr, uint8, bool fields + _ uintptr // padding: wasted (try to fill cache-line at multiple of 4) +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_swissmap_unsafe.go b/vendor/github.com/ugorji/go/codec/goversion_swissmap_unsafe.go new file mode 100644 index 000000000..5f062e87c --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_swissmap_unsafe.go @@ -0,0 +1,15 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +//go:build !safe && !codec.safe && !appengine && go1.24 + +package codec + +import "unsafe" + +// retrofited from linknameIter struct (compatibility layer for swissmaps) + +type unsafeMapIterPadding struct { + _ [2]unsafe.Pointer // padding: *abi.SwissMapType, *maps.Iter + _ uintptr // padding: wasted (try to fill cache-line at multiple of 4) +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go deleted file mode 100644 index c894a30c1..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build go1.10 -// +build go1.10 - -package codec - -const allowSetUnexportedEmbeddedPtr = false diff --git a/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go deleted file mode 100644 index 1476eac01..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build !go1.10 -// +build !go1.10 - -package codec - -const allowSetUnexportedEmbeddedPtr = true diff --git a/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go deleted file mode 100644 index c093eebd2..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build !go1.4 -// +build !go1.4 - -package codec - -import "errors" - -// This codec package will only work for go1.4 and above. -// This is for the following reasons: -// - go 1.4 was released in 2014 -// - go runtime is written fully in go -// - interface only holds pointers -// - reflect.Value is stabilized as 3 words - -var errCodecSupportedOnlyFromGo14 = errors.New("codec: go 1.3 and below are not supported") - -func init() { - panic(errCodecSupportedOnlyFromGo14) -} diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go deleted file mode 100644 index e1dfce4a7..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build go1.5 && !go1.6 -// +build go1.5,!go1.6 - -package codec - -import "os" - -var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go deleted file mode 100644 index 5cb4564d2..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -//go:build go1.6 && !go1.7 -// +build go1.6,!go1.7 - -package codec - -import "os" - -var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go deleted file mode 100644 index 82ef3ef88..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build go1.7 -// +build go1.7 - -package codec - -const genCheckVendor = true diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go deleted file mode 100644 index 10274048a..000000000 --- a/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build !go1.5 -// +build !go1.5 - -package codec - -var genCheckVendor = false diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go index ecd87ba53..e737ef2b9 100644 --- a/vendor/github.com/ugorji/go/codec/helper.go +++ b/vendor/github.com/ugorji/go/codec/helper.go @@ -190,14 +190,17 @@ package codec // These are the TransientAddrK and TransientAddr2K methods of decPerType. import ( + "bytes" "encoding" "encoding/binary" + "encoding/hex" "errors" "fmt" "io" "math" "reflect" "runtime" + "runtime/debug" "sort" "strconv" "strings" @@ -207,18 +210,25 @@ import ( "unicode/utf8" ) -// if debugging is true, then -// - within Encode/Decode, do not recover from panic's -// - etc -// -// Note: Negative tests that check for errors will fail, so only use this -// when debugging, and run only one test at a time preferably. -// -// Note: RPC tests depend on getting the error from an Encode/Decode call. -// Consequently, they will always fail if debugging = true. -const debugging = false - const ( + // if debugging is true, then + // - within Encode/Decode, do not recover from panic's + // - etc + // + // Note: Negative tests that check for errors will fail, so only use this + // when debugging, and run only one test at a time preferably. + // + // Note: RPC tests depend on getting the error from an Encode/Decode call. + // Consequently, they will always fail if debugging = true. + // + // It is generally set to false + debugging = false + + // if debugLogging is false, debugf calls will be a No-op. + // + // It is generally set to true + debugLogging = true + // containerLenUnknown is length returned from Read(Map|Array)Len // when a format doesn't know apiori. // For example, json doesn't pre-determine the length of a container (sequence/map). @@ -240,9 +250,6 @@ const ( // This constant flag will enable or disable it. supportMarshalInterfaces = true - // bytesFreeListNoCache is used for debugging, when we want to skip using a cache of []byte. - bytesFreeListNoCache = false - // size of the cacheline: defaulting to value for archs: amd64, arm64, 386 // should use "runtime/internal/sys".CacheLineSize, but that is not exposed. 
cacheLineSize = 64 @@ -250,11 +257,38 @@ const ( wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize wordSize = wordSizeBits / 8 + // byteBufSize is the default size of []byte used + // possibly for bufioWriter, etc + byteBufSize = 1 << 10 // 4:16, 6:64, 8:256, 10:1024 + // MARKER: determines whether to skip calling fastpath(En|De)codeTypeSwitch. // Calling the fastpath switch in encode() or decode() could be redundant, // as we still have to introspect it again within fnLoad // to determine the function to use for values of that type. skipFastpathTypeSwitchInDirectCall = false + + // maxArrayLen is the size of uint, which determines + // the maximum length of any array. + maxArrayLen = 1<<((32<<(^uint(0)>>63))-1) - 1 + + // ---- below this line, useXXX consts should be true + + usePoolForSFIs = true + useArenaForSFIs = true + + usePoolForTypeInfoLoad = true + + usePoolForSideEncode = true + + usePoolForSideDecode = true + + useBytesFreeList = true + + useSfiRvFreeList = true + + // ---- below this line, useXXX consts should be false + + useBytesFreeListPutGetSeparateCalls = false ) const cpu32Bit = ^uint(0)>>32 == 0 @@ -267,16 +301,6 @@ const ( rkindChan = rkind(reflect.Chan) ) -type mapKeyFastKind uint8 - -const ( - mapKeyFastKind32 = iota + 1 - mapKeyFastKind32ptr - mapKeyFastKind64 - mapKeyFastKind64ptr - mapKeyFastKindStr -) - var ( // use a global mutex to ensure each Handle is initialized. // We do this, so we don't have to store the basicHandle mutex @@ -289,7 +313,10 @@ var ( digitCharBitset bitset256 numCharBitset bitset256 whitespaceCharBitset bitset256 - asciiAlphaNumBitset bitset256 + // asciiAlphaNumBitset bitset256 + + jsonCharHtmlSafeBitset bitset256 + jsonCharSafeBitset bitset256 // numCharWithExpBitset64 bitset64 // numCharNoExpBitset64 bitset64 @@ -313,8 +340,6 @@ var ( // scalarBitset sets bit for all kinds which are scalars/primitives and thus immutable scalarBitset bitset32 - mapKeyFastKindVals [32]mapKeyFastKind - // codecgen is set to true by codecgen, so that tests, etc can use this information as needed. codecgen bool @@ -322,6 +347,21 @@ var ( zeroByteSlice = oneByteArr[:0:0] eofReader devNullReader + + // string containing all values of a uint8 in sequence. + // We maintain a [256]byte slice, for efficiently making strings with one byte. + // str256 string + + // handleNewFns []handleNewFn + + basicErrDecorator errDecoratorDef + + // sentinel value passed to panicValToErr, signifying to call recover yourself + callRecoverSentinel = new(byte) + + // debugstackOnce is used to put a single debugStack at a certain point (during debugging). + // To use, just call debugstackOnce() wherever you need to see a stack only once. 
+ debugstackOnce = sync.OnceFunc(debug.PrintStack) ) var ( @@ -340,41 +380,13 @@ var ( errNoFormatHandle = errors.New("no handle (cannot identify format)") ) -var pool4tiload = sync.Pool{ +var poolForTypeInfoLoad = sync.Pool{ New: func() interface{} { - return &typeInfoLoad{ - etypes: make([]uintptr, 0, 4), - sfis: make([]structFieldInfo, 0, 4), - sfiNames: make(map[string]uint16, 4), - } + return newTypeInfoLoad() }, } func init() { - xx := func(f mapKeyFastKind, k ...reflect.Kind) { - for _, v := range k { - mapKeyFastKindVals[byte(v)&31] = f // 'v % 32' equal to 'v & 31' - } - } - - var f mapKeyFastKind - - f = mapKeyFastKind64 - if wordSizeBits == 32 { - f = mapKeyFastKind32 - } - xx(f, reflect.Int, reflect.Uint, reflect.Uintptr) - - f = mapKeyFastKind64ptr - if wordSizeBits == 32 { - f = mapKeyFastKind32ptr - } - xx(f, reflect.Ptr) - - xx(mapKeyFastKindStr, reflect.String) - xx(mapKeyFastKind32, reflect.Uint32, reflect.Int32, reflect.Float32) - xx(mapKeyFastKind64, reflect.Uint64, reflect.Int64, reflect.Float64) - numBoolBitset. set(byte(reflect.Bool)). set(byte(reflect.Int)). @@ -425,9 +437,9 @@ func init() { // set(byte(reflect.String)) for i := byte(0); i <= utf8.RuneSelf; i++ { - if (i >= '0' && i <= '9') || (i >= 'a' && i <= 'z') || (i >= 'A' && i <= 'Z') { - asciiAlphaNumBitset.set(i) - } + // if (i >= '0' && i <= '9') || (i >= 'a' && i <= 'z') || (i >= 'A' && i <= 'Z') || i == '_' { + // asciiAlphaNumBitset.set(i) + // } switch i { case ' ', '\t', '\r', '\n': whitespaceCharBitset.set(i) @@ -440,16 +452,74 @@ func init() { numCharBitset.set(i) } } + + // populate the safe values as true: note: ASCII control characters are (0-31) + // jsonCharSafeBitset: all true except (0-31) " \ + // jsonCharHtmlSafeBitset: all true except (0-31) " \ < > & + for i := byte(32); i < utf8.RuneSelf; i++ { + switch i { + case '"', '\\': + case '<', '>', '&': + jsonCharSafeBitset.set(i) // = true + default: + jsonCharSafeBitset.set(i) + jsonCharHtmlSafeBitset.set(i) + } + } } -// driverStateManager supports the runtime state of an (enc|dec)Driver. +func searchRtids(s []uintptr, v uintptr) (i uint, ok bool) { + var h uint + var j uint = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2 + if s[h] < v { + i = h + 1 + } else { + j = h + } + goto LOOP + } + return i, i < uint(len(s)) && s[i] == v +} + +// circularRefChecker holds interfaces during an encoding (if CheckCircularRef=true) // -// During a side(En|De)code call, we can capture the state, reset it, -// and then restore it later to continue the primary encoding/decoding. -type driverStateManager interface { - resetState() - captureState() interface{} - restoreState(state interface{}) +// We considered using a []uintptr (slice of pointer addresses) retrievable via rv.UnsafeAddr. +// However, it is possible for the same pointer to point to 2 different types e.g. +// +// type T struct { tHelper } +// Here, for var v T; &v and &v.tHelper are the same pointer. +// +// Consequently, we need a tuple of type and pointer, which interface{} natively provides. 
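// A minimal illustrative sketch (not part of the vendored patch) of why the
// checker stores interface{} values rather than bare pointers, as described
// above: with an embedded first field, the outer value and the field share an
// address, but the (type, pointer) pair carried by an interface still
// distinguishes them. The type names mirror the T/tHelper example in the comment.
package main

import "fmt"

type tHelper struct{ n int }

type T struct{ tHelper }

func main() {
	var v T
	samePtr := fmt.Sprintf("%p", &v) == fmt.Sprintf("%p", &v.tHelper)
	fmt.Println(samePtr)                                    // true: same address
	fmt.Println(interface{}(&v) == interface{}(&v.tHelper)) // false: different dynamic types
}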
+// +// Note: the following references, if seen, can lead to circular references +// - Pointer to struct/slice/array/map (any container) +// - map (reference, where a value in a kv pair could be the map itself) +// - addr of slice/array element +// - add of struct field +type circularRefChecker []interface{} // []uintptr + +func (ci *circularRefChecker) push(v interface{}) { + for _, vv := range *ci { + if eq4i(v, vv) { // error if sptr already seen + halt.errorf("circular reference found: %p, %T", v, v) + } + } + *ci = append(*ci, v) +} + +func (_ *circularRefChecker) canPushElemKind(elemKind reflect.Kind) bool { + switch elemKind { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + return true + } + return false +} + +func (ci *circularRefChecker) pop(num int) { + *ci = (*ci)[:len(*ci)-num] } type bdAndBdread struct { @@ -457,10 +527,7 @@ type bdAndBdread struct { bd byte } -func (x bdAndBdread) captureState() interface{} { return x } -func (x *bdAndBdread) resetState() { x.bd, x.bdRead = 0, false } -func (x *bdAndBdread) reset() { x.resetState() } -func (x *bdAndBdread) restoreState(v interface{}) { *x = v.(bdAndBdread) } +func (x *bdAndBdread) reset() { x.bd, x.bdRead = 0, false } type clsErr struct { err error // error on closing @@ -570,6 +637,7 @@ type fauxUnion struct { b bool // state + a dBytesAttachState v valueType } @@ -580,12 +648,21 @@ type typeInfoLoad struct { sfiNames map[string]uint16 } +func newTypeInfoLoad() *typeInfoLoad { + return &typeInfoLoad{ + etypes: make([]uintptr, 0, 4), + sfis: make([]structFieldInfo, 0, 4), + sfiNames: make(map[string]uint16, 4), + } +} + func (x *typeInfoLoad) reset() { x.etypes = x.etypes[:0] x.sfis = x.sfis[:0] - for k := range x.sfiNames { // optimized to zero the map - delete(x.sfiNames, k) - } + clear(x.sfiNames) + // for k := range x.sfiNames { // optimized to zero the map + // delete(x.sfiNames, k) + // } } // mirror json.Marshaler and json.Unmarshaler here, @@ -606,6 +683,15 @@ type isCodecEmptyer interface { IsCodecEmpty() bool } +type outOfBoundsError struct { + capacity uint + requested uint +} + +func (x *outOfBoundsError) Error() string { + return sprintf("out of bounds with capacity = %d, requested %d", x.capacity, x.requested) +} + type codecError struct { err error name string @@ -638,6 +724,8 @@ func wrapCodecErr(in error, name string, numbytesread int, encode bool) (out err var ( bigen bigenHelper + // bigenB bigenWriter[bytesEncAppenderM] + // bigenIO bigenWriter[bufioEncWriterM] bigenstd = binary.BigEndian @@ -671,11 +759,10 @@ var ( jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() - selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() - missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem() - iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem() - isCodecEmptyerTyp = reflect.TypeOf((*isCodecEmptyer)(nil)).Elem() - isSelferViaCodecgenerTyp = reflect.TypeOf((*isSelferViaCodecgener)(nil)).Elem() + selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() + missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem() + iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem() + isCodecEmptyerTyp = reflect.TypeOf((*isCodecEmptyer)(nil)).Elem() uint8TypId = rt2id(uint8Typ) uint8SliceTypId = rt2id(uint8SliceTyp) @@ -732,10 +819,6 @@ type Selfer interface { CodecDecodeSelf(*Decoder) } -type isSelferViaCodecgener interface { - codecSelferViaCodecgen() -} - // MissingFielder defines the interface allowing structs to internally decode 
or encode // values which do not map to struct fields. // @@ -782,6 +865,18 @@ type MapBySlice interface { MapBySlice() } +// const ( +// rtidfn_Enc_IO = iota +// rtidfn_Enc_IO_NoExt +// rtidfn_Dec_IO +// rtidfn_Dec_IO_NoExt + +// rtidfn_Enc_Bytes +// rtidfn_Enc_Bytes_NoExt +// rtidfn_Dec_Bytes +// rtidfn_Dec_Bytes_NoExt +// ) + // basicHandleRuntimeState holds onto all BasicHandle runtime and cached config information. // // Storing this outside BasicHandle allows us create shallow copies of a Handle, @@ -790,11 +885,6 @@ type MapBySlice interface { // temporarily when running tests in parallel, without running the risk that a test executing // in parallel with other tests does not see a transient modified values not meant for it. type basicHandleRuntimeState struct { - // these are used during runtime. - // At init time, they should have nothing in them. - rtidFns atomicRtidFnSlice - rtidFnsNoExt atomicRtidFnSlice - // Note: basicHandleRuntimeState is not comparable, due to these slices here (extHandle, intf2impls). // If *[]T is used instead, this becomes comparable, at the cost of extra indirection. // Thses slices are used all the time, so keep as slices (not pointers). @@ -803,16 +893,32 @@ type basicHandleRuntimeState struct { intf2impls + // these keep track of the []codecRtidFns for this handle. + // We used a non-generic value so we can: + // - keep these within BasicHandle + // - work around recursive limitations of go's generics + rtidFnsEncIO, + rtidFnsEncNoExtIO, + rtidFnsEncBytes, + rtidFnsEncNoExtBytes, + rtidFnsDecIO, + rtidFnsDecNoExtIO, + rtidFnsDecBytes, + rtidFnsDecNoExtBytes atomicRtidFnSlice + + sideEncPool sync.Pool + sideDecPool sync.Pool + mu sync.Mutex jsonHandle bool binaryHandle bool - // timeBuiltin is initialized from TimeNotBuiltin, and used internally. + // timeBuiltin is initialized from TimeNotBuiltin, and used internally by setExt. // once initialized, it cannot be changed, as the function for encoding/decoding time.Time // will have been cached and the TimeNotBuiltin value will not be consulted thereafter. timeBuiltin bool - _ bool // padding + // _ bool // padding } // BasicHandle encapsulates the common options and extension functions. @@ -827,7 +933,7 @@ type BasicHandle struct { // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json TypeInfos *TypeInfos - *basicHandleRuntimeState + basicHandleRuntimeState // ---- cache line @@ -865,6 +971,7 @@ type BasicHandle struct { // ---- cache line inited uint32 // holds if inited, and also handle flags (binary encoding, json handler, etc) + // name string } // initHandle does a one-time initialization of the handle. @@ -886,17 +993,48 @@ func initHandle(hh Handle) { // is not sufficient, since a race condition can occur within init(Handle) function. // init is made noinline, so that this function can be inlined by its caller. if atomic.LoadUint32(&x.inited) == 0 { - x.initHandle(hh) + initHandle2(x, hh) } } +// initHandle2 should be called only from codec.initHandle global function. +// make it uninlineable, as it is called at most once for each handle. 
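The initHandle path above pairs a cheap atomic read with a mutex-guarded re-check so the one-time setup runs exactly once even under concurrent first use. A self-contained sketch of that double-checked pattern, assuming stand-in names (handle, initSlow, value) rather than the codec types:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// handle is a stand-in: an atomic flag guards the fast path,
// a mutex serializes the one-time slow path.
type handle struct {
	inited uint32
	mu     sync.Mutex
	value  int
}

func (h *handle) init() {
	if atomic.LoadUint32(&h.inited) == 0 { // fast path: cheap atomic check
		h.initSlow()
	}
}

func (h *handle) initSlow() {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.inited != 0 { // re-check under the lock: another goroutine may have finished first
		return
	}
	h.value = 42                     // one-time setup
	atomic.StoreUint32(&h.inited, 1) // publish only after setup completes
}

func main() {
	var h handle
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); h.init() }()
	}
	wg.Wait()
	fmt.Println(h.value) // 42, initialized exactly once
}
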
+// +//go:noinline +func initHandle2(x *BasicHandle, hh Handle) { + handleInitMu.Lock() + defer handleInitMu.Unlock() // use defer, as halt may panic below + if x.inited != 0 { + return + } + x.jsonHandle = hh.isJson() + x.binaryHandle = hh.isBinary() + x.basicInit() + + x.sideEncPool.New = func() any { + return NewEncoderBytes(nil, hh).encoderI + } + x.sideDecPool.New = func() any { + return NewDecoderBytes(nil, hh).decoderI + } + + // hh.init() + + atomic.StoreUint32(&x.inited, 1) +} + func (x *BasicHandle) basicInit() { - x.rtidFns.store(nil) - x.rtidFnsNoExt.store(nil) + // ensure MapType and SliceType are of correct type + if x.MapType != nil && x.MapType.Kind() != reflect.Map { + halt.onerror(errMapTypeNotMapKind) + } + if x.SliceType != nil && x.SliceType.Kind() != reflect.Slice { + halt.onerror(errSliceTypeNotSliceKind) + } x.timeBuiltin = !x.TimeNotBuiltin } -func (x *BasicHandle) init() {} +// func (x *BasicHandle) init() {} func (x *BasicHandle) isInited() bool { return atomic.LoadUint32(&x.inited) != 0 @@ -907,20 +1045,6 @@ func (x *BasicHandle) clearInited() { atomic.StoreUint32(&x.inited, 0) } -// TimeBuiltin returns whether time.Time OOTB support is used, -// based on the initial configuration of TimeNotBuiltin -func (x *basicHandleRuntimeState) TimeBuiltin() bool { - return x.timeBuiltin -} - -func (x *basicHandleRuntimeState) isJs() bool { - return x.jsonHandle -} - -func (x *basicHandleRuntimeState) isBe() bool { - return x.binaryHandle -} - func (x *basicHandleRuntimeState) setExt(rt reflect.Type, tag uint64, ext Ext) (err error) { rk := rt.Kind() for rk == reflect.Ptr { @@ -957,32 +1081,6 @@ func (x *basicHandleRuntimeState) setExt(rt reflect.Type, tag uint64, ext Ext) ( return } -// initHandle should be called only from codec.initHandle global function. -// make it uninlineable, as it is called at most once for each handle. -// -//go:noinline -func (x *BasicHandle) initHandle(hh Handle) { - handleInitMu.Lock() - defer handleInitMu.Unlock() // use defer, as halt may panic below - if x.inited == 0 { - if x.basicHandleRuntimeState == nil { - x.basicHandleRuntimeState = new(basicHandleRuntimeState) - } - x.jsonHandle = hh.isJson() - x.binaryHandle = hh.isBinary() - // ensure MapType and SliceType are of correct type - if x.MapType != nil && x.MapType.Kind() != reflect.Map { - halt.onerror(errMapTypeNotMapKind) - } - if x.SliceType != nil && x.SliceType.Kind() != reflect.Slice { - halt.onerror(errSliceTypeNotSliceKind) - } - x.basicInit() - hh.init() - atomic.StoreUint32(&x.inited, 1) - } -} - func (x *BasicHandle) getBasicHandle() *BasicHandle { return x } @@ -994,305 +1092,13 @@ func (x *BasicHandle) typeInfos() *TypeInfos { return defTypeInfos } +// getTypeInfo expects a non-pointer func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { return x.typeInfos().get(rtid, rt) } -func findRtidFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) { - // binary search. adapted from sort/search.go. - // Note: we use goto (instead of for loop) so this can be inlined. 
- - // h, i, j := 0, 0, len(s) - var h uint // var h, i uint - var j = uint(len(s)) -LOOP: - if i < j { - h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2 - if s[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - goto LOOP - } - if i < uint(len(s)) && s[i].rtid == rtid { - fn = s[i].fn - } - return -} - -func (x *BasicHandle) fn(rt reflect.Type) (fn *codecFn) { - return x.fnVia(rt, x.typeInfos(), &x.rtidFns, x.CheckCircularRef, true) -} - -func (x *BasicHandle) fnNoExt(rt reflect.Type) (fn *codecFn) { - return x.fnVia(rt, x.typeInfos(), &x.rtidFnsNoExt, x.CheckCircularRef, false) -} - -func (x *basicHandleRuntimeState) fnVia(rt reflect.Type, tinfos *TypeInfos, fs *atomicRtidFnSlice, checkCircularRef, checkExt bool) (fn *codecFn) { - rtid := rt2id(rt) - sp := fs.load() - if sp != nil { - if _, fn = findRtidFn(sp, rtid); fn != nil { - return - } - } - - fn = x.fnLoad(rt, rtid, tinfos, checkCircularRef, checkExt) - x.mu.Lock() - sp = fs.load() - // since this is an atomic load/store, we MUST use a different array each time, - // else we have a data race when a store is happening simultaneously with a findRtidFn call. - if sp == nil { - sp = []codecRtidFn{{rtid, fn}} - fs.store(sp) - } else { - idx, fn2 := findRtidFn(sp, rtid) - if fn2 == nil { - sp2 := make([]codecRtidFn, len(sp)+1) - copy(sp2[idx+1:], sp[idx:]) - copy(sp2, sp[:idx]) - sp2[idx] = codecRtidFn{rtid, fn} - fs.store(sp2) - } - } - x.mu.Unlock() - return -} - -func fnloadFastpathUnderlying(ti *typeInfo) (f *fastpathE, u reflect.Type) { - var rtid uintptr - var idx int - rtid = rt2id(ti.fastpathUnderlying) - idx = fastpathAvIndex(rtid) - if idx == -1 { - return - } - f = &fastpathAv[idx] - if uint8(reflect.Array) == ti.kind { - u = reflectArrayOf(ti.rt.Len(), ti.elem) - } else { - u = f.rt - } - return -} - -func (x *basicHandleRuntimeState) fnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, checkCircularRef, checkExt bool) (fn *codecFn) { - fn = new(codecFn) - fi := &(fn.i) - ti := tinfos.get(rtid, rt) - fi.ti = ti - rk := reflect.Kind(ti.kind) - - // anything can be an extension except the built-in ones: time, raw and rawext. - // ensure we check for these types, then if extension, before checking if - // it implementes one of the pre-declared interfaces. 
- - fi.addrDf = true - // fi.addrEf = true - - if rtid == timeTypId && x.timeBuiltin { - fn.fe = (*Encoder).kTime - fn.fd = (*Decoder).kTime - } else if rtid == rawTypId { - fn.fe = (*Encoder).raw - fn.fd = (*Decoder).raw - } else if rtid == rawExtTypId { - fn.fe = (*Encoder).rawExt - fn.fd = (*Decoder).rawExt - fi.addrD = true - fi.addrE = true - } else if xfFn := x.getExt(rtid, checkExt); xfFn != nil { - fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext - fn.fe = (*Encoder).ext - fn.fd = (*Decoder).ext - fi.addrD = true - if rk == reflect.Struct || rk == reflect.Array { - fi.addrE = true - } - } else if (ti.flagSelfer || ti.flagSelferPtr) && - !(checkCircularRef && ti.flagSelferViaCodecgen && ti.kind == byte(reflect.Struct)) { - // do not use Selfer generated by codecgen if it is a struct and CheckCircularRef=true - fn.fe = (*Encoder).selferMarshal - fn.fd = (*Decoder).selferUnmarshal - fi.addrD = ti.flagSelferPtr - fi.addrE = ti.flagSelferPtr - } else if supportMarshalInterfaces && x.isBe() && - (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && - (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { - fn.fe = (*Encoder).binaryMarshal - fn.fd = (*Decoder).binaryUnmarshal - fi.addrD = ti.flagBinaryUnmarshalerPtr - fi.addrE = ti.flagBinaryMarshalerPtr - } else if supportMarshalInterfaces && !x.isBe() && x.isJs() && - (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && - (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { - //If JSON, we should check JSONMarshal before textMarshal - fn.fe = (*Encoder).jsonMarshal - fn.fd = (*Decoder).jsonUnmarshal - fi.addrD = ti.flagJsonUnmarshalerPtr - fi.addrE = ti.flagJsonMarshalerPtr - } else if supportMarshalInterfaces && !x.isBe() && - (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && - (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { - fn.fe = (*Encoder).textMarshal - fn.fd = (*Decoder).textUnmarshal - fi.addrD = ti.flagTextUnmarshalerPtr - fi.addrE = ti.flagTextMarshalerPtr - } else { - if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { - // by default (without using unsafe), - // if an array is not addressable, converting from an array to a slice - // requires an allocation (see helper_not_unsafe.go: func rvGetSlice4Array). - // - // (Non-addressable arrays mostly occur as keys/values from a map). - // - // However, fastpath functions are mostly for slices of numbers or strings, - // which are small by definition and thus allocation should be fast/cheap in time. - // - // Consequently, the value of doing this quick allocation to elide the overhead cost of - // non-optimized (not-unsafe) reflection is a fair price. 
- var rtid2 uintptr - if !ti.flagHasPkgPath { // un-named type (slice or mpa or array) - rtid2 = rtid - if rk == reflect.Array { - rtid2 = rt2id(ti.key) // ti.key for arrays = reflect.SliceOf(ti.elem) - } - if idx := fastpathAvIndex(rtid2); idx != -1 { - fn.fe = fastpathAv[idx].encfn - fn.fd = fastpathAv[idx].decfn - fi.addrD = true - fi.addrDf = false - if rk == reflect.Array { - fi.addrD = false // decode directly into array value (slice made from it) - } - } - } else { // named type (with underlying type of map or slice or array) - // try to use mapping for underlying type - xfe, xrt := fnloadFastpathUnderlying(ti) - if xfe != nil { - xfnf := xfe.encfn - xfnf2 := xfe.decfn - if rk == reflect.Array { - fi.addrD = false // decode directly into array value (slice made from it) - fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { - xfnf2(d, xf, rvConvert(xrv, xrt)) - } - } else { - fi.addrD = true - fi.addrDf = false // meaning it can be an address(ptr) or a value - xptr2rt := reflect.PtrTo(xrt) - fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { - if xrv.Kind() == reflect.Ptr { - xfnf2(d, xf, rvConvert(xrv, xptr2rt)) - } else { - xfnf2(d, xf, rvConvert(xrv, xrt)) - } - } - } - fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) { - xfnf(e, xf, rvConvert(xrv, xrt)) - } - } - } - } - if fn.fe == nil && fn.fd == nil { - switch rk { - case reflect.Bool: - fn.fe = (*Encoder).kBool - fn.fd = (*Decoder).kBool - case reflect.String: - // Do not use different functions based on StringToRaw option, as that will statically - // set the function for a string type, and if the Handle is modified thereafter, - // behaviour is non-deterministic - // i.e. DO NOT DO: - // if x.StringToRaw { - // fn.fe = (*Encoder).kStringToRaw - // } else { - // fn.fe = (*Encoder).kStringEnc - // } - - fn.fe = (*Encoder).kString - fn.fd = (*Decoder).kString - case reflect.Int: - fn.fd = (*Decoder).kInt - fn.fe = (*Encoder).kInt - case reflect.Int8: - fn.fe = (*Encoder).kInt8 - fn.fd = (*Decoder).kInt8 - case reflect.Int16: - fn.fe = (*Encoder).kInt16 - fn.fd = (*Decoder).kInt16 - case reflect.Int32: - fn.fe = (*Encoder).kInt32 - fn.fd = (*Decoder).kInt32 - case reflect.Int64: - fn.fe = (*Encoder).kInt64 - fn.fd = (*Decoder).kInt64 - case reflect.Uint: - fn.fd = (*Decoder).kUint - fn.fe = (*Encoder).kUint - case reflect.Uint8: - fn.fe = (*Encoder).kUint8 - fn.fd = (*Decoder).kUint8 - case reflect.Uint16: - fn.fe = (*Encoder).kUint16 - fn.fd = (*Decoder).kUint16 - case reflect.Uint32: - fn.fe = (*Encoder).kUint32 - fn.fd = (*Decoder).kUint32 - case reflect.Uint64: - fn.fe = (*Encoder).kUint64 - fn.fd = (*Decoder).kUint64 - case reflect.Uintptr: - fn.fe = (*Encoder).kUintptr - fn.fd = (*Decoder).kUintptr - case reflect.Float32: - fn.fe = (*Encoder).kFloat32 - fn.fd = (*Decoder).kFloat32 - case reflect.Float64: - fn.fe = (*Encoder).kFloat64 - fn.fd = (*Decoder).kFloat64 - case reflect.Complex64: - fn.fe = (*Encoder).kComplex64 - fn.fd = (*Decoder).kComplex64 - case reflect.Complex128: - fn.fe = (*Encoder).kComplex128 - fn.fd = (*Decoder).kComplex128 - case reflect.Chan: - fn.fe = (*Encoder).kChan - fn.fd = (*Decoder).kChan - case reflect.Slice: - fn.fe = (*Encoder).kSlice - fn.fd = (*Decoder).kSlice - case reflect.Array: - fi.addrD = false // decode directly into array value (slice made from it) - fn.fe = (*Encoder).kArray - fn.fd = (*Decoder).kArray - case reflect.Struct: - if ti.anyOmitEmpty || - ti.flagMissingFielder || - ti.flagMissingFielderPtr { - fn.fe = (*Encoder).kStruct - } else { - 
fn.fe = (*Encoder).kStructNoOmitempty - } - fn.fd = (*Decoder).kStruct - case reflect.Map: - fn.fe = (*Encoder).kMap - fn.fd = (*Decoder).kMap - case reflect.Interface: - // encode: reflect.Interface are handled already by preEncodeValue - fn.fd = (*Decoder).kInterface - fn.fe = (*Encoder).kErr - default: - // reflect.Ptr and reflect.Interface are handled already by preEncodeValue - fn.fe = (*Encoder).kErr - fn.fd = (*Decoder).kErr - } - } - } - return +func (x *BasicHandle) getTypeInfo4RT(rt reflect.Type) (pti *typeInfo) { + return x.typeInfos().get(rt2id(rt), rt) } // Handle defines a specific encoding format. It also stores any runtime state @@ -1313,14 +1119,18 @@ func (x *basicHandleRuntimeState) fnLoad(rt reflect.Type, rtid uintptr, tinfos * type Handle interface { Name() string getBasicHandle() *BasicHandle - newEncDriver() encDriver - newDecDriver() decDriver isBinary() bool isJson() bool // json is special for now, so track it // desc describes the current byte descriptor, or returns "unknown[XXX]" if not understood. desc(bd byte) string // init initializes the handle based on handle-specific info (beyond what is in BasicHandle) - init() + // init() + // clone() Handle + newEncoderBytes(out *[]byte) encoderI + newEncoder(w io.Writer) encoderI + + newDecoderBytes(in []byte) decoderI + newDecoder(r io.Reader) decoderI } // Raw represents raw formatted bytes. @@ -1330,11 +1140,13 @@ type Handle interface { type Raw []byte // RawExt represents raw unprocessed extension data. +// // Some codecs will decode extension data as a *RawExt // if there is no registered extension for the tag. // -// Only one of Data or Value is nil. -// If Data is nil, then the content of the RawExt is in the Value. +// On encode, encode the Data. If nil, then try to encode the Value. +// +// On decode: store tag, then store bytes and/or decode into Value. type RawExt struct { Tag uint64 // Data is the []byte which represents the raw ext. If nil, ext is exposed in Value. @@ -1454,12 +1266,10 @@ type extFailWrapper struct { type binaryEncodingType struct{} func (binaryEncodingType) isBinary() bool { return true } -func (binaryEncodingType) isJson() bool { return false } type textEncodingType struct{} func (textEncodingType) isBinary() bool { return false } -func (textEncodingType) isJson() bool { return false } type notJsonType struct{} @@ -1487,11 +1297,8 @@ func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} // retrofitted from stdlib: encoding/binary/BigEndian (ByteOrder) type bigenHelper struct{} -func (z bigenHelper) PutUint16(v uint16) (b [2]byte) { - return [...]byte{ - byte(v >> 8), - byte(v), - } +func (z bigenHelper) PutUint16(v uint16) (b1, b2 byte) { + return byte(v >> 8), byte(v) } func (z bigenHelper) PutUint32(v uint32) (b [4]byte) { @@ -1539,23 +1346,6 @@ func (z bigenHelper) Uint64(b [8]byte) (v uint64) { uint64(b[0])<<56 } -func (z bigenHelper) writeUint16(w *encWr, v uint16) { - x := z.PutUint16(v) - w.writen2(x[0], x[1]) -} - -func (z bigenHelper) writeUint32(w *encWr, v uint32) { - // w.writeb((z.PutUint32(v))[:]) - // x := z.PutUint32(v) - // w.writeb(x[:]) - // w.writen4(x[0], x[1], x[2], x[3]) - w.writen4(z.PutUint32(v)) -} - -func (z bigenHelper) writeUint64(w *encWr, v uint64) { - w.writen8(z.PutUint64(v)) -} - type extTypeTagFn struct { rtid uintptr rtidptr uintptr @@ -1584,14 +1374,13 @@ func (x *BasicHandle) AddExt(rt reflect.Type, tag byte, // An error is returned if that is not honored. // To Deregister an ext, call SetExt with nil Ext. 
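Earlier in this hunk, bigenHelper.PutUint16 now returns the two bytes directly instead of a [2]byte array; the packing itself is plain big-endian shifting. A small sketch of the round trip, with illustrative function names rather than the codec API:

package main

import "fmt"

// putUint16 mirrors bigenHelper.PutUint16 above: high byte first (big-endian).
func putUint16(v uint16) (b1, b2 byte) {
	return byte(v >> 8), byte(v)
}

// uint16From is the inverse, matching bigenHelper.Uint16.
func uint16From(b1, b2 byte) uint16 {
	return uint16(b1)<<8 | uint16(b2)
}

func main() {
	b1, b2 := putUint16(0xCAFE)
	fmt.Printf("%#x %#x\n", b1, b2)         // 0xca 0xfe
	fmt.Printf("%#x\n", uint16From(b1, b2)) // 0xcafe
}
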
// -// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead. +// It will throw an error if called after the Handle has been initialized. +// +// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead (which *may* internally call this) func (x *BasicHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { if x.isInited() { return errHandleInited } - if x.basicHandleRuntimeState == nil { - x.basicHandleRuntimeState = new(basicHandleRuntimeState) - } return x.basicHandleRuntimeState.setExt(rt, tag, ext) } @@ -1672,6 +1461,16 @@ func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) { return } +type structFieldInfoNode struct { + offset uint16 + index uint16 + kind uint8 + numderef uint8 + _ uint16 // padding + + typ reflect.Type +} + // structFieldinfopathNode is a node in a tree, which allows us easily // walk the anonymous path. // @@ -1679,19 +1478,7 @@ func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) { // will be nil and this information becomes a value (not needing any indirection). type structFieldInfoPathNode struct { parent *structFieldInfoPathNode - - offset uint16 - index uint16 - kind uint8 - numderef uint8 - - // encNameAsciiAlphaNum and omitEmpty should be in structFieldInfo, - // but are kept here for tighter packaging. - - encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers - omitEmpty bool - - typ reflect.Type + structFieldInfoNode } // depth returns number of valid nodes in the hierachy @@ -1705,34 +1492,17 @@ TOP: return } -// field returns the field of the struct. -func (path *structFieldInfoPathNode) field(v reflect.Value) (rv2 reflect.Value) { - if parent := path.parent; parent != nil { - v = parent.field(v) - for j, k := uint8(0), parent.numderef; j < k; j++ { - if rvIsNil(v) { - return - } - v = v.Elem() - } - } - return path.rvField(v) -} - -// fieldAlloc returns the field of the struct. -// It allocates if a nil value was seen while searching. -func (path *structFieldInfoPathNode) fieldAlloc(v reflect.Value) (rv2 reflect.Value) { - if parent := path.parent; parent != nil { - v = parent.fieldAlloc(v) - for j, k := uint8(0), parent.numderef; j < k; j++ { - if rvIsNil(v) { - rvSetDirect(v, reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - } - return path.rvField(v) -} +// MARKER: fully working code - commented out as we inline the code in sfi.field(No)Alloc +// // field returns the field of the struct. +// func (n *structFieldInfoPathNode) field(v reflect.Value, alloc, base bool) (rv reflect.Value) { +// if n.parent != nil { +// v = n.parent.field(v, alloc, true) +// if !v.IsValid() { +// return +// } +// } +// return n.structFieldInfoNode.field(v, alloc, base) +// } type structFieldInfo struct { encName string // encode name @@ -1741,10 +1511,86 @@ type structFieldInfo struct { // fieldName string // currently unused - // encNameAsciiAlphaNum and omitEmpty should be here, - // but are stored in structFieldInfoPathNode for tighter packaging. + encNameEscape4Json bool + omitEmpty bool - path structFieldInfoPathNode + ptrKind bool + + encBuiltin bool // is field supported for encoding as a builtin? + decBuiltin bool // is field addr supported for decoding as a builtin? 
+ + node structFieldInfoNode + parents []structFieldInfoNode + + // path structFieldInfoPathNode + + baseTyp reflect.Type + ptrTyp reflect.Type +} + +// MARKER: fully working code - commented out as we inline the code in sfi.field(No)Alloc +// func (n *structFieldInfo) field(v reflect.Value, alloc, base bool) (rv reflect.Value) { +// for i := range n.parents { +// v = n.parents[i].field(v, alloc, true) +// if !v.IsValid() { +// return +// } +// } +// return n.node.field(v, alloc, base) +// } + +func (n *structFieldInfo) fieldAlloc(v reflect.Value) reflect.Value { + // return n.path.field(v, true, true) + // return n.field(v, true, true) + var j, nd uint8 + for i := range n.parents { + v = n.parents[i].rvField(v) + nd = n.parents[i].numderef + for j = 0; j < nd; j++ { + if rvPtrIsNil(v) { + rvSetDirect(v, reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = n.node.rvField(v) + nd = n.node.numderef + for j = 0; j < nd; j++ { + if rvPtrIsNil(v) { + rvSetDirect(v, reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v +} + +func (n *structFieldInfo) fieldNoAlloc(v reflect.Value, base bool) (rv reflect.Value) { + // return n.path.field(v, false, base) + // return n.field(v, false, base) + var j, nd uint8 + for i := range n.parents { + v = n.parents[i].rvField(v) + nd = n.parents[i].numderef + for j = 0; j < nd; j++ { + if rvPtrIsNil(v) { + return reflect.Value{} + } + v = v.Elem() + } + } + v = n.node.rvField(v) + rv = v + nd = n.node.numderef + for j = 0; j < nd; j++ { + if rvPtrIsNil(v) { + return reflect.Value{} + } + v = v.Elem() + } + if base { + rv = v + } + return } func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) { @@ -1777,22 +1623,168 @@ func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) { return } -func (si *structFieldInfo) parseTag(stag string) { +func parseStructFieldTag(stag string) (encName string, omitEmpty bool) { if stag == "" { return } for i, s := range strings.Split(stag, ",") { if i == 0 { if s != "" { - si.encName = s - } - } else { - switch s { - case "omitempty": - si.path.omitEmpty = true + encName = s } + continue + } + if s == "omitempty" { + omitEmpty = true } } + return +} + +// ---- + +type uint8To32TrieNode struct { + uint8To32TrieNodeNoKids + kids uint8To32TrieNodeKids +} + +func (x *uint8To32TrieNode) reset(v uint8) { + x.key = v + x.value = 0 + x.valid = false + x.truncKids() +} + +func (x *uint8To32TrieNode) expandKids() (r *uint8To32TrieNode) { + // since we want to reuse the slices, let's not use append as it will + // always overwrite the value. 
Only append if we're expanding + kids := x.getKids() + if cap(kids) > len(kids) { + kids = kids[:len(kids)+1] + } else { + kids = append(kids, uint8To32TrieNode{}) + } + x.setKids(kids) + r = &kids[len(kids)-1] + return +} + +func (x *uint8To32TrieNode) put(v uint8) (r *uint8To32TrieNode) { + kids := x.getKids() + for i := range kids { + if kids[i].key == v { + return &kids[i] + } + } + + r = x.expandKids() + r.reset(v) + return r +} + +func (x *uint8To32TrieNode) puts(s string, v uint32) (r *uint8To32TrieNode) { + for _, c := range []byte(s) { + x = x.put(c) + } + x.value = v + x.valid = true + return x +} + +func (x *uint8To32TrieNode) gets(s []byte) (v uint32, ok bool) { +TOP: + for _, b := range s { + kids := x.getKids() + for i := range kids { + if kids[i].key == b { + x = &kids[i] + continue TOP + } + } + return 0, false + } + return x.value, x.valid +} + +func (x *uint8To32TrieNode) deepNumKids() (n int) { + kids := x.getKids() + n = len(kids) + for i := range kids { + n += kids[i].deepNumKids() + } + return +} + +// arena just helps all the nodes stay close for better cache-line performance. +// It basically tries to load up all the nodes within a contiguous space of memory. +type uint8To32TrieNodeArena struct { + arena []uint8To32TrieNode + cursor int +} + +func (x *uint8To32TrieNodeArena) init(v *uint8To32TrieNode) (r *uint8To32TrieNode) { + x.arena = make([]uint8To32TrieNode, v.deepNumKids()+1) // incl one for the node, and one buffer + r = &x.arena[0] + x.cursor++ + x.clone(r, v) + return +} + +func (x *uint8To32TrieNodeArena) clone(dst, src *uint8To32TrieNode) { + dst.uint8To32TrieNodeNoKids = src.uint8To32TrieNodeNoKids + // dst.kids = nil + srckids := src.getKids() + c := len(srckids) + if c == 0 { + return + } + dstkids := x.arena[x.cursor:][:c:c] + dst.setKids(dstkids) + x.cursor += c + for i := range srckids { + x.clone(&dstkids[i], &srckids[i]) + } +} + +// ---- + +var pool4SFIs = sync.Pool{ + New: func() interface{} { + return &uint8To32TrieNode{} + }, +} + +func (x *structFieldInfos) finish() { + var src *uint8To32TrieNode + if usePoolForSFIs { + src = pool4SFIs.Get().(*uint8To32TrieNode) + } else { + src = &x.t + } + x.loadSearchTrie(src) + if useArenaForSFIs { + var ar uint8To32TrieNodeArena + x.t = *(ar.init(src)) + } + if usePoolForSFIs { + src.reset(0) + pool4SFIs.Put(src) + } +} + +func (x *structFieldInfos) loadSearchTrie(src *uint8To32TrieNode) { + // load the search trie + for i, v := range x.source() { + src.puts(v.encName, uint32(i)) + } +} + +func (x *structFieldInfos) search(name []byte) (sfi *structFieldInfo) { + n, ok := x.t.gets(name) + if ok { + sfi = x.source()[n] + } + return } type sfiSortedByEncName []*structFieldInfo @@ -1821,7 +1813,7 @@ type typeInfo4Container struct { tielem *typeInfo } -// typeInfo keeps static (non-changing readonly)information +// typeInfo keeps static (non-changing readonly) information // about each (non-ptr) type referenced in the encode/decode sequence. // // During an encode/decode sequence, we work as below: @@ -1842,12 +1834,15 @@ type typeInfo struct { kind uint8 chandir uint8 - anyOmitEmpty bool // true if a struct, and any of the fields are tagged "omitempty" - toArray bool // whether this (struct) type should be encoded as an array - keyType valueType // if struct, how is the field name stored in a stream? default is string - mbs bool // base type (T or *T) is a MapBySlice - - sfi4Name map[string]*structFieldInfo // map. 
used for finding sfi given a name + // simple=true if a struct, AND + // - none of the fields are tagged "omitempty" + // - no missingFielder + // - keyType is always string + // - noEsc4Json on any fields + simple bool + toArray bool // whether this (struct) type should be encoded as an array + keyType valueType // if struct, how is the field name stored in a stream? default is string + mbs bool // base type (T or *T) is a MapBySlice *typeInfo4Container @@ -1861,8 +1856,7 @@ type typeInfo struct { flagComparable bool flagCanTransient bool - flagMarshalInterface bool // does this have custom (un)marshal implementation? - flagSelferViaCodecgen bool + flagMarshalInterface bool // does this have custom (un)marshal implementation? // custom implementation flags flagIsZeroer bool @@ -1895,132 +1889,127 @@ type typeInfo struct { flagMissingFielder bool flagMissingFielderPtr bool + flagEncBuiltin bool + flagDecBuiltin bool + infoFieldOmitempty bool + // MARKER - may need padding here (like 6 bytes - auto-handled) sfi structFieldInfos } func (ti *typeInfo) siForEncName(name []byte) (si *structFieldInfo) { - return ti.sfi4Name[string(name)] + return ti.sfi.search(name) } func (ti *typeInfo) resolve(x []structFieldInfo, ss map[string]uint16) (n int) { n = len(x) - for i := range x { ui := uint16(i) - xn := x[i].encName + sf := &x[ui] + xn := sf.encName j, ok := ss[xn] - if ok { - i2clear := ui // index to be cleared - if x[i].path.depth() < x[j].path.depth() { // this one is shallower - ss[xn] = ui - i2clear = j - } - if x[i2clear].encName != "" { - x[i2clear].encName = "" - n-- - } - } else { + if !ok { ss[xn] = ui + continue } + if ui == j { + continue + } + // if x[i].path.depth() < x[j].path.depth() { // this one is shallower + sf2 := &x[j] + if len(sf.parents) < len(sf2.parents) { // this one is shallower + ss[xn] = ui + sf = sf2 + } + if sf.encName == "" { + continue + } + sf.encName = "" + n-- } - return } func (ti *typeInfo) init(x []structFieldInfo, n int) { - var anyOmitEmpty bool + simple := true + + if ti.flagMissingFielder || ti.flagMissingFielderPtr || + ti.keyType != valueTypeString { + simple = false + } // remove all the nils (non-ready) - m := make(map[string]*structFieldInfo, n) + // m := make(map[string]*structFieldInfo, n) w := make([]structFieldInfo, n) y := make([]*structFieldInfo, n+n) z := y[n:] y = y[:n] n = 0 for i := range x { - if x[i].encName == "" { + sfi := &x[i] + if sfi.encName == "" { continue } - if !anyOmitEmpty && x[i].path.omitEmpty { - anyOmitEmpty = true + if simple && (sfi.omitEmpty || sfi.encNameEscape4Json) { + simple = false } - w[n] = x[i] - y[n] = &w[n] - m[x[i].encName] = &w[n] + w[n] = *sfi + sfi = &w[n] + y[n] = sfi + // m[sfi.encName] = sfi n++ } if n != len(y) { - halt.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", ti.rt, len(y), len(x), n) + halt.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", ti.rt, len(y), len(x), any(n)) } + ti.simple = simple + copy(z, y) sort.Sort(sfiSortedByEncName(z)) - ti.anyOmitEmpty = anyOmitEmpty ti.sfi.load(y, z) - ti.sfi4Name = m + ti.sfi.finish() + // ti.sfi.byName = m } -// Handling flagCanTransient +// isCanTransient returns whether this type can be transient. +// +// # Handling flagCanTransient // // We support transient optimization if the kind of the type is -// a number, bool, string, or slice (of number/bool). -// In addition, we also support if the kind is struct or array, -// and the type does not contain any pointers recursively). 
+// - a number, bool, string +// - slice (of number/bool) +// - struct with no reference values (pointers, interface, etc) recursively +// - array with no reference values (pointers, interface, etc) recursively // -// Noteworthy that all reference types (string, slice, func, map, ptr, interface, etc) have pointers. +// NOTE: all reference types (string, slice, func, map, ptr, interface, etc) have pointers. // // If using transient for a type with a pointer, there is the potential for data corruption // when GC tries to follow a "transient" pointer which may become a non-pointer soon after. -// - -func transientBitsetFlags() *bitset32 { - if transientValueHasStringSlice { - return &numBoolStrSliceBitset +func isCanTransient(t reflect.Type, inclStrSlice bool) (v bool) { + k := t.Kind() + bset := &numBoolBitset + if inclStrSlice { + bset = &numBoolStrSliceBitset } - return &numBoolBitset -} - -func isCanTransient(t reflect.Type, k reflect.Kind) (v bool) { - var bs = transientBitsetFlags() - if bs.isset(byte(k)) { + if bset.isset(byte(k)) { v = true - } else if k == reflect.Slice { - elem := t.Elem() - v = numBoolBitset.isset(byte(elem.Kind())) } else if k == reflect.Array { - elem := t.Elem() - v = isCanTransient(elem, elem.Kind()) + v = isCanTransient(t.Elem(), false) } else if k == reflect.Struct { v = true for j, jlen := 0, t.NumField(); j < jlen; j++ { f := t.Field(j) - if !isCanTransient(f.Type, f.Type.Kind()) { - v = false - return + if !isCanTransient(f.Type, false) { + return false } } - } else { - v = false } return } -func (ti *typeInfo) doSetFlagCanTransient() { - if transientSizeMax > 0 { - ti.flagCanTransient = ti.size <= transientSizeMax - } else { - ti.flagCanTransient = true - } - if ti.flagCanTransient { - if !transientBitsetFlags().isset(ti.kind) { - ti.flagCanTransient = isCanTransient(ti.rt, reflect.Kind(ti.kind)) - } - } -} - type rtid2ti struct { rtid uintptr ti *typeInfo @@ -2031,7 +2020,7 @@ type rtid2ti struct { // It is configured with a set of tag keys, which are used to get // configuration for the type. type TypeInfos struct { - infos atomicTypeInfoSlice + infos atomic.Pointer[[]rtid2ti] // atomicTypeInfoSlice mu sync.Mutex _ uint64 // padding (cache-aligned) tags []string @@ -2088,9 +2077,9 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { } func (x *TypeInfos) find(rtid uintptr) (pti *typeInfo) { - sp := x.infos.load() + sp := x.infos.Load() if sp != nil { - _, pti = findTypeInfo(sp, rtid) + _, pti = findTypeInfo(*sp, rtid) } return } @@ -2099,7 +2088,7 @@ func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) { rk := rt.Kind() if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) { - halt.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt) + halt.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk.String(), rt) } rtid := rt2id(rt) @@ -2108,7 +2097,7 @@ func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) { // it may lead to duplication, but that's ok. 
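The "duplication is ok" note above is the key to the lock-free read path: typeInfo values are computed outside the lock, readers load an immutable sorted slice through an atomic pointer, and writers publish a fresh copy under the mutex. A minimal sketch of that copy-on-write registry pattern, with stand-in names (entry, registry) and string values in place of *typeInfo:

package main

import (
	"fmt"
	"sort"
	"sync"
	"sync/atomic"
)

type entry struct {
	key uintptr
	val string
}

// registry is a read-mostly cache: reads go through an atomic pointer with no
// lock; writers rebuild the sorted slice under a mutex and publish a new copy,
// so a reader never observes a partially updated slice.
type registry struct {
	infos atomic.Pointer[[]entry]
	mu    sync.Mutex
}

func (r *registry) find(key uintptr) (string, bool) {
	sp := r.infos.Load()
	if sp == nil {
		return "", false
	}
	s := *sp
	i := sort.Search(len(s), func(i int) bool { return s[i].key >= key })
	if i < len(s) && s[i].key == key {
		return s[i].val, true
	}
	return "", false
}

// insert assumes key is not already present (enough for the sketch).
// Computing val before taking the lock may duplicate work across goroutines,
// which is fine: the last published copy still holds a correct entry.
func (r *registry) insert(key uintptr, val string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	var old []entry
	if sp := r.infos.Load(); sp != nil {
		old = *sp
	}
	i := sort.Search(len(old), func(i int) bool { return old[i].key >= key })
	s := make([]entry, len(old)+1) // never mutate the slice readers may be scanning
	copy(s, old[:i])
	s[i] = entry{key, val}
	copy(s[i+1:], old[i:])
	r.infos.Store(&s)
}

func main() {
	var r registry
	r.insert(42, "map[string]int")
	r.insert(7, "bool")
	fmt.Println(r.find(7))
	fmt.Println(r.find(42))
}
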
ti := typeInfo{ rt: rt, - ptr: reflect.PtrTo(rt), + ptr: reflect.PointerTo(rt), rtid: rtid, kind: uint8(rk), size: uint32(rt.Size()), @@ -2119,6 +2108,12 @@ func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) { flagHasPkgPath: rt.PkgPath() != "", } + _, ti.flagEncBuiltin = searchRtids(encBuiltinRtids, rtid) + _, ti.flagDecBuiltin = searchRtids(decBuiltinRtids, rtid) + if !ti.flagDecBuiltin { + _, ti.flagDecBuiltin = searchRtids(decBuiltinRtids, rt2id(ti.ptr)) + } + // bset sets custom implementation flags bset := func(when bool, b *bool) { if when { @@ -2159,11 +2154,7 @@ func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) { bset(b1, &ti.flagIsCodecEmptyer) bset(b2, &ti.flagIsCodecEmptyerPtr) - b1, b2 = implIntf(rt, isSelferViaCodecgenerTyp) - ti.flagSelferViaCodecgen = b1 || b2 - ti.flagMarshalInterface = ti.flagSelfer || ti.flagSelferPtr || - ti.flagSelferViaCodecgen || ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr || ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr || ti.flagTextMarshaler || ti.flagTextMarshalerPtr || @@ -2175,7 +2166,7 @@ func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) { // bset(b1, &ti.flagComparable) ti.flagComparable = b1 - ti.doSetFlagCanTransient() + ti.flagCanTransient = isTransientType4Size(ti.size) && isCanTransient(ti.rt, true) var tt reflect.Type switch rk { @@ -2187,14 +2178,22 @@ func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) { } else { ti.keyType = valueTypeString } - pp, pi := &pool4tiload, pool4tiload.Get() - pv := pi.(*typeInfoLoad) - pv.reset() + var pi interface{} + var pv *typeInfoLoad + if usePoolForTypeInfoLoad { + pi = poolForTypeInfoLoad.Get() + pv = pi.(*typeInfoLoad) + pv.reset() + } else { + pv = newTypeInfoLoad() + } pv.etypes = append(pv.etypes, ti.rtid) - x.rget(rt, rtid, nil, pv, omitEmpty) + x.rget(rt, nil, pv, omitEmpty) n := ti.resolve(pv.sfis, pv.sfiNames) ti.init(pv.sfis, n) - pp.Put(pi) + if usePoolForTypeInfoLoad { + poolForTypeInfoLoad.Put(pi) + } case reflect.Map: ti.typeInfo4Container = new(typeInfo4Container) ti.elem = rt.Elem() @@ -2265,13 +2264,16 @@ func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) { } x.mu.Lock() - sp := x.infos.load() + var sp []rtid2ti + if spt := x.infos.Load(); spt != nil { + sp = *spt + } // since this is an atomic load/store, we MUST use a different array each time, // else we have a data race when a store is happening simultaneously with a findRtidFn call. if sp == nil { pti = &ti sp = []rtid2ti{{rtid, pti}} - x.infos.store(sp) + x.infos.Store(&sp) } else { var idx uint idx, pti = findTypeInfo(sp, rtid) @@ -2281,15 +2283,14 @@ func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) { copy(sp2[idx+1:], sp[idx:]) copy(sp2, sp[:idx]) sp2[idx] = rtid2ti{rtid, pti} - x.infos.store(sp2) + x.infos.Store(&sp2) } } x.mu.Unlock() return } -func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, - path *structFieldInfoPathNode, pv *typeInfoLoad, omitEmpty bool) { +func (x *TypeInfos) rget(rt reflect.Type, path *structFieldInfoPathNode, pv *typeInfoLoad, defaultOmitEmpty bool) { // Read up fields and store how to access the value. 
// // It uses go's rules for message selectors, @@ -2318,41 +2319,40 @@ LOOP: if stag == "-" { continue } - var si structFieldInfo var numderef uint8 = 0 - for xft := f.Type; xft.Kind() == reflect.Ptr; xft = xft.Elem() { + ft := f.Type + for ; ft.Kind() == reflect.Ptr; ft = ft.Elem() { numderef++ } - var parsed bool + var encName string + var parsed, omitEmpty bool + + ftid := rt2id(ft) // if anonymous and no struct tag (or it's blank), // and a struct (or pointer to struct), inline it. if f.Anonymous && fkind != reflect.Interface { // ^^ redundant but ok: per go spec, an embedded pointer type cannot be to an interface - ft := f.Type - isPtr := ft.Kind() == reflect.Ptr - for ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } + isPtr := f.Type.Kind() == reflect.Ptr isStruct := ft.Kind() == reflect.Struct // Ignore embedded fields of unexported non-struct types. // Also, from go1.10, ignore pointers to unexported struct types // because unmarshal cannot assign a new struct to an unexported field. // See https://golang.org/issue/21357 - if (isUnexported && !isStruct) || (!allowSetUnexportedEmbeddedPtr && isUnexported && isPtr) { + if isUnexported && (!isStruct || isPtr) { continue } doInline := stag == "" if !doInline { - si.parseTag(stag) + encName, omitEmpty = parseStructFieldTag(stag) parsed = true - doInline = si.encName == "" // si.isZero() + doInline = encName == "" // si.isZero() } if doInline && isStruct { // if etypes contains this, don't call rget again (as fields are already seen here) - ftid := rt2id(ft) + // // We cannot recurse forever, but we need to track other field depths. // So - we break if we see a type twice (not the first time). // This should be sufficient to handle an embedded type that refers to its @@ -2371,54 +2371,83 @@ LOOP: if processIt { pv.etypes = append(pv.etypes, ftid) path2 := &structFieldInfoPathNode{ - parent: path, - typ: f.Type, - offset: uint16(f.Offset), - index: j, - kind: uint8(fkind), - numderef: numderef, + parent: path, + structFieldInfoNode: structFieldInfoNode{ + typ: f.Type, + offset: uint16(f.Offset), + index: j, + kind: uint8(fkind), + numderef: numderef, + }, } - x.rget(ft, ftid, path2, pv, omitEmpty) + x.rget(ft, path2, pv, defaultOmitEmpty) } continue } } // after the anonymous dance: if an unexported field, skip - if isUnexported || f.Name == "" { // f.Name cannot be "", but defensively handle it + if isUnexported || f.Name == "" || f.Name == structInfoFieldName { // f.Name cannot be "", but defensively handle it continue } - si.path = structFieldInfoPathNode{ - parent: path, + if !parsed { + encName, omitEmpty = parseStructFieldTag(stag) + parsed = true + } + if encName == "" { + encName = f.Name + } + if defaultOmitEmpty { + omitEmpty = true + } + + var si = structFieldInfo{ + encName: encName, + omitEmpty: omitEmpty, + ptrKind: fkind == reflect.Ptr, + baseTyp: ft, + ptrTyp: reflect.PointerTo(ft), + } + + // si.path = structFieldInfoPathNode{ + // parent: path, + // structFieldInfoNode: structFieldInfoNode{ + // typ: f.Type, + // offset: uint16(f.Offset), + // index: j, + // kind: uint8(fkind), + // numderef: numderef, + // }, + // } + + si.node = structFieldInfoNode{ typ: f.Type, offset: uint16(f.Offset), index: j, kind: uint8(fkind), numderef: numderef, - // set asciiAlphaNum to true (default); checked and may be set to false below - encNameAsciiAlphaNum: true, - // note: omitEmpty might have been set in an earlier parseTag call, etc - so carry it forward - omitEmpty: si.path.omitEmpty, } - if !parsed { - si.encName = f.Name - 
si.parseTag(stag) - parsed = true - } else if si.encName == "" { - si.encName = f.Name + if path != nil { + si.parents = make([]structFieldInfoNode, path.depth()) + for k, p := len(si.parents)-1, path; k >= 0; k-- { + si.parents[k] = p.structFieldInfoNode + p = p.parent + } } + // ftid = rt2id(ft) where ft = si.baseTyp) + _, si.encBuiltin = searchRtids(encBuiltinRtids, ftid) + _, si.decBuiltin = searchRtids(decBuiltinRtids, ftid) + if !si.decBuiltin { + _, si.decBuiltin = searchRtids(decBuiltinRtids, rt2id(si.ptrTyp)) + } // si.encNameHash = maxUintptr() // hashShortString(bytesView(si.encName)) - if omitEmpty { - si.path.omitEmpty = true - } - for i := len(si.encName) - 1; i >= 0; i-- { // bounds-check elimination - if !asciiAlphaNumBitset.isset(si.encName[i]) { - si.path.encNameAsciiAlphaNum = false + if !jsonCharSafeBitset.isset(si.encName[i]) { + si.encNameEscape4Json = true break } } @@ -2427,6 +2456,29 @@ LOOP: } } +type timeRv struct { + v time.Time + r reflect.Value +} + +type bytesRv struct { + v []byte + r reflect.Value +} + +type stringIntf struct { + v string + i interface{} +} + +func cmpTimeRv(v1, v2 timeRv) int { + return v1.v.Compare(v2.v) +} + +func cmpBytesRv(v1, v2 bytesRv) int { + return bytes.Compare(v1.v, v2.v) +} + func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) { // return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp) @@ -2441,11 +2493,15 @@ func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) { if base { indir = true } else { - indir = reflect.PtrTo(rt).Implements(iTyp) + indir = reflect.PointerTo(rt).Implements(iTyp) } return } +func bytesOK(bs []byte, _ bool) []byte { + return bs +} + func bool2int(b bool) (v uint8) { // MARKER: optimized to be a single instruction if b { @@ -2455,25 +2511,97 @@ func bool2int(b bool) (v uint8) { } func isSliceBoundsError(s string) bool { - return strings.Contains(s, "index out of range") || - strings.Contains(s, "slice bounds out of range") + return strings.Contains(s, "index out of range") || // indexing error + strings.Contains(s, "slice bounds out of range") || // slicing error + strings.Contains(s, "cannot convert slice with length") // slice-->array error } func sprintf(format string, v ...interface{}) string { return fmt.Sprintf(format, v...) } -func panicValToErr(h errDecorator, v interface{}, err *error) { - if v == *err { +func snip(v []byte) []byte { + return v[:min(96, len(v))] +} + +// These constants are used within debugf. +// If the first parameter to debugf is one of these, it determines +// the ANSI color used within the ANSI terminal. +// +// They make it easier to write different groups of debug statements +// with a visual aid. +const ( + hlSFX = "\033[0m" + hlPFX = "\033[1;" + hlBLACK = hlPFX + "30" + "m" + hlRED = hlPFX + "31" + "m" + hlGREEN = hlPFX + "32" + "m" + hlYELLOW = hlPFX + "33" + "m" + hlBLUE = hlPFX + "34" + "m" + hlPURPLE = hlPFX + "35" + "m" + hlCYAN = hlPFX + "36" + "m" + hlWHITE = hlPFX + "37" + "m" + // hlORANGE = hlYELLOW +) + +// debugf will print debug statements to the screen whether or not debugging is on +// +// Note: if first parameter in a is one of the hlXXX vars, then we treat it as a hint +// to highlight in different colors. 
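The rget walk earlier in this hunk now calls parseStructFieldTag, which splits a struct tag into the encode name (first comma-separated element) and an omitempty option; the "-" skip happens before that call, inside rget itself. A standalone sketch of that parsing, assuming the same comma-separated format (parseTag and the sample tags are illustrative):

package main

import (
	"fmt"
	"strings"
)

// parseTag mirrors parseStructFieldTag above: the first comma-separated element
// is the encode name (empty means "fall back to the Go field name"), and later
// elements are options, of which only "omitempty" is recognized here.
func parseTag(stag string) (encName string, omitEmpty bool) {
	if stag == "" {
		return
	}
	for i, s := range strings.Split(stag, ",") {
		if i == 0 {
			encName = s
			continue
		}
		if s == "omitempty" {
			omitEmpty = true
		}
	}
	return
}

func main() {
	fmt.Println(parseTag("name,omitempty")) // name true
	fmt.Println(parseTag(",omitempty"))     //  true (empty name: use the field name)
	fmt.Println(parseTag("renamed"))        // renamed false
}
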
+// +//go:noinline +func debugf(s string, a ...any) { + if !debugLogging { return } - switch xerr := v.(type) { - case nil: + if len(s) == 0 { + return + } + if s[len(s)-1] != '\n' { + s = s + "\n" + } + if len(a) > 0 { + switch a[0] { + case hlBLACK, hlRED, hlGREEN, hlYELLOW, hlBLUE, hlPURPLE, hlCYAN, hlWHITE: + s = a[0].(string) + s + hlSFX + a = a[1:] + } + } + fmt.Printf(s, a...) +} + +func panicToErr(h errDecorator, fn func()) (err error) { + defer panicValToErr(h, callRecoverSentinel, &err, nil, debugging) + fn() + return +} + +// panicValToErr will convert a panic value into an error +// +// err and recovered are guaranteed to be not nil +func panicValToErr(h errDecorator, recovered interface{}, err, errCopy *error, panicAgain bool) { + if recovered == callRecoverSentinel { + recovered = recover() + } + if recovered == nil || err == nil { + return + } + if recovered == *err { + goto HANDLE_COPY + } + switch xerr := recovered.(type) { + case *outOfBoundsError: + h.wrapErr(xerr, err) case runtime.Error: - d, dok := h.(*Decoder) - if dok && d.bytes && isSliceBoundsError(xerr.Error()) { - *err = io.ErrUnexpectedEOF - } else { + switch d := h.(type) { + case decoderI: + if d.isBytes() && isSliceBoundsError(xerr.Error()) { + // *err = io.ErrUnexpectedEOF + h.wrapErr(io.ErrUnexpectedEOF, err) + } else { + h.wrapErr(xerr, err) + } + default: h.wrapErr(xerr, err) } case error: @@ -2481,62 +2609,55 @@ func panicValToErr(h errDecorator, v interface{}, err *error) { case nil: case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized: // treat as special (bubble up) - *err = xerr + // *err = xerr + h.wrapErr(xerr, err) default: h.wrapErr(xerr, err) } + case string: + h.wrapErr(errors.New(xerr), err) + // *err = errors.New(xerr) default: // we don't expect this to happen (as this library always panics with an error) - h.wrapErr(fmt.Errorf("%v", v), err) + h.wrapErr(fmt.Errorf("%v", recovered), err) + } +HANDLE_COPY: + if errCopy != nil { + *errCopy = *err + } + if panicAgain { + panic(*err) } } -func usableByteSlice(bs []byte, slen int) (out []byte, changed bool) { +func usableByteSlice(bs []byte, slen int) (out []byte, isMadeNew bool) { const maxCap = 1024 * 1024 * 64 // 64MB - const skipMaxCap = false // allow to test - if slen <= 0 { - return []byte{}, true + // const skipMaxCap = false // allow to test + + // if slen <= 0 { + // return bs[:0], false // return zeroByteSlice, true + // } + + // slen=0 means it's defined-length of 0. + // slen<0 means non-defined length which would be determined in future. + + // if bs is nil, for length=0, ensure we don't return a nil []byte, + // which will cause DecodeBytes (caller) to return a nil []byte incorrectly. + if slen == 0 { + return zeroByteSlice, false + } + if slen < 0 { + return bs[:0], false } if slen <= cap(bs) { return bs[:slen], false } // slen > cap(bs) ... handle memory overload appropriately - if skipMaxCap || slen <= maxCap { - return make([]byte, slen), true - } - return make([]byte, maxCap), true -} - -func mapKeyFastKindFor(k reflect.Kind) mapKeyFastKind { - return mapKeyFastKindVals[k&31] -} - -// ---- - -type codecFnInfo struct { - ti *typeInfo - xfFn Ext - xfTag uint64 - addrD bool - addrDf bool // force: if addrD, then decode function MUST take a ptr - addrE bool - // addrEf bool // force: if addrE, then encode function MUST take a ptr -} - -// codecFn encapsulates the captured variables and the encode function. 
-// This way, we only do some calculations one times, and pass to the -// code block that should be called (encapsulated in a function) -// instead of executing the checks every time. -type codecFn struct { - i codecFnInfo - fe func(*Encoder, *codecFnInfo, reflect.Value) - fd func(*Decoder, *codecFnInfo, reflect.Value) - // _ [1]uint64 // padding (cache-aligned) -} - -type codecRtidFn struct { - rtid uintptr - fn *codecFn + return make([]byte, min(slen, maxCap)), true + // if skipMaxCap || slen <= maxCap { + // return make([]byte, slen), true + // } + // return make([]byte, maxCap), true } func makeExt(ext interface{}) Ext { @@ -2552,12 +2673,25 @@ func makeExt(ext interface{}) Ext { } func baseRV(v interface{}) (rv reflect.Value) { + // MARKER TODO try using rv4i not reflect.ValueOf // use reflect.ValueOf, not rv4i, as of go 1.16beta, rv4i was not inlineable for rv = reflect.ValueOf(v); rv.Kind() == reflect.Ptr; rv = rv.Elem() { } return } +func baseRVRV(v reflect.Value) (rv reflect.Value) { + for rv = v; rv.Kind() == reflect.Ptr; rv = rv.Elem() { + } + return +} + +func baseRT(v reflect.Type) (vv reflect.Type) { + for vv = v; vv.Kind() == reflect.Ptr; vv = vv.Elem() { + } + return +} + // ---- // these "checkOverflow" functions must be inlinable, and not call anybody. @@ -2613,25 +2747,25 @@ func (checkOverflow) SignedInt(v uint64) (overflow bool) { func (x checkOverflow) Float32V(v float64) float64 { if x.Float32(v) { - halt.errorf("float32 overflow: %v", v) + halt.errorFloat("float32 overflow: ", v) } return v } func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 { if x.Uint(v, bitsize) { - halt.errorf("uint64 overflow: %v", v) + halt.errorUint("uint64 overflow: ", v) } return v } func (x checkOverflow) IntV(v int64, bitsize uint8) int64 { if x.Int(v, bitsize) { - halt.errorf("int64 overflow: %v", v) + halt.errorInt("int64 overflow: ", v) } return v } func (x checkOverflow) SignedIntV(v uint64) int64 { if x.SignedInt(v) { - halt.errorf("uint64 to int64 overflow: %v", v) + halt.errorUint("uint64 to int64 overflow: ", v) } return int64(v) } @@ -2660,6 +2794,91 @@ func isNumberChar(v byte) bool { // ----------------------- +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } + } + return +} + +func halfFloatToFloatBits(h uint16) (f uint32) { + // retrofitted from: + // - OGRE (Object-Oriented Graphics Rendering Engine) + // function: halfToFloatI https://www.ogre3d.org/docs/api/1.9/_ogre_bitwise_8h_source.html + + s := uint32(h >> 15) + m := uint32(h & 0x03ff) + e := int32((h >> 10) & 0x1f) + + if e == 0 { + if m == 0 { // plus or minus 0 + return s << 31 + } + // Denormalized number -- renormalize it + for (m & 0x0400) == 0 { + m <<= 1 + e -= 1 + } + e += 1 + m &= ^uint32(0x0400) + } else if e == 31 { + if m == 0 { // Inf + return (s << 31) | 0x7f800000 + } + return (s << 31) | 0x7f800000 | (m << 13) // NaN + } + e = e + (127 - 15) + m = m << 13 + return (s << 31) | (uint32(e) << 23) | m +} + +func floatToHalfFloatBits(i uint32) (h uint16) { + // retrofitted from: + // - OGRE (Object-Oriented Graphics Rendering Engine) + // function: halfToFloatI https://www.ogre3d.org/docs/api/1.9/_ogre_bitwise_8h_source.html + // - http://www.java2s.com/example/java-utility-method/float-to/floattohalf-float-f-fae00.html + s := (i >> 16) & 0x8000 + e := int32(((i >> 23) & 0xff) 
- (127 - 15)) + m := i & 0x7fffff + + var h32 uint32 + + if e <= 0 { + if e < -10 { // zero + h32 = s // track -0 vs +0 + } else { + m = (m | 0x800000) >> uint32(1-e) + h32 = s | (m >> 13) + } + } else if e == 0xff-(127-15) { + if m == 0 { // Inf + h32 = s | 0x7c00 + } else { // NAN + m >>= 13 + var me uint32 + if m == 0 { + me = 1 + } + h32 = s | 0x7c00 | m | me + } + } else { + if e > 30 { // Overflow + h32 = s | 0x7c00 + } else { + h32 = s | (uint32(e) << 10) | (m >> 13) + } + } + h = uint16(h32) + return +} + +// ----------------------- + type ioFlusher interface { Flush() error } @@ -2719,31 +2938,67 @@ func (x *bitset256) isset(pos byte) bool { // ------------ +// panicHdl will panic with the parameters passed. type panicHdl struct{} -// errorv will panic if err is defined (not nil) func (panicHdl) onerror(err error) { if err != nil { panic(err) } } +func (panicHdl) error(err error) { panic(err) } + +func (panicHdl) errorStr(s string) { panic(s) } + +func (panicHdl) errorStr2(s, s2 string) { panic(s + s2) } + +func (panicHdl) errorBytes(s string, p1 []byte) { panic(s + stringView(p1)) } + +func (v panicHdl) errorByte(prefix string, p1 byte) { + panic(stringView(append(panicHdlBytes(prefix), p1))) +} + +func (v panicHdl) errorInt(prefix string, p1 int64) { + panic(stringView(strconv.AppendInt(panicHdlBytes(prefix), p1, 10))) + // bs := make([]byte, len(prefix)+8) + // bs = append(bs, prefix...) + // bs = strconv.AppendInt(bs, p1, 10) + // panic(stringView(bs)) +} + +func (v panicHdl) errorUint(prefix string, p1 uint64) { + panic(stringView(strconv.AppendUint(panicHdlBytes(prefix), p1, 10))) +} + +func (v panicHdl) errorFloat(prefix string, p1 float64) { + panic(stringView(strconv.AppendFloat(panicHdlBytes(prefix), p1, 'G', -1, 64))) +} + +// MARKER +// consider adding //go:noinline to errorf and maybe other methods + // errorf will always panic, using the parameters passed. // // Note: it is ok to pass in a stringView, as it will just pass it directly // to a fmt.Sprintf call and not hold onto it. // -//go:noinline +// Since this is an unexported call, we will not be defensive. +// Callers should ensure a non-empty string and 1+ parameter. func (panicHdl) errorf(format string, params ...interface{}) { - if format == "" { - panic(errPanicUndefined) - } - if len(params) == 0 { - panic(errors.New(format)) - } + // if format == "" { + // panic(errPanicUndefined) + // } + // if len(params) == 0 { + // panic(errors.New(format)) + // } panic(fmt.Errorf(format, params...)) } +func panicHdlBytes(prefix string) []byte { + return append(make([]byte, len(prefix)+8), prefix...) +} + // ---------------------------------------------------- type errDecorator interface { @@ -2778,12 +3033,12 @@ func (mustHdl) Float(s float64, err error) float64 { // ------------------- func freelistCapacity(length int) (capacity int) { - for capacity = 8; capacity <= length; capacity *= 2 { + for capacity = 8; capacity < length; capacity *= 2 { } return } -// bytesFreelist is a list of byte buffers, sorted by cap. +// bytesFreeList is a list of byte buffers, sorted by cap. // // In anecdotal testing (running go test -tsd 1..6), we couldn't get // the length of the list > 4 at any time. So we believe a linear search @@ -2807,12 +3062,12 @@ func freelistCapacity(length int) (capacity int) { // if !byteSliceSameData(v0, v1) { // blist.put(v0) // } -type bytesFreelist [][]byte +type bytesFreeList [][]byte // peek returns a slice of possibly non-zero'ed bytes, with len=0, // and with the largest capacity from the list. 
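A few screens up in this hunk, halfFloatToFloatBits widens an IEEE 754 binary16 value (1 sign, 5 exponent, 10 mantissa bits) to binary32 by rebasing the exponent from bias 15 to bias 127 and left-aligning the mantissa. A self-contained round-trip check using that function as written in the hunk; the sample bit patterns and the main wrapper are illustrative:

package main

import (
	"fmt"
	"math"
)

// halfFloatToFloatBits is the conversion shown above: it widens a binary16
// bit pattern into the corresponding binary32 bit pattern.
func halfFloatToFloatBits(h uint16) (f uint32) {
	s := uint32(h >> 15)
	m := uint32(h & 0x03ff)
	e := int32((h >> 10) & 0x1f)
	if e == 0 {
		if m == 0 { // plus or minus 0
			return s << 31
		}
		// Denormalized number -- renormalize it
		for (m & 0x0400) == 0 {
			m <<= 1
			e -= 1
		}
		e += 1
		m &= ^uint32(0x0400)
	} else if e == 31 {
		if m == 0 { // Inf
			return (s << 31) | 0x7f800000
		}
		return (s << 31) | 0x7f800000 | (m << 13) // NaN
	}
	e = e + (127 - 15) // rebase exponent from bias 15 to bias 127
	m = m << 13        // left-align the 10-bit mantissa in the 23-bit field
	return (s << 31) | (uint32(e) << 23) | m
}

func main() {
	// 0x3E00 is 1.5 in binary16: sign 0, biased exponent 15 (2^0), mantissa .5
	fmt.Println(math.Float32frombits(halfFloatToFloatBits(0x3E00))) // 1.5
	fmt.Println(math.Float32frombits(halfFloatToFloatBits(0xC000))) // -2
	fmt.Println(math.Float32frombits(halfFloatToFloatBits(0x7C00))) // +Inf
}
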
-func (x *bytesFreelist) peek(length int, pop bool) (out []byte) { - if bytesFreeListNoCache { +func (x *bytesFreeList) peek(length int, pop bool) (out []byte) { + if !useBytesFreeList { return make([]byte, 0, freelistCapacity(length)) } y := *x @@ -2838,15 +3093,12 @@ func (x *bytesFreelist) peek(length int, pop bool) (out []byte) { // get returns a slice of possibly non-zero'ed bytes, with len=0, // and with cap >= length requested. -func (x *bytesFreelist) get(length int) (out []byte) { - if bytesFreeListNoCache { +func (x *bytesFreeList) get(length int) (out []byte) { + if !useBytesFreeList { return make([]byte, 0, freelistCapacity(length)) } y := *x - // MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta - // for i, v := range y { - for i := 0; i < len(y); i++ { - v := y[i] + for i, v := range y { if cap(v) >= length { // *x = append(y[:i], y[i+1:]...) copy(y[i:], y[i+1:]) @@ -2857,21 +3109,21 @@ func (x *bytesFreelist) get(length int) (out []byte) { return make([]byte, 0, freelistCapacity(length)) } -func (x *bytesFreelist) put(v []byte) { - if bytesFreeListNoCache || cap(v) == 0 { +func (x *bytesFreeList) put(v []byte) { + if !useBytesFreeList || cap(v) == 0 { return } if len(v) != 0 { v = v[:0] } + // v = v[:0] // append the new value, then try to put it in a better position y := append(*x, v) *x = y - // MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta + // MARKER: use simple for loop, so as not to create new slice // for i, z := range y[:len(y)-1] { for i := 0; i < len(y)-1; i++ { - z := y[i] - if cap(z) > cap(v) { + if cap(y[i]) > cap(v) { copy(y[i+1:], y[i:]) y[i] = v return @@ -2879,24 +3131,23 @@ func (x *bytesFreelist) put(v []byte) { } } -func (x *bytesFreelist) check(v []byte, length int) (out []byte) { +func (x *bytesFreeList) check(v []byte, length int) (out []byte) { // ensure inlineable, by moving slow-path out to its own function if cap(v) >= length { return v[:0] } - return x.checkPutGet(v, length) + return x.putGet(v, length) } -func (x *bytesFreelist) checkPutGet(v []byte, length int) []byte { - // checkPutGet broken out into its own function, so check is inlineable in general case - const useSeparateCalls = false +func (x *bytesFreeList) putGet(v []byte, length int) []byte { + // putGet broken out into its own function, so check is inlineable in general case - if useSeparateCalls { + if useBytesFreeListPutGetSeparateCalls { x.put(v) return x.get(length) } - if bytesFreeListNoCache { + if !useBytesFreeList { return make([]byte, 0, freelistCapacity(length)) } @@ -2907,8 +3158,9 @@ func (x *bytesFreelist) checkPutGet(v []byte, length int) []byte { y = append(y, v) *x = y } - for i := 0; i < len(y); i++ { - z := y[i] + // for i := 0; i < len(y); i++ { + // z := y[i] + for i, z := range y { if put { if cap(z) >= length { copy(y[i:], y[i+1:]) @@ -2929,7 +3181,7 @@ func (x *bytesFreelist) checkPutGet(v []byte, length int) []byte { // ------------------------- -// sfiRvFreelist is used by Encoder for encoding structs, +// sfiRvFreeList is used by Encoder for encoding structs, // where we have to gather the fields first and then // analyze them for omitEmpty, before knowing the length of the array/map to encode. // @@ -2938,9 +3190,12 @@ func (x *bytesFreelist) checkPutGet(v []byte, length int) []byte { // // In the general case, the length of this list at most times is 1, // so linear search is fine. 
-type sfiRvFreelist [][]sfiRv +type sfiRvFreeList [][]sfiRv -func (x *sfiRvFreelist) get(length int) (out []sfiRv) { +func (x *sfiRvFreeList) get(length int) (out []sfiRv) { + if !useSfiRvFreeList { + return make([]sfiRv, 0, freelistCapacity(length)) + } y := *x // MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta @@ -2957,7 +3212,10 @@ func (x *sfiRvFreelist) get(length int) (out []sfiRv) { return make([]sfiRv, 0, freelistCapacity(length)) } -func (x *sfiRvFreelist) put(v []sfiRv) { +func (x *sfiRvFreeList) put(v []sfiRv) { + if !useSfiRvFreeList { + return + } if len(v) != 0 { v = v[:0] } @@ -3002,3 +3260,19 @@ func (x internerMap) string(v []byte) (s string) { } return } + +// ---- + +type bytesEncoder interface { + Encode(dst, src []byte) + Decode(dst, src []byte) (n int, err error) + EncodedLen(n int) int + DecodedLen(n int) int +} + +type hexEncoder struct{} + +func (hexEncoder) Encode(dst, src []byte) { hex.Encode(dst, src) } +func (hexEncoder) Decode(dst, src []byte) (n int, err error) { return hex.Decode(dst, src) } +func (hexEncoder) EncodedLen(n int) int { return hex.EncodedLen(n) } +func (hexEncoder) DecodedLen(n int) int { return hex.DecodedLen(n) } diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go deleted file mode 100644 index e646249c7..000000000 --- a/vendor/github.com/ugorji/go/codec/helper_internal.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// maxArrayLen is the size of uint, which determines -// the maximum length of any array. -const maxArrayLen = 1<<((32<<(^uint(0)>>63))-1) - 1 - -// All non-std package dependencies live in this file, -// so porting to different environment is easy (just update functions). 
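The hex-backed bytesEncoder added to helper.go above is the seam the codec now uses for byte-to-text transcoding. For orientation only (this sketch is not part of the vendored diff), here is a standalone program that re-declares those two small types and round-trips a payload through them, using nothing beyond encoding/hex:

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
)

// bytesEncoder and hexEncoder mirror the declarations added in helper.go above.
type bytesEncoder interface {
	Encode(dst, src []byte)
	Decode(dst, src []byte) (n int, err error)
	EncodedLen(n int) int
	DecodedLen(n int) int
}

type hexEncoder struct{}

func (hexEncoder) Encode(dst, src []byte)              { hex.Encode(dst, src) }
func (hexEncoder) Decode(dst, src []byte) (int, error) { return hex.Decode(dst, src) }
func (hexEncoder) EncodedLen(n int) int                { return hex.EncodedLen(n) }
func (hexEncoder) DecodedLen(n int) int                { return hex.DecodedLen(n) }

func main() {
	var be bytesEncoder = hexEncoder{}
	src := []byte("codec")
	enc := make([]byte, be.EncodedLen(len(src)))
	be.Encode(enc, src) // "636f646563"
	dec := make([]byte, be.DecodedLen(len(enc)))
	n, err := be.Decode(dec, enc)
	fmt.Println(string(enc), string(dec[:n]), err == nil, bytes.Equal(src, dec[:n]))
}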
- -func pruneSignExt(v []byte, pos bool) (n int) { - if len(v) < 2 { - } else if pos && v[0] == 0 { - for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { - } - } else if !pos && v[0] == 0xff { - for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { - } - } - return -} - -func halfFloatToFloatBits(h uint16) (f uint32) { - // retrofitted from: - // - OGRE (Object-Oriented Graphics Rendering Engine) - // function: halfToFloatI https://www.ogre3d.org/docs/api/1.9/_ogre_bitwise_8h_source.html - - s := uint32(h >> 15) - m := uint32(h & 0x03ff) - e := int32((h >> 10) & 0x1f) - - if e == 0 { - if m == 0 { // plus or minus 0 - return s << 31 - } - // Denormalized number -- renormalize it - for (m & 0x0400) == 0 { - m <<= 1 - e -= 1 - } - e += 1 - m &= ^uint32(0x0400) - } else if e == 31 { - if m == 0 { // Inf - return (s << 31) | 0x7f800000 - } - return (s << 31) | 0x7f800000 | (m << 13) // NaN - } - e = e + (127 - 15) - m = m << 13 - return (s << 31) | (uint32(e) << 23) | m -} - -func floatToHalfFloatBits(i uint32) (h uint16) { - // retrofitted from: - // - OGRE (Object-Oriented Graphics Rendering Engine) - // function: halfToFloatI https://www.ogre3d.org/docs/api/1.9/_ogre_bitwise_8h_source.html - // - http://www.java2s.com/example/java-utility-method/float-to/floattohalf-float-f-fae00.html - s := (i >> 16) & 0x8000 - e := int32(((i >> 23) & 0xff) - (127 - 15)) - m := i & 0x7fffff - - var h32 uint32 - - if e <= 0 { - if e < -10 { // zero - h32 = s // track -0 vs +0 - } else { - m = (m | 0x800000) >> uint32(1-e) - h32 = s | (m >> 13) - } - } else if e == 0xff-(127-15) { - if m == 0 { // Inf - h32 = s | 0x7c00 - } else { // NAN - m >>= 13 - var me uint32 - if m == 0 { - me = 1 - } - h32 = s | 0x7c00 | m | me - } - } else { - if e > 30 { // Overflow - h32 = s | 0x7c00 - } else { - h32 = s | (uint32(e) << 10) | (m >> 13) - } - } - h = uint16(h32) - return -} - -// growCap will return a new capacity for a slice, given the following: -// - oldCap: current capacity -// - unit: in-memory size of an element -// - num: number of elements to add -func growCap(oldCap, unit, num uint) (newCap uint) { - // appendslice logic (if cap < 1024, *2, else *1.25): - // leads to many copy calls, especially when copying bytes. - // bytes.Buffer model (2*cap + n): much better for bytes. - // smarter way is to take the byte-size of the appended element(type) into account - - // maintain 1 thresholds: - // t1: if cap <= t1, newcap = 2x - // else newcap = 1.5x - // - // t1 is always >= 1024. - // This means that, if unit size >= 16, then always do 2x or 1.5x (ie t1, t2, t3 are all same) - // - // With this, appending for bytes increase by: - // 100% up to 4K - // 50% beyond that - - // unit can be 0 e.g. 
for struct{}{}; handle that appropriately - maxCap := num + (oldCap * 3 / 2) - if unit == 0 || maxCap > maxArrayLen || maxCap < oldCap { // handle wraparound, etc - return maxArrayLen - } - - var t1 uint = 1024 // default thresholds for large values - if unit <= 4 { - t1 = 8 * 1024 - } else if unit <= 16 { - t1 = 2 * 1024 - } - - newCap = 2 + num - if oldCap > 0 { - if oldCap <= t1 { // [0,t1] - newCap = num + (oldCap * 2) - } else { // (t1,infinity] - newCap = maxCap - } - } - - // ensure newCap takes multiples of a cache line (size is a multiple of 64) - t1 = newCap * unit - if t2 := t1 % 64; t2 != 0 { - t1 += 64 - t2 - newCap = t1 / unit - } - - return -} diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go index 10034b86f..413dc39de 100644 --- a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go +++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go @@ -2,7 +2,6 @@ // Use of this source code is governed by a MIT license found in the LICENSE file. //go:build !go1.9 || safe || codec.safe || appengine -// +build !go1.9 safe codec.safe appengine package codec @@ -19,8 +18,11 @@ import ( const safeMode = true -const transientSizeMax = 0 -const transientValueHasStringSlice = true +func isTransientType4Size(size uint32) bool { return true } + +type mapReqParams struct{} + +func getMapReqParams(ti *typeInfo) (r mapReqParams) { return } func byteAt(b []byte, index uint) byte { return b[index] @@ -30,14 +32,6 @@ func setByteAt(b []byte, index uint, val byte) { b[index] = val } -func byteSliceOf(b []byte, start, end uint) []byte { - return b[start:end] -} - -// func byteSliceWithLen(b []byte, length uint) []byte { -// return b[:length] -// } - func stringView(v []byte) string { return string(v) } @@ -50,34 +44,26 @@ func byteSliceSameData(v1 []byte, v2 []byte) bool { return cap(v1) != 0 && cap(v2) != 0 && &(v1[:1][0]) == &(v2[:1][0]) } -func okBytes2(b []byte) (v [2]byte) { - copy(v[:], b) - return -} - -func okBytes3(b []byte) (v [3]byte) { - copy(v[:], b) - return -} - -func okBytes4(b []byte) (v [4]byte) { - copy(v[:], b) - return -} - -func okBytes8(b []byte) (v [8]byte) { - copy(v[:], b) - return -} - -func isNil(v interface{}) (rv reflect.Value, isnil bool) { +func isNil(v interface{}, checkPtr bool) (rv reflect.Value, b bool) { + b = v == nil + if b || !checkPtr { + return + } rv = reflect.ValueOf(v) - if isnilBitset.isset(byte(rv.Kind())) { - isnil = rv.IsNil() + if rv.Kind() == reflect.Ptr { + b = rv.IsNil() } return } +func ptrToLowLevel(v interface{}) interface{} { + return v +} + +func lowLevelToPtr[T any](v interface{}) *T { + return v.(*T) +} + func eq4i(i0, i1 interface{}) bool { return i0 == i1 } @@ -85,17 +71,21 @@ func eq4i(i0, i1 interface{}) bool { func rv4iptr(i interface{}) reflect.Value { return reflect.ValueOf(i) } func rv4istr(i interface{}) reflect.Value { return reflect.ValueOf(i) } -// func rv4i(i interface{}) reflect.Value { return reflect.ValueOf(i) } -// func rv4iK(i interface{}, kind byte, isref bool) reflect.Value { return reflect.ValueOf(i) } - func rv2i(rv reflect.Value) interface{} { - return rv.Interface() + if rv.IsValid() { + return rv.Interface() + } + return nil } func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value { return rv.Addr() } +func rvPtrIsNil(rv reflect.Value) bool { + return rv.IsNil() +} + func rvIsNil(rv reflect.Value) bool { return rv.IsNil() } @@ -131,6 +121,30 @@ func i2rtid(i interface{}) uintptr { // -------------------------- +// is this an 
empty interface/ptr/struct/map/slice/chan/array +func isEmptyContainerValue(v reflect.Value, tinfos *TypeInfos, recursive bool) (empty bool) { + switch v.Kind() { + case reflect.Array: + for i, vlen := 0, v.Len(); i < vlen; i++ { + if !isEmptyValue(v.Index(i), tinfos, false) { + return false + } + } + return true + case reflect.Map, reflect.Slice, reflect.Chan: + return v.IsNil() || v.Len() == 0 + case reflect.Interface, reflect.Ptr: + empty = v.IsNil() + if recursive && !empty { + return isEmptyValue(v.Elem(), tinfos, recursive) + } + return empty + case reflect.Struct: + return isEmptyStruct(v, tinfos, recursive) + } + return false +} + func isEmptyValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool { switch v.Kind() { case reflect.Invalid: @@ -215,7 +229,7 @@ func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, recursive bool) bool { // We only care about what we can encode/decode, // so that is what we use to check omitEmpty. for _, si := range ti.sfi.source() { - sfv := si.path.field(v) + sfv := si.fieldNoAlloc(v, true) if sfv.IsValid() && !isEmptyValue(sfv, tinfos, recursive) { return false } @@ -223,6 +237,10 @@ func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, recursive bool) bool { return true } +func makeMapReflect(t reflect.Type, size int) reflect.Value { + return reflect.MakeMapWithSize(t, size) +} + // -------------------------- type perTypeElem struct { @@ -247,13 +265,9 @@ type perType struct { v []perTypeElem } -type decPerType struct { - perType -} +type decPerType = perType -type encPerType struct { - perType -} +type encPerType = perType func (x *perType) elem(t reflect.Type) *perTypeElem { rtid := rt2id(t) @@ -295,10 +309,44 @@ func (x *perType) AddressableRO(v reflect.Value) (rv reflect.Value) { return } +// -------------------------- +type mapIter struct { + t *reflect.MapIter + m reflect.Value + values bool +} + +func (t *mapIter) Next() (r bool) { + return t.t.Next() +} + +func (t *mapIter) Key() reflect.Value { + return t.t.Key() +} + +func (t *mapIter) Value() (r reflect.Value) { + if t.values { + return t.t.Value() + } + return +} + +func (t *mapIter) Done() {} + +func mapRange(t *mapIter, m, k, v reflect.Value, values bool) { + *t = mapIter{ + m: m, + t: m.MapRange(), + values: values, + } +} + // -------------------------- type structFieldInfos struct { c []*structFieldInfo s []*structFieldInfo + t uint8To32TrieNode + // byName map[string]*structFieldInfo // find sfi given a name } func (x *structFieldInfos) load(source, sorted []*structFieldInfo) { @@ -306,55 +354,24 @@ func (x *structFieldInfos) load(source, sorted []*structFieldInfo) { x.s = sorted } -func (x *structFieldInfos) sorted() (v []*structFieldInfo) { return x.s } +// func (x *structFieldInfos) count() int { return len(x.c) } func (x *structFieldInfos) source() (v []*structFieldInfo) { return x.c } - -type atomicClsErr struct { - v atomic.Value -} - -func (x *atomicClsErr) load() (e clsErr) { - if i := x.v.Load(); i != nil { - e = i.(clsErr) - } - return -} - -func (x *atomicClsErr) store(p clsErr) { - x.v.Store(p) -} +func (x *structFieldInfos) sorted() (v []*structFieldInfo) { return x.s } // -------------------------- -type atomicTypeInfoSlice struct { - v atomic.Value + +type uint8To32TrieNodeNoKids struct { + key uint8 + valid bool // the value marks the end of a full stored string + _ [2]byte // padding + value uint32 } -func (x *atomicTypeInfoSlice) load() (e []rtid2ti) { - if i := x.v.Load(); i != nil { - e = i.([]rtid2ti) - } - return -} +type uint8To32TrieNodeKids = 
[]uint8To32TrieNode -func (x *atomicTypeInfoSlice) store(p []rtid2ti) { - x.v.Store(p) -} - -// -------------------------- -type atomicRtidFnSlice struct { - v atomic.Value -} - -func (x *atomicRtidFnSlice) load() (e []codecRtidFn) { - if i := x.v.Load(); i != nil { - e = i.([]codecRtidFn) - } - return -} - -func (x *atomicRtidFnSlice) store(p []codecRtidFn) { - x.v.Store(p) -} +func (x *uint8To32TrieNode) setKids(kids []uint8To32TrieNode) { x.kids = kids } +func (x *uint8To32TrieNode) getKids() []uint8To32TrieNode { return x.kids } +func (x *uint8To32TrieNode) truncKids() { x.kids = x.kids[:0] } // set len to 0 // -------------------------- func (n *fauxUnion) ru() reflect.Value { @@ -501,13 +518,13 @@ func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value // ---------------- -func rvSliceIndex(rv reflect.Value, i int, ti *typeInfo) reflect.Value { +func rvArrayIndex(rv reflect.Value, i int, _ *typeInfo, _ bool) reflect.Value { return rv.Index(i) } -func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) reflect.Value { - return rv.Index(i) -} +// func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) reflect.Value { +// return rv.Index(i) +// } func rvSliceZeroCap(t reflect.Type) (v reflect.Value) { return reflect.MakeSlice(t, 0, 0) @@ -523,7 +540,7 @@ func rvCapSlice(rv reflect.Value) int { func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) { l := rv.Len() - if scratch == nil || rv.CanAddr() { + if scratch == nil && rv.CanAddr() { return rv.Slice(0, l).Bytes() } @@ -537,7 +554,7 @@ func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) { } func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) { - v = rvZeroAddrK(reflectArrayOf(rvLenSlice(rv), rv.Type().Elem()), reflect.Array) + v = rvZeroAddrK(reflect.ArrayOf(rvLenSlice(rv), rv.Type().Elem()), reflect.Array) reflect.Copy(v, rv) return } @@ -647,60 +664,43 @@ func rvLenMap(rv reflect.Value) int { return rv.Len() } -// func copybytes(to, from []byte) int { -// return copy(to, from) -// } - -// func copybytestr(to []byte, from string) int { -// return copy(to, from) -// } - -// func rvLenArray(rv reflect.Value) int { return rv.Len() } - // ------------ map range and map indexing ---------- -func mapStoresElemIndirect(elemsize uintptr) bool { return false } - -func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, _, _ bool) { +func mapSet(m, k, v reflect.Value, _ mapReqParams) { m.SetMapIndex(k, v) } -func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, _, _ bool) (vv reflect.Value) { +func mapGet(m, k, v reflect.Value, _ mapReqParams) (vv reflect.Value) { return m.MapIndex(k) } -// func mapDelete(m, k reflect.Value) { -// m.SetMapIndex(k, reflect.Value{}) -// } - func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (r reflect.Value) { return // reflect.New(t).Elem() } // ---------- ENCODER optimized --------------- -func (e *Encoder) jsondriver() *jsonEncDriver { - return e.e.(*jsonEncDriver) -} - -// ---------- DECODER optimized --------------- - -func (d *Decoder) jsondriver() *jsonDecDriver { - return d.d.(*jsonDecDriver) -} - -func (d *Decoder) stringZC(v []byte) (s string) { - return d.string(v) -} - -func (d *Decoder) mapKeyString(callFnRvk *bool, kstrbs, kstr2bs *[]byte) string { - return d.string(*kstr2bs) +func (d *decoderBase) bytes2Str(in []byte, att dBytesAttachState) (s string, mutable bool) { + return d.detach2Str(in, att), false } // ---------- structFieldInfo optimized --------------- -func (n *structFieldInfoPathNode) rvField(v reflect.Value) 
reflect.Value { +func (n *structFieldInfoNode) rvField(v reflect.Value) reflect.Value { return v.Field(int(n.index)) } // ---------- others --------------- + +// -------------------------- +type atomicRtidFnSlice struct { + v atomic.Value +} + +func (x *atomicRtidFnSlice) load() interface{} { + return x.v.Load() +} + +func (x *atomicRtidFnSlice) store(p interface{}) { + x.v.Store(p) +} diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe_not_gc.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe_not_gc.go deleted file mode 100644 index 502bc6086..000000000 --- a/vendor/github.com/ugorji/go/codec/helper_not_unsafe_not_gc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build !go1.9 || safe || codec.safe || appengine || !gc -// +build !go1.9 safe codec.safe appengine !gc - -package codec - -// import "reflect" - -// This files contains safe versions of the code where the unsafe versions are not supported -// in either gccgo or gollvm. -// -// - rvType: -// reflect.toType is not supported in gccgo, gollvm. - -// func rvType(rv reflect.Value) reflect.Type { -// return rv.Type() -// } - -var _ = 0 diff --git a/vendor/github.com/ugorji/go/codec/helper_notunsafe_or_notgc.go b/vendor/github.com/ugorji/go/codec/helper_notunsafe_or_notgc.go new file mode 100644 index 000000000..420d717e7 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper_notunsafe_or_notgc.go @@ -0,0 +1,59 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +//go:build safe || codec.safe || !gc + +package codec + +// growCap will return a new capacity for a slice, given the following: +// - oldCap: current capacity +// - unit: in-memory size of an element +// - num: number of elements to add +func growCap(oldCap, unit, num uint) (newCap uint) { + // appendslice logic (if cap < 1024, *2, else *1.25): + // leads to many copy calls, especially when copying bytes. + // bytes.Buffer model (2*cap + n): much better for bytes. + // smarter way is to take the byte-size of the appended element(type) into account + + // maintain 1 thresholds: + // t1: if cap <= t1, newcap = 2x + // else newcap = 1.5x + // + // t1 is always >= 1024. + // This means that, if unit size >= 16, then always do 2x or 1.5x (ie t1, t2, t3 are all same) + // + // With this, appending for bytes increase by: + // 100% up to 4K + // 50% beyond that + + // unit can be 0 e.g. 
for struct{}{}; handle that appropriately + maxCap := num + (oldCap * 3 / 2) + if unit == 0 || maxCap > maxArrayLen || maxCap < oldCap { // handle wraparound, etc + return maxArrayLen + } + + var t1 uint = 1024 // default thresholds for large values + if unit <= 4 { + t1 = 8 * 1024 + } else if unit <= 16 { + t1 = 2 * 1024 + } + + newCap = 2 + num + if oldCap > 0 { + if oldCap <= t1 { // [0,t1] + newCap = num + (oldCap * 2) + } else { // (t1,infinity] + newCap = maxCap + } + } + + // ensure newCap takes multiples of a cache line (size is a multiple of 64) + t1 = newCap * unit + if t2 := t1 % 64; t2 != 0 { + t1 += 64 - t2 + newCap = t1 / unit + } + + return +} diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go index 4e29b030b..31f3266a4 100644 --- a/vendor/github.com/ugorji/go/codec/helper_unsafe.go +++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go @@ -1,12 +1,15 @@ // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. -//go:build !safe && !codec.safe && !appengine && go1.9 -// +build !safe,!codec.safe,!appengine,go1.9 +//go:build !safe && !codec.safe && !appengine && go1.21 -// minimum of go 1.9 is needed, as that is the minimum for all features and linked functions we need -// - typedmemclr was introduced in go 1.8 -// - mapassign_fastXXX was introduced in go 1.9 +// minimum of go 1.21 is needed, as that is the minimum for all features and linked functions we need +// - typedmemclr : go1.8 +// - mapassign_fastXXX: go1.9 +// - clear was added in go1.21 +// - unsafe.String(Data): go1.20 +// - unsafe.Add: go1.17 +// - generics/any: go1.18 // etc package codec @@ -21,7 +24,7 @@ import ( // This file has unsafe variants of some helper functions. // MARKER: See helper_unsafe.go for the usage documentation. - +// // There are a number of helper_*unsafe*.go files. // // - helper_unsafe @@ -41,19 +44,32 @@ import ( // As of March 2021, we cannot differentiate whether running with gccgo or gollvm // using a build constraint, as both satisfy 'gccgo' build tag. // Consequently, we must use the lowest common denominator to support both. - +// // For reflect.Value code, we decided to do the following: // - if we know the kind, we can elide conditional checks for // - SetXXX (Int, Uint, String, Bool, etc) // - SetLen // -// We can also optimize -// - IsNil - +// We can also optimize many others, incl IsNil, etc +// // MARKER: Some functions here will not be hit during code coverage runs due to optimizations, e.g. // - rvCopySlice: called by decode if rvGrowSlice did not set new slice into pointer to orig slice. // however, helper_unsafe sets it, so no need to call rvCopySlice later // - rvSlice: same as above +// +// MARKER: Handling flagIndir ---- +// +// flagIndir means that the reflect.Value holds a pointer to the data itself. +// +// flagIndir can be set for: +// - references +// Here, type.IfaceIndir() --> false +// flagIndir is usually false (except when the value is addressable, where in flagIndir may be true) +// - everything else (numbers, bools, string, slice, struct, etc). +// Here, type.IfaceIndir() --> true +// flagIndir is always true +// +// This knowlege is used across this file, e.g. in rv2i and rvRefPtr const safeMode = false @@ -88,7 +104,9 @@ const ( const transientSizeMax = 64 // should struct/array support internal strings and slices? 
-const transientValueHasStringSlice = false +// const transientValueHasStringSlice = false + +func isTransientType4Size(size uint32) bool { return size <= transientSizeMax } type unsafeString struct { Data unsafe.Pointer @@ -144,7 +162,8 @@ func (x *unsafePerTypeElem) addrFor(k reflect.Kind) unsafe.Pointer { x.slice = unsafeSlice{} // memclr return unsafe.Pointer(&x.slice) } - x.arr = [transientSizeMax]byte{} // memclr + clear(x.arr[:]) + // x.arr = [transientSizeMax]byte{} // memclr return unsafe.Pointer(&x.arr) } @@ -152,9 +171,7 @@ type perType struct { elems [2]unsafePerTypeElem } -type decPerType struct { - perType -} +type decPerType = perType type encPerType struct{} @@ -183,19 +200,6 @@ func byteAt(b []byte, index uint) byte { return *(*byte)(unsafe.Pointer(uintptr((*unsafeSlice)(unsafe.Pointer(&b)).Data) + uintptr(index))) } -func byteSliceOf(b []byte, start, end uint) []byte { - s := (*unsafeSlice)(unsafe.Pointer(&b)) - s.Data = unsafe.Pointer(uintptr(s.Data) + uintptr(start)) - s.Len = int(end - start) - s.Cap -= int(start) - return b -} - -// func byteSliceWithLen(b []byte, length uint) []byte { -// (*unsafeSlice)(unsafe.Pointer(&b)).Len = int(length) -// return b -// } - func setByteAt(b []byte, index uint, val byte) { // b[index] = val *(*byte)(unsafe.Pointer(uintptr((*unsafeSlice)(unsafe.Pointer(&b)).Data) + uintptr(index))) = val @@ -222,49 +226,26 @@ func byteSliceSameData(v1 []byte, v2 []byte) bool { return (*unsafeSlice)(unsafe.Pointer(&v1)).Data == (*unsafeSlice)(unsafe.Pointer(&v2)).Data } -// MARKER: okBytesN functions will copy N bytes into the top slots of the return array. -// These functions expect that the bound check already occured and are are valid. -// copy(...) does a number of checks which are unnecessary in this situation when in bounds. - -func okBytes2(b []byte) [2]byte { - return *((*[2]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data)) -} - -func okBytes3(b []byte) [3]byte { - return *((*[3]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data)) -} - -func okBytes4(b []byte) [4]byte { - return *((*[4]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data)) -} - -func okBytes8(b []byte) [8]byte { - return *((*[8]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data)) -} - -// isNil says whether the value v is nil. -// This applies to references like map/ptr/unsafepointer/chan/func, -// and non-reference values like interface/slice. -func isNil(v interface{}) (rv reflect.Value, isnil bool) { - var ui = (*unsafeIntf)(unsafe.Pointer(&v)) - isnil = ui.ptr == nil - if !isnil { - rv, isnil = unsafeIsNilIntfOrSlice(ui, v) - } - return -} - -func unsafeIsNilIntfOrSlice(ui *unsafeIntf, v interface{}) (rv reflect.Value, isnil bool) { - rv = reflect.ValueOf(v) // reflect.ValueOf is currently not inline'able - so call it directly - tk := rv.Kind() - isnil = (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.ptr) == nil - return -} - -// return the pointer for a reference (map/chan/func/pointer/unsafe.Pointer). -// true references (map, func, chan, ptr - NOT slice) may be double-referenced? as flagIndir +// isNil checks - without much effort - if an interface is nil. // -// Assumes that v is a reference (map/func/chan/ptr/func) +// returned rv is not guaranteed to be valid (e.g. if v == nil). +// +// Note that this will handle all pointer-sized types e.g. +// pointer, map, chan, func, etc. 
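The checkPtr parameter on the reworked isNil exists because of a common Go gotcha: an interface holding a typed nil pointer is not itself nil. The safe (non-unsafe) build does the pointer check with reflect; a standalone illustration of that behaviour, using only the public reflect API:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	var p *int            // typed nil pointer
	var i interface{} = p // interface now carries (type=*int, value=nil)

	fmt.Println(i == nil) // false: the interface word itself is non-nil

	// The extra check that isNil(v, true) performs in the safe build,
	// expressed with the public reflect API:
	rv := reflect.ValueOf(i)
	fmt.Println(rv.Kind() == reflect.Ptr && rv.IsNil()) // true
}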
+func isNil(v interface{}, checkPtr bool) (rv reflect.Value, b bool) { + b = ((*unsafeIntf)(unsafe.Pointer(&v))).ptr == nil + return +} + +func ptrToLowLevel[T any](ptr *T) unsafe.Pointer { + return unsafe.Pointer(ptr) +} + +func lowLevelToPtr[T any](v unsafe.Pointer) *T { + return (*T)(v) +} + +// Given that v is a reference (map/func/chan/ptr/unsafepointer) kind, return the pointer func rvRefPtr(v *unsafeReflectValue) unsafe.Pointer { if v.flag&unsafeFlagIndir != 0 { return *(*unsafe.Pointer)(v.ptr) @@ -295,13 +276,6 @@ func rv4istr(i interface{}) (v reflect.Value) { } func rv2i(rv reflect.Value) (i interface{}) { - // We tap into implememtation details from - // the source go stdlib reflect/value.go, and trims the implementation. - // - // e.g. - // - a map/ptr is a reference, thus flagIndir is not set on it - // - an int/slice is not a reference, thus flagIndir is set on it - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) if refBitset.isset(byte(rv.Kind())) && urv.flag&unsafeFlagIndir != 0 { urv.ptr = *(*unsafe.Pointer)(urv.ptr) @@ -316,12 +290,22 @@ func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value { return rv } +// return true if this rv - got from a pointer kind - is nil. +// For now, only use for struct fields of pointer types, as we're guaranteed +// that flagIndir will never be set. +func rvPtrIsNil(rv reflect.Value) bool { + return rvIsNil(rv) +} + +// checks if a nil'able value is nil func rvIsNil(rv reflect.Value) bool { urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - if urv.flag&unsafeFlagIndir != 0 { - return *(*unsafe.Pointer)(urv.ptr) == nil + if urv.flag&unsafeFlagIndir == 0 { + return urv.ptr == nil } - return urv.ptr == nil + // flagIndir is set for a reference (ptr/map/func/unsafepointer/chan) + // OR kind is slice/interface + return *(*unsafe.Pointer)(urv.ptr) == nil } func rvSetSliceLen(rv reflect.Value, length int) { @@ -499,29 +483,62 @@ func isEmptyValueFallbackRecur(urv *unsafeReflectValue, v reflect.Value, tinfos return false } +// is this an empty interface/ptr/struct/map/slice/chan/array +func isEmptyContainerValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool { + urv := (*unsafeReflectValue)(unsafe.Pointer(&v)) + switch v.Kind() { + case reflect.Slice: + return (*unsafeSlice)(urv.ptr).Len == 0 + case reflect.Struct: + if tinfos == nil { + tinfos = defTypeInfos + } + ti := tinfos.find(uintptr(urv.typ)) + if ti == nil { + ti = tinfos.load(v.Type()) + } + return unsafeCmpZero(urv.ptr, int(ti.size)) + case reflect.Interface, reflect.Ptr: + // isnil := urv.ptr == nil // (not sufficient, as a pointer value encodes the type) + isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil + if recursive && !isnil { + return isEmptyValue(v.Elem(), tinfos, recursive) + } + return isnil + case reflect.Chan: + return urv.ptr == nil || len_chan(rvRefPtr(urv)) == 0 + case reflect.Map: + return urv.ptr == nil || len_map(rvRefPtr(urv)) == 0 + case reflect.Array: + return v.Len() == 0 || + urv.ptr == nil || + urv.typ == nil || + rtsize2(urv.typ) == 0 || + unsafeCmpZero(urv.ptr, int(rtsize2(urv.typ))) + } + return false +} + // -------------------------- type structFieldInfos struct { - c unsafe.Pointer // source - s unsafe.Pointer // sorted + c unsafe.Pointer // source + s unsafe.Pointer // sorted + t uint8To32TrieNode + length int + + // byName map[string]*structFieldInfo // find sfi given a name } +// func (x *structFieldInfos) load(source, sorted []*structFieldInfo, sourceNames, sortedNames []string) { func (x *structFieldInfos) 
load(source, sorted []*structFieldInfo) { - s := (*unsafeSlice)(unsafe.Pointer(&sorted)) - x.s = s.Data - x.length = s.Len + var s *unsafeSlice s = (*unsafeSlice)(unsafe.Pointer(&source)) x.c = s.Data -} - -func (x *structFieldInfos) sorted() (v []*structFieldInfo) { - *(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.s, x.length, x.length} - // s := (*unsafeSlice)(unsafe.Pointer(&v)) - // s.Data = x.sorted0 - // s.Len = x.length - // s.Cap = s.Len - return + x.length = s.Len + s = (*unsafeSlice)(unsafe.Pointer(&sorted)) + x.s = s.Data } func (x *structFieldInfos) source() (v []*structFieldInfo) { @@ -529,66 +546,48 @@ func (x *structFieldInfos) source() (v []*structFieldInfo) { return } -// atomicXXX is expected to be 2 words (for symmetry with atomic.Value) -// -// Note that we do not atomically load/store length and data pointer separately, -// as this could lead to some races. Instead, we atomically load/store cappedSlice. -// -// Note: with atomic.(Load|Store)Pointer, we MUST work with an unsafe.Pointer directly. - -// ---------------------- -type atomicTypeInfoSlice struct { - v unsafe.Pointer // *[]rtid2ti -} - -func (x *atomicTypeInfoSlice) load() (s []rtid2ti) { - x2 := atomic.LoadPointer(&x.v) - if x2 != nil { - s = *(*[]rtid2ti)(x2) - } +func (x *structFieldInfos) sorted() (v []*structFieldInfo) { + *(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.s, x.length, x.length} return } -func (x *atomicTypeInfoSlice) store(p []rtid2ti) { - atomic.StorePointer(&x.v, unsafe.Pointer(&p)) +// -------------------------- + +type uint8To32TrieNodeNoKids struct { + key uint8 + valid bool // the value marks the end of a full stored string + numkids uint8 + _ byte // padding + value uint32 } -// MARKER: in safe mode, atomicXXX are atomic.Value, which contains an interface{}. -// This is 2 words. -// consider padding atomicXXX here with a uintptr, so they fit into 2 words also. +type uint8To32TrieNodeKids = *uint8To32TrieNode + +func (x *uint8To32TrieNode) setKids(kids []uint8To32TrieNode) { + x.numkids = uint8(len(kids)) + x.kids = &kids[0] +} +func (x *uint8To32TrieNode) getKids() (v []uint8To32TrieNode) { + *(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{unsafe.Pointer(x.kids), int(x.numkids), int(x.numkids)} + return +} +func (x *uint8To32TrieNode) truncKids() { x.numkids = 0 } // -------------------------- + +// Note that we do not atomically load/store length and data pointer separately, +// as this could lead to some races. Instead, we atomically load/store cappedSlice. 
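As the note above says, the length and data pointer are never loaded or stored separately; the whole capped slice is published through a single atomic pointer. A minimal copy-on-write sketch of that pattern using the public sync/atomic API (atomic.Pointer, Go 1.19+); the cache type and its methods here are illustrative, not part of the package:

package main

import (
	"fmt"
	"sync/atomic"
)

// cache publishes an immutable snapshot of a slice behind one atomic pointer,
// so concurrent readers always see a consistent (data, len, cap) header.
type cache struct {
	v atomic.Pointer[[]int]
}

func (c *cache) load() []int {
	if p := c.v.Load(); p != nil {
		return *p
	}
	return nil
}

// add assumes a single writer; concurrent readers only need the atomic load.
func (c *cache) add(x int) {
	old := c.load()
	next := make([]int, len(old)+1) // copy-on-write: a published slice is never mutated
	copy(next, old)
	next[len(old)] = x
	c.v.Store(&next)
}

func main() {
	var c cache
	c.add(1)
	c.add(2)
	fmt.Println(c.load()) // [1 2]
}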
+ type atomicRtidFnSlice struct { v unsafe.Pointer // *[]codecRtidFn } -func (x *atomicRtidFnSlice) load() (s []codecRtidFn) { - x2 := atomic.LoadPointer(&x.v) - if x2 != nil { - s = *(*[]codecRtidFn)(x2) - } - return +func (x *atomicRtidFnSlice) load() (s unsafe.Pointer) { + return atomic.LoadPointer(&x.v) } -func (x *atomicRtidFnSlice) store(p []codecRtidFn) { - atomic.StorePointer(&x.v, unsafe.Pointer(&p)) -} - -// -------------------------- -type atomicClsErr struct { - v unsafe.Pointer // *clsErr -} - -func (x *atomicClsErr) load() (e clsErr) { - x2 := (*clsErr)(atomic.LoadPointer(&x.v)) - if x2 != nil { - e = *x2 - } - return -} - -func (x *atomicClsErr) store(p clsErr) { - atomic.StorePointer(&x.v, unsafe.Pointer(&p)) +func (x *atomicRtidFnSlice) store(p unsafe.Pointer) { + atomic.StorePointer(&x.v, p) } // -------------------------- @@ -660,98 +659,79 @@ func (n *fauxUnion) rb() (v reflect.Value) { // -------------------------- func rvSetBytes(rv reflect.Value, v []byte) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*[]byte)(urv.ptr) = v + *(*[]byte)(rvPtr(rv)) = v } func rvSetString(rv reflect.Value, v string) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*string)(urv.ptr) = v + *(*string)(rvPtr(rv)) = v } func rvSetBool(rv reflect.Value, v bool) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*bool)(urv.ptr) = v + *(*bool)(rvPtr(rv)) = v } func rvSetTime(rv reflect.Value, v time.Time) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*time.Time)(urv.ptr) = v + *(*time.Time)(rvPtr(rv)) = v } func rvSetFloat32(rv reflect.Value, v float32) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*float32)(urv.ptr) = v + *(*float32)(rvPtr(rv)) = v } func rvSetFloat64(rv reflect.Value, v float64) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*float64)(urv.ptr) = v + *(*float64)(rvPtr(rv)) = v } func rvSetComplex64(rv reflect.Value, v complex64) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*complex64)(urv.ptr) = v + *(*complex64)(rvPtr(rv)) = v } func rvSetComplex128(rv reflect.Value, v complex128) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*complex128)(urv.ptr) = v + *(*complex128)(rvPtr(rv)) = v } func rvSetInt(rv reflect.Value, v int) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int)(urv.ptr) = v + *(*int)(rvPtr(rv)) = v } func rvSetInt8(rv reflect.Value, v int8) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int8)(urv.ptr) = v + *(*int8)(rvPtr(rv)) = v } func rvSetInt16(rv reflect.Value, v int16) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int16)(urv.ptr) = v + *(*int16)(rvPtr(rv)) = v } func rvSetInt32(rv reflect.Value, v int32) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int32)(urv.ptr) = v + *(*int32)(rvPtr(rv)) = v } func rvSetInt64(rv reflect.Value, v int64) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*int64)(urv.ptr) = v + *(*int64)(rvPtr(rv)) = v } func rvSetUint(rv reflect.Value, v uint) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint)(urv.ptr) = v + *(*uint)(rvPtr(rv)) = v } func rvSetUintptr(rv reflect.Value, v uintptr) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uintptr)(urv.ptr) = v + *(*uintptr)(rvPtr(rv)) = v } func rvSetUint8(rv reflect.Value, v uint8) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint8)(urv.ptr) = v + *(*uint8)(rvPtr(rv)) = v } func rvSetUint16(rv reflect.Value, v uint16) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint16)(urv.ptr) = v + 
*(*uint16)(rvPtr(rv)) = v } func rvSetUint32(rv reflect.Value, v uint32) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint32)(urv.ptr) = v + *(*uint32)(rvPtr(rv)) = v } func rvSetUint64(rv reflect.Value, v uint64) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - *(*uint64)(urv.ptr) = v + *(*uint64)(rvPtr(rv)) = v } // ---------------- @@ -775,12 +755,10 @@ func rvSetDirect(rv reflect.Value, v reflect.Value) { uv := (*unsafeReflectValue)(unsafe.Pointer(&v)) if uv.flag&unsafeFlagIndir == 0 { *(*unsafe.Pointer)(urv.ptr) = uv.ptr - } else if uv.ptr == unsafeZeroAddr { - if urv.ptr != unsafeZeroAddr { - typedmemclr(urv.typ, urv.ptr) - } - } else { + } else if uv.ptr != unsafeZeroAddr { typedmemmove(urv.typ, urv.ptr, uv.ptr) + } else if urv.ptr != unsafeZeroAddr { + typedmemclr(urv.typ, urv.ptr) } } @@ -812,11 +790,9 @@ func rvMakeSlice(rv reflect.Value, ti *typeInfo, xlen, xcap int) (_ reflect.Valu // It is typically called when we know that SetLen(...) cannot be done. func rvSlice(rv reflect.Value, length int) reflect.Value { urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - var x []struct{} - ux := (*unsafeSlice)(unsafe.Pointer(&x)) - *ux = *(*unsafeSlice)(urv.ptr) + ux := *(*unsafeSlice)(urv.ptr) // copy slice header ux.Len = length - urv.ptr = unsafe.Pointer(ux) + urv.ptr = unsafe.Pointer(&ux) return rv } @@ -834,10 +810,16 @@ func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value // ------------ -func rvSliceIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) { +func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo, isSlice bool) (v reflect.Value) { urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) uv := (*unsafeReflectValue)(unsafe.Pointer(&v)) - uv.ptr = unsafe.Pointer(uintptr(((*unsafeSlice)(urv.ptr)).Data) + uintptr(int(ti.elemsize)*i)) + if isSlice { + uv.ptr = unsafe.Pointer(uintptr(((*unsafeSlice)(urv.ptr)).Data)) + } else { + uv.ptr = unsafe.Pointer(uintptr(urv.ptr)) + } + uv.ptr = unsafe.Add(uv.ptr, ti.elemsize*uint32(i)) + // uv.ptr = unsafe.Pointer(ptr + uintptr(int(ti.elemsize)*i)) uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr return @@ -861,19 +843,11 @@ func rvCapSlice(rv reflect.Value) int { return (*unsafeSlice)(urv.ptr).Cap } -func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) { - urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - uv := (*unsafeReflectValue)(unsafe.Pointer(&v)) - uv.ptr = unsafe.Pointer(uintptr(urv.ptr) + uintptr(int(ti.elemsize)*i)) - uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr - uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr - return -} - // if scratch is nil, then return a writable view (assuming canAddr=true) -func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) { +func rvGetArrayBytes(rv reflect.Value, _ []byte) (bs []byte) { urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) bx := (*unsafeSlice)(unsafe.Pointer(&bs)) + // bx.Data, bx.Len, bx.Cap = urv.ptr, rv.Len(), bx.Len bx.Data = urv.ptr bx.Len = rv.Len() bx.Cap = bx.Len @@ -889,7 +863,7 @@ func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) { // // Consequently, we use rvLenSlice, not rvCapSlice. 
- t := reflectArrayOf(rvLenSlice(rv), rv.Type().Elem()) + t := reflect.ArrayOf(rvLenSlice(rv), rv.Type().Elem()) // v = rvZeroAddrK(t, reflect.Array) uv := (*unsafeReflectValue)(unsafe.Pointer(&v)) @@ -921,99 +895,84 @@ func rvCopySlice(dest, src reflect.Value, elemType reflect.Type) { // ------------ +func rvPtr(rv reflect.Value) unsafe.Pointer { + return (*unsafeReflectValue)(unsafe.Pointer(&rv)).ptr +} + func rvGetBool(rv reflect.Value) bool { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*bool)(v.ptr) + return *(*bool)(rvPtr(rv)) } func rvGetBytes(rv reflect.Value) []byte { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*[]byte)(v.ptr) + return *(*[]byte)(rvPtr(rv)) } func rvGetTime(rv reflect.Value) time.Time { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*time.Time)(v.ptr) + return *(*time.Time)(rvPtr(rv)) } func rvGetString(rv reflect.Value) string { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*string)(v.ptr) + return *(*string)(rvPtr(rv)) } func rvGetFloat64(rv reflect.Value) float64 { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*float64)(v.ptr) + return *(*float64)(rvPtr(rv)) } func rvGetFloat32(rv reflect.Value) float32 { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*float32)(v.ptr) + return *(*float32)(rvPtr(rv)) } func rvGetComplex64(rv reflect.Value) complex64 { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*complex64)(v.ptr) + return *(*complex64)(rvPtr(rv)) } func rvGetComplex128(rv reflect.Value) complex128 { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*complex128)(v.ptr) + return *(*complex128)(rvPtr(rv)) } func rvGetInt(rv reflect.Value) int { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*int)(v.ptr) + return *(*int)(rvPtr(rv)) } func rvGetInt8(rv reflect.Value) int8 { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*int8)(v.ptr) + return *(*int8)(rvPtr(rv)) } func rvGetInt16(rv reflect.Value) int16 { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*int16)(v.ptr) + return *(*int16)(rvPtr(rv)) } func rvGetInt32(rv reflect.Value) int32 { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*int32)(v.ptr) + return *(*int32)(rvPtr(rv)) } func rvGetInt64(rv reflect.Value) int64 { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*int64)(v.ptr) + return *(*int64)(rvPtr(rv)) } func rvGetUint(rv reflect.Value) uint { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*uint)(v.ptr) + return *(*uint)(rvPtr(rv)) } func rvGetUint8(rv reflect.Value) uint8 { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*uint8)(v.ptr) + return *(*uint8)(rvPtr(rv)) } func rvGetUint16(rv reflect.Value) uint16 { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*uint16)(v.ptr) + return *(*uint16)(rvPtr(rv)) } func rvGetUint32(rv reflect.Value) uint32 { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*uint32)(v.ptr) + return *(*uint32)(rvPtr(rv)) } func rvGetUint64(rv reflect.Value) uint64 { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*uint64)(v.ptr) + return *(*uint64)(rvPtr(rv)) } func rvGetUintptr(rv reflect.Value) uintptr { - v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) - return *(*uintptr)(v.ptr) + return *(*uintptr)(rvPtr(rv)) } func rvLenMap(rv reflect.Value) int { @@ -1027,32 +986,6 @@ func rvLenMap(rv reflect.Value) int { return len_map(rvRefPtr((*unsafeReflectValue)(unsafe.Pointer(&rv)))) } -// copy is an intrinsic, which may use asm if length 
is small, -// or make a runtime call to runtime.memmove if length is large. -// Performance suffers when you always call runtime.memmove function. -// -// Consequently, there's no value in a copybytes call - just call copy() directly - -// func copybytes(to, from []byte) (n int) { -// n = (*unsafeSlice)(unsafe.Pointer(&from)).Len -// memmove( -// (*unsafeSlice)(unsafe.Pointer(&to)).Data, -// (*unsafeSlice)(unsafe.Pointer(&from)).Data, -// uintptr(n), -// ) -// return -// } - -// func copybytestr(to []byte, from string) (n int) { -// n = (*unsafeSlice)(unsafe.Pointer(&from)).Len -// memmove( -// (*unsafeSlice)(unsafe.Pointer(&to)).Data, -// (*unsafeSlice)(unsafe.Pointer(&from)).Data, -// uintptr(n), -// ) -// return -// } - // Note: it is hard to find len(...) of an array type, // as that is a field in the arrayType representing the array, and hard to introspect. // @@ -1065,24 +998,26 @@ func rvLenMap(rv reflect.Value) int { // // It is more performant to provide a value that the map entry is set into, // and that elides the allocation. - -// go 1.4+ has runtime/hashmap.go or runtime/map.go which has a -// hIter struct with the first 2 values being key and value -// of the current iteration. // +// go 1.4 through go 1.23 (in runtime/hashmap.go or runtime/map.go) has a hIter struct +// with the first 2 values being pointers for key and value of the current iteration. +// The next 6 values are pointers, followed by numeric types (uintptr, uint8, bool, etc). // This *hIter is passed to mapiterinit, mapiternext, mapiterkey, mapiterelem. -// We bypass the reflect wrapper functions and just use the *hIter directly. // -// Though *hIter has many fields, we only care about the first 2. +// In go 1.24, swissmap was introduced, and it provides a compatibility layer +// for hIter (called linknameIter). This has only 2 pointer fields after the key and value pointers. // -// We directly embed this in unsafeMapIter below +// Note: We bypass the reflect wrapper functions and just use the *hIter directly. // -// hiter is typically about 12 words, but we just fill up unsafeMapIter to 32 words, -// so it fills multiple cache lines and can give some extra space to accomodate small growth. +// When 'faking' these types with our own, we MUST ensure that the GC sees the pointers +// appropriately. These are reflected in goversion_(no)swissmap_unsafe.go files. +// In these files, we pad the extra spaces appropriately. 
+// +// Note: the faux hIter/linknameIter is directly embedded in unsafeMapIter below type unsafeMapIter struct { mtyp, mptr unsafe.Pointer - k, v reflect.Value + k, v unsafeReflectValue kisref bool visref bool mapvalues bool @@ -1092,7 +1027,7 @@ type unsafeMapIter struct { it struct { key unsafe.Pointer value unsafe.Pointer - _ [20]uintptr // padding for other fields (to make up 32 words for enclosing struct) + _ unsafeMapIterPadding } } @@ -1112,18 +1047,16 @@ func (t *unsafeMapIter) Next() (r bool) { } if helperUnsafeDirectAssignMapEntry || t.kisref { - (*unsafeReflectValue)(unsafe.Pointer(&t.k)).ptr = t.it.key + t.k.ptr = t.it.key } else { - k := (*unsafeReflectValue)(unsafe.Pointer(&t.k)) - typedmemmove(k.typ, k.ptr, t.it.key) + typedmemmove(t.k.typ, t.k.ptr, t.it.key) } if t.mapvalues { if helperUnsafeDirectAssignMapEntry || t.visref { - (*unsafeReflectValue)(unsafe.Pointer(&t.v)).ptr = t.it.value + t.v.ptr = t.it.value } else { - v := (*unsafeReflectValue)(unsafe.Pointer(&t.v)) - typedmemmove(v.typ, v.ptr, t.it.value) + typedmemmove(t.v.typ, t.v.ptr, t.it.value) } } @@ -1131,11 +1064,11 @@ func (t *unsafeMapIter) Next() (r bool) { } func (t *unsafeMapIter) Key() (r reflect.Value) { - return t.k + return *(*reflect.Value)(unsafe.Pointer(&t.k)) } func (t *unsafeMapIter) Value() (r reflect.Value) { - return t.v + return *(*reflect.Value)(unsafe.Pointer(&t.v)) } func (t *unsafeMapIter) Done() {} @@ -1162,14 +1095,14 @@ func mapRange(t *mapIter, m, k, v reflect.Value, mapvalues bool) { // t.it = (*unsafeMapHashIter)(reflect_mapiterinit(t.mtyp, t.mptr)) mapiterinit(t.mtyp, t.mptr, unsafe.Pointer(&t.it)) - t.k = k + t.k = *(*unsafeReflectValue)(unsafe.Pointer(&k)) t.kisref = refBitset.isset(byte(k.Kind())) if mapvalues { - t.v = v + t.v = *(*unsafeReflectValue)(unsafe.Pointer(&v)) t.visref = refBitset.isset(byte(v.Kind())) } else { - t.v = reflect.Value{} + t.v = unsafeReflectValue{} } } @@ -1182,13 +1115,6 @@ func unsafeMapKVPtr(urv *unsafeReflectValue) unsafe.Pointer { return urv.ptr } -// func mapDelete(m, k reflect.Value) { -// var urv = (*unsafeReflectValue)(unsafe.Pointer(&k)) -// var kptr = unsafeMapKVPtr(urv) -// urv = (*unsafeReflectValue)(unsafe.Pointer(&m)) -// mapdelete(urv.typ, rv2ptr(urv), kptr) -// } - // return an addressable reflect value that can be used in mapRange and mapGet operations. // // all calls to mapGet or mapRange will call here to get an addressable reflect.Value. 
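The unsafe iterator above exists to avoid allocating a fresh key/value pair for every map entry: one addressable value is created up front and each entry is copied into it. The same idea expressed with only the public reflect API (SetIterKey/SetIterValue, Go 1.18+), as a standalone sketch rather than the package's actual fast path:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := reflect.ValueOf(map[string]int{"a": 1, "b": 2})

	// One addressable scratch key and value, allocated once and reused for
	// every entry -- the role mapAddrLoopvarRV plays inside the codec.
	k := reflect.New(m.Type().Key()).Elem()
	v := reflect.New(m.Type().Elem()).Elem()

	sum := 0
	it := m.MapRange()
	for it.Next() {
		k.SetIterKey(it)   // copy the current key into the scratch value
		v.SetIterValue(it) // copy the current value into the scratch value
		sum += int(v.Int())
	}
	fmt.Println(sum) // 3
}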
@@ -1205,53 +1131,39 @@ func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (rv reflect.Value) { return } -// ---------- ENCODER optimized --------------- - -func (e *Encoder) jsondriver() *jsonEncDriver { - return (*jsonEncDriver)((*unsafeIntf)(unsafe.Pointer(&e.e)).ptr) +func makeMapReflect(typ reflect.Type, size int) (rv reflect.Value) { + t := (*unsafeIntf)(unsafe.Pointer(&typ)).ptr + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + urv.typ = t + urv.flag = uintptr(reflect.Map) + urv.ptr = makemap(t, size, nil) + return } -func (d *Decoder) zerocopystate() bool { - return d.decByteState == decByteStateZerocopy && d.h.ZeroCopy -} - -func (d *Decoder) stringZC(v []byte) (s string) { - // MARKER: inline zerocopystate directly so genHelper forwarding function fits within inlining cost - - // if d.zerocopystate() { - if d.decByteState == decByteStateZerocopy && d.h.ZeroCopy { - return stringView(v) - } - return d.string(v) -} - -func (d *Decoder) mapKeyString(callFnRvk *bool, kstrbs, kstr2bs *[]byte) string { - if !d.zerocopystate() { - *callFnRvk = true - if d.decByteState == decByteStateReuseBuf { - *kstrbs = append((*kstrbs)[:0], (*kstr2bs)...) - *kstr2bs = *kstrbs - } - } - return stringView(*kstr2bs) -} - -// ---------- DECODER optimized --------------- - -func (d *Decoder) jsondriver() *jsonDecDriver { - return (*jsonDecDriver)((*unsafeIntf)(unsafe.Pointer(&d.d)).ptr) +func (d *decoderBase) bytes2Str(in []byte, state dBytesAttachState) (s string, mutable bool) { + return stringView(in), state <= dBytesAttachBuffer } // ---------- structFieldInfo optimized --------------- -func (n *structFieldInfoPathNode) rvField(v reflect.Value) (rv reflect.Value) { +func (n *structFieldInfoNode) rvField(v reflect.Value) (rv reflect.Value) { // we already know this is exported, and maybe embedded (based on what si says) uv := (*unsafeReflectValue)(unsafe.Pointer(&v)) + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) // clear flagEmbedRO if necessary, and inherit permission bits from v urv.flag = uv.flag&(unsafeFlagStickyRO|unsafeFlagIndir|unsafeFlagAddr) | uintptr(n.kind) urv.typ = ((*unsafeIntf)(unsafe.Pointer(&n.typ))).ptr urv.ptr = unsafe.Pointer(uintptr(uv.ptr) + uintptr(n.offset)) + + // *(*unsafeReflectValue)(unsafe.Pointer(&rv)) = unsafeReflectValue{ + // unsafeIntf: unsafeIntf{ + // typ: ((*unsafeIntf)(unsafe.Pointer(&n.typ))).ptr, + // ptr: unsafe.Pointer(uintptr(uv.ptr) + uintptr(n.offset)), + // }, + // flag: uv.flag&(unsafeFlagStickyRO|unsafeFlagIndir|unsafeFlagAddr) | uintptr(n.kind), + // } + return } @@ -1299,10 +1211,6 @@ func unsafeNew(typ unsafe.Pointer) unsafe.Pointer { // failing with "error: undefined reference" error. // however, runtime.{mallocgc, newarray} are supported, so use that instead. -//go:linkname memmove runtime.memmove -//go:noescape -func memmove(to, from unsafe.Pointer, n uintptr) - //go:linkname mallocgc runtime.mallocgc //go:noescape func mallocgc(size uintptr, typ unsafe.Pointer, needzero bool) unsafe.Pointer @@ -1319,10 +1227,6 @@ func mapiterinit(typ unsafe.Pointer, m unsafe.Pointer, it unsafe.Pointer) //go:noescape func mapiternext(it unsafe.Pointer) (key unsafe.Pointer) -//go:linkname mapdelete runtime.mapdelete -//go:noescape -func mapdelete(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) - //go:linkname mapassign runtime.mapassign //go:noescape func mapassign(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer @@ -1331,6 +1235,10 @@ func mapassign(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe. 
//go:noescape func mapaccess2(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer, ok bool) +//go:linkname makemap runtime.makemap +//go:noescape +func makemap(typ unsafe.Pointer, size int, h unsafe.Pointer) unsafe.Pointer + // reflect.typed{memmove, memclr, slicecopy} will handle checking if the type has pointers or not, // and if a writeBarrier is needed, before delegating to the right method in the runtime. // diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_gc.go b/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_gc.go index a5c7d59a0..7d1349304 100644 --- a/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_gc.go +++ b/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_gc.go @@ -2,7 +2,6 @@ // Use of this source code is governed by a MIT license found in the LICENSE file. //go:build !safe && !codec.safe && !appengine && go1.9 && gc -// +build !safe,!codec.safe,!appengine,go1.9,gc package codec @@ -24,8 +23,67 @@ const ( mapMaxElemSize = 128 ) -func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (v unsafeSlice) { - return growslice(typ, old, cap+incr) +type mapKeyFastKind uint8 + +const ( + mapKeyFastKindAny = iota + 1 + mapKeyFastKind32 + mapKeyFastKind32ptr + mapKeyFastKind64 + mapKeyFastKind64ptr + mapKeyFastKindStr +) + +var mapKeyFastKindVals [32]mapKeyFastKind + +type mapReqParams struct { + kfast mapKeyFastKind + ref bool + indirect bool +} + +func getMapReqParams(ti *typeInfo) (r mapReqParams) { + r.indirect = mapStoresElemIndirect(uintptr(ti.elemsize)) + r.ref = refBitset.isset(ti.elemkind) + r.kfast = mapKeyFastKindFor(reflect.Kind(ti.keykind)) + return +} + +func init() { + xx := func(f mapKeyFastKind, k ...reflect.Kind) { + for _, v := range k { + mapKeyFastKindVals[byte(v)&31] = f // 'v % 32' equal to 'v & 31' + } + } + + var f mapKeyFastKind + + f = mapKeyFastKind64 + if wordSizeBits == 32 { + f = mapKeyFastKind32 + } + xx(f, reflect.Int, reflect.Uint, reflect.Uintptr) + + f = mapKeyFastKind64ptr + if wordSizeBits == 32 { + f = mapKeyFastKind32ptr + } + xx(f, reflect.Ptr) + + xx(mapKeyFastKindStr, reflect.String) + xx(mapKeyFastKind32, reflect.Uint32, reflect.Int32, reflect.Float32) + xx(mapKeyFastKind64, reflect.Uint64, reflect.Int64, reflect.Float64) +} + +func mapKeyFastKindFor(k reflect.Kind) mapKeyFastKind { + return mapKeyFastKindVals[k&31] +} + +func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (s unsafeSlice) { + // culled from GOROOT/runtime/slice.go + s = rtgrowslice(old.Data, old.Cap+incr, old.Cap, incr, typ) + s.Len = old.Len + return } // func rvType(rv reflect.Value) reflect.Type { @@ -43,7 +101,7 @@ func mapStoresElemIndirect(elemsize uintptr) bool { return elemsize > mapMaxElemSize } -func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, valIsRef bool) { +func mapSet(m, k, v reflect.Value, p mapReqParams) { // valIsRef var urv = (*unsafeReflectValue)(unsafe.Pointer(&k)) var kptr = unsafeMapKVPtr(urv) urv = (*unsafeReflectValue)(unsafe.Pointer(&v)) @@ -60,14 +118,15 @@ func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va // Sometimes, we got vvptr == nil when we dereferenced vvptr (if valIsIndirect). 
// Consequently, only use fastXXX functions if !valIsIndirect - if valIsIndirect { + if p.indirect { vvptr = mapassign(urv.typ, mptr, kptr) - typedmemmove(vtyp, vvptr, vptr) - // reflect_mapassign(urv.typ, mptr, kptr, vptr) - return + // typedmemmove(vtyp, vvptr, vptr) + // // reflect_mapassign(urv.typ, mptr, kptr, vptr) + // return + goto END } - switch keyFastKind { + switch p.kfast { case mapKeyFastKind32: vvptr = mapassign_fast32(urv.typ, mptr, *(*uint32)(kptr)) case mapKeyFastKind32ptr: @@ -82,14 +141,14 @@ func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va vvptr = mapassign(urv.typ, mptr, kptr) } - // if keyFastKind != 0 && valIsIndirect { + // if p.kfast != 0 && valIsIndirect { // vvptr = *(*unsafe.Pointer)(vvptr) // } - +END: typedmemmove(vtyp, vvptr, vptr) } -func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, valIsRef bool) (_ reflect.Value) { +func mapGet(m, k, v reflect.Value, p mapReqParams) (_ reflect.Value) { var urv = (*unsafeReflectValue)(unsafe.Pointer(&k)) var kptr = unsafeMapKVPtr(urv) urv = (*unsafeReflectValue)(unsafe.Pointer(&m)) @@ -101,7 +160,7 @@ func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va // Note that mapaccess2_fastXXX functions do not check if the value needs to be copied. // if they do, we should dereference the pointer and return that - switch keyFastKind { + switch p.kfast { case mapKeyFastKind32, mapKeyFastKind32ptr: vvptr, ok = mapaccess2_fast32(urv.typ, mptr, *(*uint32)(kptr)) case mapKeyFastKind64, mapKeyFastKind64ptr: @@ -118,9 +177,9 @@ func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va urv = (*unsafeReflectValue)(unsafe.Pointer(&v)) - if keyFastKind != 0 && valIsIndirect { + if p.kfast != 0 && p.indirect { urv.ptr = *(*unsafe.Pointer)(vvptr) - } else if helperUnsafeDirectAssignMapEntry || valIsRef { + } else if helperUnsafeDirectAssignMapEntry || p.ref { urv.ptr = vvptr } else { typedmemmove(urv.typ, urv.ptr, vvptr) @@ -129,13 +188,11 @@ func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va return v } +// ---- + //go:linkname unsafeZeroArr runtime.zeroVal var unsafeZeroArr [1024]byte -// //go:linkname rvPtrToType reflect.toType -// //go:noescape -// func rvPtrToType(typ unsafe.Pointer) reflect.Type - //go:linkname mapassign_fast32 runtime.mapassign_fast32 //go:noescape func mapassign_fast32(typ unsafe.Pointer, m unsafe.Pointer, key uint32) unsafe.Pointer @@ -167,3 +224,19 @@ func mapaccess2_fast64(typ unsafe.Pointer, m unsafe.Pointer, key uint64) (val un //go:linkname mapaccess2_faststr runtime.mapaccess2_faststr //go:noescape func mapaccess2_faststr(typ unsafe.Pointer, m unsafe.Pointer, key string) (val unsafe.Pointer, ok bool) + +//go:linkname rtgrowslice runtime.growslice +//go:noescape +func rtgrowslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, typ unsafe.Pointer) unsafeSlice + +// ---- + +// //go:linkname rvPtrToType reflect.toType +// //go:noescape +// func rvPtrToType(typ unsafe.Pointer) reflect.Type + +// //go:linkname growslice reflect.growslice +// //go:noescape +// func growslice(typ unsafe.Pointer, old unsafeSlice, cap int) unsafeSlice + +// ---- diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_not_gc.go b/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_not_gc.go index bd9fdedb6..e33db296e 100644 --- a/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_not_gc.go +++ b/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_not_gc.go @@ -2,7 +2,6 @@ 
// Use of this source code is governed by a MIT license found in the LICENSE file. //go:build !safe && !codec.safe && !appengine && go1.9 && !gc -// +build !safe,!codec.safe,!appengine,go1.9,!gc package codec @@ -14,6 +13,15 @@ import ( var unsafeZeroArr [1024]byte +type mapReqParams struct { + ref bool +} + +func getMapReqParams(ti *typeInfo) (r mapReqParams) { + r.ref = refBitset.isset(ti.elemkind) + return +} + // runtime.growslice does not work with gccgo, failing with "growslice: cap out of range" error. // consequently, we just call newarray followed by typedslicecopy directly. @@ -31,18 +39,11 @@ func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (v unsa return } -// func unsafeNew(t reflect.Type, typ unsafe.Pointer) unsafe.Pointer { -// rv := reflect.New(t) -// return ((*unsafeReflectValue)(unsafe.Pointer(&rv))).ptr -// } - // runtime.{mapassign_fastXXX, mapaccess2_fastXXX} are not supported in gollvm, // failing with "error: undefined reference" error. // so we just use runtime.{mapassign, mapaccess2} directly -func mapStoresElemIndirect(elemsize uintptr) bool { return false } - -func mapSet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) { +func mapSet(m, k, v reflect.Value, p mapReqParams) { var urv = (*unsafeReflectValue)(unsafe.Pointer(&k)) var kptr = unsafeMapKVPtr(urv) urv = (*unsafeReflectValue)(unsafe.Pointer(&v)) @@ -56,7 +57,7 @@ func mapSet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) { typedmemmove(vtyp, vvptr, vptr) } -func mapGet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) (_ reflect.Value) { +func mapGet(m, k, v reflect.Value, p mapReqParams) (_ reflect.Value) { var urv = (*unsafeReflectValue)(unsafe.Pointer(&k)) var kptr = unsafeMapKVPtr(urv) urv = (*unsafeReflectValue)(unsafe.Pointer(&m)) @@ -70,7 +71,7 @@ func mapGet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) (_ reflec urv = (*unsafeReflectValue)(unsafe.Pointer(&v)) - if helperUnsafeDirectAssignMapEntry || valIsRef { + if helperUnsafeDirectAssignMapEntry || p.ref { urv.ptr = vvptr } else { typedmemmove(urv.typ, urv.ptr, vvptr) diff --git a/vendor/github.com/ugorji/go/codec/init.mono.go b/vendor/github.com/ugorji/go/codec/init.mono.go new file mode 100644 index 000000000..f4133b026 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/init.mono.go @@ -0,0 +1,130 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
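Both compiler variants of unsafeGrowslice share one contract: extend the slice's capacity by incr while leaving its length untouched (the gc build restores s.Len after calling runtime.growslice; the gccgo/gollvm build allocates a new array and copies). A plain-Go sketch of that contract, with no runtime linknames; growCap is a hypothetical name:

package main

import "fmt"

// growCap returns a slice with the same elements and length as s,
// but with capacity grown by incr beyond the current capacity.
func growCap[T any](s []T, incr int) []T {
	out := make([]T, len(s), cap(s)+incr)
	copy(out, s)
	return out
}

func main() {
	s := []int{1, 2, 3}
	s = growCap(s, 8)
	fmt.Println(len(s), cap(s) >= 11) // 3 true
}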
+ +//go:build !notmono && !codec.notmono + +package codec + +import "io" + +func callMake(v interface{}) {} + +type encWriter interface{ encWriterI } +type decReader interface{ decReaderI } +type encDriver interface{ encDriverI } +type decDriver interface{ decDriverI } + +func (h *SimpleHandle) newEncoderBytes(out *[]byte) encoderI { + return helperEncDriverSimpleBytes{}.newEncoderBytes(out, h) +} + +func (h *SimpleHandle) newEncoder(w io.Writer) encoderI { + return helperEncDriverSimpleIO{}.newEncoderIO(w, h) +} + +func (h *SimpleHandle) newDecoderBytes(in []byte) decoderI { + return helperDecDriverSimpleBytes{}.newDecoderBytes(in, h) +} + +func (h *SimpleHandle) newDecoder(r io.Reader) decoderI { + return helperDecDriverSimpleIO{}.newDecoderIO(r, h) +} + +func (h *JsonHandle) newEncoderBytes(out *[]byte) encoderI { + return helperEncDriverJsonBytes{}.newEncoderBytes(out, h) +} + +func (h *JsonHandle) newEncoder(w io.Writer) encoderI { + return helperEncDriverJsonIO{}.newEncoderIO(w, h) +} + +func (h *JsonHandle) newDecoderBytes(in []byte) decoderI { + return helperDecDriverJsonBytes{}.newDecoderBytes(in, h) +} + +func (h *JsonHandle) newDecoder(r io.Reader) decoderI { + return helperDecDriverJsonIO{}.newDecoderIO(r, h) +} + +func (h *MsgpackHandle) newEncoderBytes(out *[]byte) encoderI { + return helperEncDriverMsgpackBytes{}.newEncoderBytes(out, h) +} + +func (h *MsgpackHandle) newEncoder(w io.Writer) encoderI { + return helperEncDriverMsgpackIO{}.newEncoderIO(w, h) +} + +func (h *MsgpackHandle) newDecoderBytes(in []byte) decoderI { + return helperDecDriverMsgpackBytes{}.newDecoderBytes(in, h) +} + +func (h *MsgpackHandle) newDecoder(r io.Reader) decoderI { + return helperDecDriverMsgpackIO{}.newDecoderIO(r, h) +} + +func (h *BincHandle) newEncoderBytes(out *[]byte) encoderI { + return helperEncDriverBincBytes{}.newEncoderBytes(out, h) +} + +func (h *BincHandle) newEncoder(w io.Writer) encoderI { + return helperEncDriverBincIO{}.newEncoderIO(w, h) +} + +func (h *BincHandle) newDecoderBytes(in []byte) decoderI { + return helperDecDriverBincBytes{}.newDecoderBytes(in, h) +} + +func (h *BincHandle) newDecoder(r io.Reader) decoderI { + return helperDecDriverBincIO{}.newDecoderIO(r, h) +} + +func (h *CborHandle) newEncoderBytes(out *[]byte) encoderI { + return helperEncDriverCborBytes{}.newEncoderBytes(out, h) +} + +func (h *CborHandle) newEncoder(w io.Writer) encoderI { + return helperEncDriverCborIO{}.newEncoderIO(w, h) +} + +func (h *CborHandle) newDecoderBytes(in []byte) decoderI { + return helperDecDriverCborBytes{}.newDecoderBytes(in, h) +} + +func (h *CborHandle) newDecoder(r io.Reader) decoderI { + return helperDecDriverCborIO{}.newDecoderIO(r, h) +} + +var ( + bincFpEncIO = helperEncDriverBincIO{}.fastpathEList() + bincFpEncBytes = helperEncDriverBincBytes{}.fastpathEList() + bincFpDecIO = helperDecDriverBincIO{}.fastpathDList() + bincFpDecBytes = helperDecDriverBincBytes{}.fastpathDList() +) + +var ( + cborFpEncIO = helperEncDriverCborIO{}.fastpathEList() + cborFpEncBytes = helperEncDriverCborBytes{}.fastpathEList() + cborFpDecIO = helperDecDriverCborIO{}.fastpathDList() + cborFpDecBytes = helperDecDriverCborBytes{}.fastpathDList() +) + +var ( + jsonFpEncIO = helperEncDriverJsonIO{}.fastpathEList() + jsonFpEncBytes = helperEncDriverJsonBytes{}.fastpathEList() + jsonFpDecIO = helperDecDriverJsonIO{}.fastpathDList() + jsonFpDecBytes = helperDecDriverJsonBytes{}.fastpathDList() +) + +var ( + msgpackFpEncIO = helperEncDriverMsgpackIO{}.fastpathEList() + msgpackFpEncBytes = 
helperEncDriverMsgpackBytes{}.fastpathEList() + msgpackFpDecIO = helperDecDriverMsgpackIO{}.fastpathDList() + msgpackFpDecBytes = helperDecDriverMsgpackBytes{}.fastpathDList() +) + +var ( + simpleFpEncIO = helperEncDriverSimpleIO{}.fastpathEList() + simpleFpEncBytes = helperEncDriverSimpleBytes{}.fastpathEList() + simpleFpDecIO = helperDecDriverSimpleIO{}.fastpathDList() + simpleFpDecBytes = helperDecDriverSimpleBytes{}.fastpathDList() +) diff --git a/vendor/github.com/ugorji/go/codec/init.notmono.go b/vendor/github.com/ugorji/go/codec/init.notmono.go new file mode 100644 index 000000000..b03fb8503 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/init.notmono.go @@ -0,0 +1,313 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +//go:build notmono || codec.notmono + +package codec + +import ( + "io" +) + +// This contains all the iniatializations of generics. +// Putting it into one file, ensures that we can go generics or not. + +type maker interface{ Make() } + +func callMake(v interface{}) { + v.(maker).Make() +} + +// ---- (writer.go) + +type encWriter interface { + bufioEncWriterM | bytesEncAppenderM + encWriterI +} + +type bytesEncAppenderM struct { + *bytesEncAppender +} + +func (z *bytesEncAppenderM) Make() { + z.bytesEncAppender = new(bytesEncAppender) + z.out = &bytesEncAppenderDefOut +} + +type bufioEncWriterM struct { + *bufioEncWriter +} + +func (z *bufioEncWriterM) Make() { + z.bufioEncWriter = new(bufioEncWriter) + z.w = io.Discard +} + +// ---- reader.go + +type decReader interface { + bytesDecReaderM | ioDecReaderM + + decReaderI +} + +type bytesDecReaderM struct { + *bytesDecReader +} + +func (z *bytesDecReaderM) Make() { + z.bytesDecReader = new(bytesDecReader) +} + +type ioDecReaderM struct { + *ioDecReader +} + +func (z *ioDecReaderM) Make() { + z.ioDecReader = new(ioDecReader) +} + +// type helperEncWriter[T encWriter] struct{} +// type helperDecReader[T decReader] struct{} +// func (helperDecReader[T]) decByteSlice(r T, clen, maxInitLen int, bs []byte) (bsOut []byte) { + +// ---- (encode.go) + +type encDriver interface { + simpleEncDriverM[bufioEncWriterM] | + simpleEncDriverM[bytesEncAppenderM] | + jsonEncDriverM[bufioEncWriterM] | + jsonEncDriverM[bytesEncAppenderM] | + cborEncDriverM[bufioEncWriterM] | + cborEncDriverM[bytesEncAppenderM] | + msgpackEncDriverM[bufioEncWriterM] | + msgpackEncDriverM[bytesEncAppenderM] | + bincEncDriverM[bufioEncWriterM] | + bincEncDriverM[bytesEncAppenderM] + + encDriverI +} + +// ---- (decode.go) + +type decDriver interface { + simpleDecDriverM[bytesDecReaderM] | + simpleDecDriverM[ioDecReaderM] | + jsonDecDriverM[bytesDecReaderM] | + jsonDecDriverM[ioDecReaderM] | + cborDecDriverM[bytesDecReaderM] | + cborDecDriverM[ioDecReaderM] | + msgpackDecDriverM[bytesDecReaderM] | + msgpackDecDriverM[ioDecReaderM] | + bincDecDriverM[bytesDecReaderM] | + bincDecDriverM[ioDecReaderM] + + decDriverI +} + +// Below: .go files + +// ---- (binc.go) + +type bincEncDriverM[T encWriter] struct { + *bincEncDriver[T] +} + +func (d *bincEncDriverM[T]) Make() { + d.bincEncDriver = new(bincEncDriver[T]) +} + +type bincDecDriverM[T decReader] struct { + *bincDecDriver[T] +} + +func (d *bincDecDriverM[T]) Make() { + d.bincDecDriver = new(bincDecDriver[T]) +} + +var ( + bincFpEncIO = helperEncDriver[bincEncDriverM[bufioEncWriterM]]{}.fastpathEList() + bincFpEncBytes = helperEncDriver[bincEncDriverM[bytesEncAppenderM]]{}.fastpathEList() + bincFpDecIO = 
helperDecDriver[bincDecDriverM[ioDecReaderM]]{}.fastpathDList() + bincFpDecBytes = helperDecDriver[bincDecDriverM[bytesDecReaderM]]{}.fastpathDList() +) + +// ---- (cbor.go) + +type cborEncDriverM[T encWriter] struct { + *cborEncDriver[T] +} + +func (d *cborEncDriverM[T]) Make() { + d.cborEncDriver = new(cborEncDriver[T]) +} + +type cborDecDriverM[T decReader] struct { + *cborDecDriver[T] +} + +func (d *cborDecDriverM[T]) Make() { + d.cborDecDriver = new(cborDecDriver[T]) +} + +var ( + cborFpEncIO = helperEncDriver[cborEncDriverM[bufioEncWriterM]]{}.fastpathEList() + cborFpEncBytes = helperEncDriver[cborEncDriverM[bytesEncAppenderM]]{}.fastpathEList() + cborFpDecIO = helperDecDriver[cborDecDriverM[ioDecReaderM]]{}.fastpathDList() + cborFpDecBytes = helperDecDriver[cborDecDriverM[bytesDecReaderM]]{}.fastpathDList() +) + +// ---- (json.go) + +type jsonEncDriverM[T encWriter] struct { + *jsonEncDriver[T] +} + +func (d *jsonEncDriverM[T]) Make() { + d.jsonEncDriver = new(jsonEncDriver[T]) +} + +type jsonDecDriverM[T decReader] struct { + *jsonDecDriver[T] +} + +func (d *jsonDecDriverM[T]) Make() { + d.jsonDecDriver = new(jsonDecDriver[T]) +} + +var ( + jsonFpEncIO = helperEncDriver[jsonEncDriverM[bufioEncWriterM]]{}.fastpathEList() + jsonFpEncBytes = helperEncDriver[jsonEncDriverM[bytesEncAppenderM]]{}.fastpathEList() + jsonFpDecIO = helperDecDriver[jsonDecDriverM[ioDecReaderM]]{}.fastpathDList() + jsonFpDecBytes = helperDecDriver[jsonDecDriverM[bytesDecReaderM]]{}.fastpathDList() +) + +// ---- (msgpack.go) + +type msgpackEncDriverM[T encWriter] struct { + *msgpackEncDriver[T] +} + +func (d *msgpackEncDriverM[T]) Make() { + d.msgpackEncDriver = new(msgpackEncDriver[T]) +} + +type msgpackDecDriverM[T decReader] struct { + *msgpackDecDriver[T] +} + +func (d *msgpackDecDriverM[T]) Make() { + d.msgpackDecDriver = new(msgpackDecDriver[T]) +} + +var ( + msgpackFpEncIO = helperEncDriver[msgpackEncDriverM[bufioEncWriterM]]{}.fastpathEList() + msgpackFpEncBytes = helperEncDriver[msgpackEncDriverM[bytesEncAppenderM]]{}.fastpathEList() + msgpackFpDecIO = helperDecDriver[msgpackDecDriverM[ioDecReaderM]]{}.fastpathDList() + msgpackFpDecBytes = helperDecDriver[msgpackDecDriverM[bytesDecReaderM]]{}.fastpathDList() +) + +// ---- (simple.go) + +type simpleEncDriverM[T encWriter] struct { + *simpleEncDriver[T] +} + +func (d *simpleEncDriverM[T]) Make() { + d.simpleEncDriver = new(simpleEncDriver[T]) +} + +type simpleDecDriverM[T decReader] struct { + *simpleDecDriver[T] +} + +func (d *simpleDecDriverM[T]) Make() { + d.simpleDecDriver = new(simpleDecDriver[T]) +} + +var ( + simpleFpEncIO = helperEncDriver[simpleEncDriverM[bufioEncWriterM]]{}.fastpathEList() + simpleFpEncBytes = helperEncDriver[simpleEncDriverM[bytesEncAppenderM]]{}.fastpathEList() + simpleFpDecIO = helperDecDriver[simpleDecDriverM[ioDecReaderM]]{}.fastpathDList() + simpleFpDecBytes = helperDecDriver[simpleDecDriverM[bytesDecReaderM]]{}.fastpathDList() +) + +func (h *SimpleHandle) newEncoderBytes(out *[]byte) encoderI { + return helperEncDriver[simpleEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h) +} + +func (h *SimpleHandle) newEncoder(w io.Writer) encoderI { + return helperEncDriver[simpleEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h) +} + +func (h *SimpleHandle) newDecoderBytes(in []byte) decoderI { + return helperDecDriver[simpleDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h) +} + +func (h *SimpleHandle) newDecoder(r io.Reader) decoderI { + return helperDecDriver[simpleDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h) +} + 
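These per-handle constructors (in both the mono and notmono builds) are what the package's public entry points dispatch to. A minimal usage sketch of that public API, assuming the usual github.com/ugorji/go/codec import path:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle // any Handle works: JsonHandle, MsgpackHandle, CborHandle, BincHandle, SimpleHandle

	// Encode into a byte slice (served by the bytes-based driver variants above).
	var out []byte
	if err := codec.NewEncoderBytes(&out, &h).Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"a":1}

	// Decode back (served by the bytes-based decoder variants above).
	var m map[string]int
	if err := codec.NewDecoderBytes(out, &h).Decode(&m); err != nil {
		panic(err)
	}
	fmt.Println(m["a"]) // 1
}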
+func (h *JsonHandle) newEncoderBytes(out *[]byte) encoderI { + return helperEncDriver[jsonEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h) +} + +func (h *JsonHandle) newEncoder(w io.Writer) encoderI { + return helperEncDriver[jsonEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h) +} + +func (h *JsonHandle) newDecoderBytes(in []byte) decoderI { + return helperDecDriver[jsonDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h) +} + +func (h *JsonHandle) newDecoder(r io.Reader) decoderI { + return helperDecDriver[jsonDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h) +} + +func (h *MsgpackHandle) newEncoderBytes(out *[]byte) encoderI { + return helperEncDriver[msgpackEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h) +} + +func (h *MsgpackHandle) newEncoder(w io.Writer) encoderI { + return helperEncDriver[msgpackEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h) +} + +func (h *MsgpackHandle) newDecoderBytes(in []byte) decoderI { + return helperDecDriver[msgpackDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h) +} + +func (h *MsgpackHandle) newDecoder(r io.Reader) decoderI { + return helperDecDriver[msgpackDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h) +} + +func (h *CborHandle) newEncoderBytes(out *[]byte) encoderI { + return helperEncDriver[cborEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h) +} + +func (h *CborHandle) newEncoder(w io.Writer) encoderI { + return helperEncDriver[cborEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h) +} + +func (h *CborHandle) newDecoderBytes(in []byte) decoderI { + return helperDecDriver[cborDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h) +} + +func (h *CborHandle) newDecoder(r io.Reader) decoderI { + return helperDecDriver[cborDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h) +} + +func (h *BincHandle) newEncoderBytes(out *[]byte) encoderI { + return helperEncDriver[bincEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h) +} + +func (h *BincHandle) newEncoder(w io.Writer) encoderI { + return helperEncDriver[bincEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h) +} + +func (h *BincHandle) newDecoderBytes(in []byte) decoderI { + return helperDecDriver[bincDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h) +} + +func (h *BincHandle) newDecoder(r io.Reader) decoderI { + return helperDecDriver[bincDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h) +} diff --git a/vendor/github.com/ugorji/go/codec/json.base.go b/vendor/github.com/ugorji/go/codec/json.base.go new file mode 100644 index 000000000..d87403393 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/json.base.go @@ -0,0 +1,504 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding/base32" + "encoding/base64" + "errors" + "math" + "reflect" + "strings" + "time" + "unicode" +) + +//-------------------------------- + +// jsonLits and jsonLitb are defined at the package level, +// so they are guaranteed to be stored efficiently, making +// for better append/string comparison/etc. 
+// +// (anecdotal evidence from some benchmarking on go 1.20 devel in 20220104) +const jsonLits = `"true"false"null"{}[]` + +const ( + jsonLitT = 1 + jsonLitF = 6 + jsonLitN = 12 + jsonLitM = 17 + jsonLitA = 19 +) + +var jsonLitb = []byte(jsonLits) +var jsonNull = jsonLitb[jsonLitN : jsonLitN+4] +var jsonArrayEmpty = jsonLitb[jsonLitA : jsonLitA+2] +var jsonMapEmpty = jsonLitb[jsonLitM : jsonLitM+2] + +const jsonEncodeUintSmallsString = "" + + "00010203040506070809" + + "10111213141516171819" + + "20212223242526272829" + + "30313233343536373839" + + "40414243444546474849" + + "50515253545556575859" + + "60616263646566676869" + + "70717273747576777879" + + "80818283848586878889" + + "90919293949596979899" + +var jsonEncodeUintSmallsStringBytes = (*[len(jsonEncodeUintSmallsString)]byte)([]byte(jsonEncodeUintSmallsString)) + +const ( + jsonU4Chk2 = '0' + jsonU4Chk1 = 'a' - 10 + jsonU4Chk0 = 'A' - 10 +) + +const ( + // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: + // - If we see first character of null, false or true, + // do not validate subsequent characters. + // - e.g. if we see a n, assume null and skip next 3 characters, + // and do not validate they are ull. + // P.S. Do not expect a significant decoding boost from this. + jsonValidateSymbols = true + + // jsonEscapeMultiByteUnicodeSep controls whether some unicode characters + // that are valid json but may bomb in some contexts are escaped during encoeing. + // + // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. + // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally. + jsonEscapeMultiByteUnicodeSep = true + + // jsonNakedBoolNumInQuotedStr is used during decoding into a blank interface{} + // to control whether we detect quoted values of bools and null where a map key is expected, + // and treat as nil, true or false. + jsonNakedBoolNumInQuotedStr = true +) + +var ( + // jsonTabs and jsonSpaces are used as caches for indents + jsonTabs [32]byte + jsonSpaces [128]byte + + jsonHexEncoder hexEncoder + // jsonTimeLayout is used to validate time layouts. + // Unfortunately, we couldn't compare time.Time effectively, so punted. + // jsonTimeLayout time.Time +) + +func init() { + for i := 0; i < len(jsonTabs); i++ { + jsonTabs[i] = '\t' + } + for i := 0; i < len(jsonSpaces); i++ { + jsonSpaces[i] = ' ' + } + // jsonTimeLayout, err := time.Parse(time.Layout, time.Layout) + // halt.onerror(err) + // jsonTimeLayout = jsonTimeLayout.Round(time.Second).UTC() +} + +// ---------------- + +type jsonBytesFmt uint8 + +const ( + jsonBytesFmtArray jsonBytesFmt = iota + 1 + jsonBytesFmtBase64 + jsonBytesFmtBase64url + jsonBytesFmtBase32 + jsonBytesFmtBase32hex + jsonBytesFmtBase16 + + jsonBytesFmtHex = jsonBytesFmtBase16 +) + +type jsonTimeFmt uint8 + +const ( + jsonTimeFmtStringLayout jsonTimeFmt = iota + 1 + jsonTimeFmtUnix + jsonTimeFmtUnixMilli + jsonTimeFmtUnixMicro + jsonTimeFmtUnixNano +) + +type jsonBytesFmter = bytesEncoder + +type jsonHandleOpts struct { + rawext bool + // bytesFmt used during encode to determine how to encode []byte + bytesFmt jsonBytesFmt + // timeFmt used during encode to determine how to encode a time.Time + timeFmt jsonTimeFmt + // timeFmtNum used during decode to decode a time.Time from an int64 in the stream + timeFmtNum jsonTimeFmt + // timeFmtLayouts used on decode, to try to parse time.Time until successful + timeFmtLayouts []string + // byteFmters used on decode, to try to parse []byte from a UTF-8 string encoding (e.g. 
base64) + byteFmters []jsonBytesFmter +} + +func jsonCheckTimeLayout(s string) (ok bool) { + _, err := time.Parse(s, s) + // t...Equal(jsonTimeLayout) always returns false - unsure why + // return err == nil && t.Round(time.Second).UTC().Equal(jsonTimeLayout) + return err == nil +} + +func (x *jsonHandleOpts) reset(h *JsonHandle) { + x.timeFmt = 0 + x.timeFmtNum = 0 + x.timeFmtLayouts = x.timeFmtLayouts[:0] + if len(h.TimeFormat) != 0 { + switch h.TimeFormat[0] { + case "unix": + x.timeFmt = jsonTimeFmtUnix + case "unixmilli": + x.timeFmt = jsonTimeFmtUnixMilli + case "unixmicro": + x.timeFmt = jsonTimeFmtUnixMicro + case "unixnano": + x.timeFmt = jsonTimeFmtUnixNano + } + x.timeFmtNum = x.timeFmt + for _, v := range h.TimeFormat { + if !strings.HasPrefix(v, "unix") && jsonCheckTimeLayout(v) { + x.timeFmtLayouts = append(x.timeFmtLayouts, v) + } + } + } + if x.timeFmt == 0 { // both timeFmt and timeFmtNum are 0 + x.timeFmtNum = jsonTimeFmtUnix + x.timeFmt = jsonTimeFmtStringLayout + if len(x.timeFmtLayouts) == 0 { + x.timeFmtLayouts = append(x.timeFmtLayouts, time.RFC3339Nano) + } + } + + x.bytesFmt = 0 + x.byteFmters = x.byteFmters[:0] + var b64 bool + if len(h.BytesFormat) != 0 { + switch h.BytesFormat[0] { + case "array": + x.bytesFmt = jsonBytesFmtArray + case "base64": + x.bytesFmt = jsonBytesFmtBase64 + case "base64url": + x.bytesFmt = jsonBytesFmtBase64url + case "base32": + x.bytesFmt = jsonBytesFmtBase32 + case "base32hex": + x.bytesFmt = jsonBytesFmtBase32hex + case "base16", "hex": + x.bytesFmt = jsonBytesFmtBase16 + } + for _, v := range h.BytesFormat { + switch v { + // case "array": + case "base64": + x.byteFmters = append(x.byteFmters, base64.StdEncoding) + b64 = true + case "base64url": + x.byteFmters = append(x.byteFmters, base64.URLEncoding) + case "base32": + x.byteFmters = append(x.byteFmters, base32.StdEncoding) + case "base32hex": + x.byteFmters = append(x.byteFmters, base32.HexEncoding) + case "base16", "hex": + x.byteFmters = append(x.byteFmters, &jsonHexEncoder) + } + } + } + if x.bytesFmt == 0 { + // either len==0 OR gibberish was in the first element; resolve here + x.bytesFmt = jsonBytesFmtBase64 + if !b64 { // not present - so insert into pos 0 + x.byteFmters = append(x.byteFmters, nil) + copy(x.byteFmters[1:], x.byteFmters[0:]) + x.byteFmters[0] = base64.StdEncoding + } + } + // ---- + x.rawext = h.RawBytesExt != nil +} + +var jsonEncBoolStrs = [2][2]string{ + {jsonLits[jsonLitF : jsonLitF+5], jsonLits[jsonLitT : jsonLitT+4]}, + {jsonLits[jsonLitF-1 : jsonLitF+6], jsonLits[jsonLitT-1 : jsonLitT+5]}, +} + +func jsonEncodeUint(neg, quotes bool, u uint64, b *[48]byte) []byte { + // MARKER: use setByteAt/byteAt to elide the bounds-checks + // when we are sure that we don't go beyond the bounds. + + // MARKER: copied mostly from std library: strconv/itoa.go + // this should only be called on 64bit OS. 
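+	// In outline: the decimal digits are written backwards into the tail of a
+	// 24-byte scratch slice, two digits at a time, using us%100*2 as an index
+	// into the "00".."99" pairs table above; the quotes and minus sign are
+	// prepended the same way, and the filled tail a[i:] is returned.
+	// On 32-bit platforms the value is first peeled into 9-digit chunks so the
+	// per-chunk arithmetic fits in a single machine word (uint).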
+ + var ss = jsonEncodeUintSmallsStringBytes[:] + + // typically, 19 or 20 bytes sufficient for decimal encoding a uint64 + var a = b[:24] + var i = uint(len(a)) + + if quotes { + i-- + setByteAt(a, i, '"') + // a[i] = '"' + } + var is, us uint // use uint, as those fit into a register on the platform + if cpu32Bit { + for u >= 1e9 { + q := u / 1e9 + us = uint(u - q*1e9) // u % 1e9 fits into a uint + for j := 4; j > 0; j-- { + is = us % 100 * 2 + us /= 100 + i -= 2 + setByteAt(a, i+1, byteAt(ss, is+1)) + setByteAt(a, i, byteAt(ss, is)) + } + i-- + setByteAt(a, i, byteAt(ss, us*2+1)) + u = q + } + // u is now < 1e9, so is guaranteed to fit into a uint + } + us = uint(u) + for us >= 100 { + is = us % 100 * 2 + us /= 100 + i -= 2 + setByteAt(a, i+1, byteAt(ss, is+1)) + setByteAt(a, i, byteAt(ss, is)) + // a[i+1], a[i] = ss[is+1], ss[is] + } + + // us < 100 + is = us * 2 + i-- + setByteAt(a, i, byteAt(ss, is+1)) + // a[i] = ss[is+1] + if us >= 10 { + i-- + setByteAt(a, i, byteAt(ss, is)) + // a[i] = ss[is] + } + if neg { + i-- + setByteAt(a, i, '-') + // a[i] = '-' + } + if quotes { + i-- + setByteAt(a, i, '"') + // a[i] = '"' + } + return a[i:] +} + +// MARKER: checkLitErr methods to prevent the got/expect parameters from escaping + +//go:noinline +func jsonCheckLitErr3(got, expect [3]byte) { + halt.errorf("expecting %s: got %s", expect, got) +} + +//go:noinline +func jsonCheckLitErr4(got, expect [4]byte) { + halt.errorf("expecting %s: got %s", expect, got) +} + +func jsonSlashURune(cs [4]byte) (rr uint32) { + for _, c := range cs { + // best to use explicit if-else + // - not a table, etc which involve memory loads, array lookup with bounds checks, etc + if c >= '0' && c <= '9' { + rr = rr*16 + uint32(c-jsonU4Chk2) + } else if c >= 'a' && c <= 'f' { + rr = rr*16 + uint32(c-jsonU4Chk1) + } else if c >= 'A' && c <= 'F' { + rr = rr*16 + uint32(c-jsonU4Chk0) + } else { + return unicode.ReplacementChar + } + } + return +} + +func jsonNakedNum(z *fauxUnion, bs []byte, preferFloat, signedInt bool) (err error) { + // Note: jsonNakedNum is NEVER called with a zero-length []byte + if preferFloat { + z.v = valueTypeFloat + z.f, err = parseFloat64(bs) + } else { + err = parseNumber(bs, z, signedInt) + } + return +} + +//---------------------- + +// JsonHandle is a handle for JSON encoding format. +// +// Json is comprehensively supported: +// - decodes numbers into interface{} as int, uint or float64 +// based on how the number looks and some config parameters e.g. PreferFloat, SignedInt, etc. +// - decode integers from float formatted numbers e.g. 1.27e+8 +// - decode any json value (numbers, bool, etc) from quoted strings +// - configurable way to encode/decode []byte . +// by default, encodes and decodes []byte using base64 Std Encoding +// - UTF-8 support for encoding and decoding +// +// It has better performance than the json library in the standard library, +// by leveraging the performance improvements of the codec library. +// +// In addition, it doesn't read more bytes than necessary during a decode, which allows +// reading multiple values from a stream containing json and non-json content. +// For example, a user can read a json value, then a cbor value, then a msgpack value, +// all from the same stream in sequence. +// +// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are +// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD. 
+// +// Note also that the float values for NaN, +Inf or -Inf are encoded as null, +// as suggested by NOTE 4 of the ECMA-262 ECMAScript Language Specification 5.1 edition. +// see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf . +// +// Note the following behaviour differences vs std-library encoding/json package: +// - struct field names matched in case-sensitive manner +type JsonHandle struct { + textEncodingType + BasicHandle + + // Indent indicates how a value is encoded. + // - If positive, indent by that number of spaces. + // - If negative, indent by that number of tabs. + Indent int8 + + // IntegerAsString controls how integers (signed and unsigned) are encoded. + // + // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. + // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. + // This can be mitigated by configuring how to encode integers. + // + // IntegerAsString interpretes the following values: + // - if 'L', then encode integers > 2^53 as a json string. + // - if 'A', then encode all integers as a json string + // containing the exact integer representation as a decimal. + // - else encode all integers as a json number (default) + IntegerAsString byte + + // HTMLCharsAsIs controls how to encode some special characters to html: < > & + // + // By default, we encode them as \uXXX + // to prevent security holes when served from some browsers. + HTMLCharsAsIs bool + + // PreferFloat says that we will default to decoding a number as a float. + // If not set, we will examine the characters of the number and decode as an + // integer type if it doesn't have any of the characters [.eE]. + PreferFloat bool + + // TermWhitespace says that we add a whitespace character + // at the end of an encoding. + // + // The whitespace is important, especially if using numbers in a context + // where multiple items are written to a stream. + TermWhitespace bool + + // MapKeyAsString says to encode all map keys as strings. + // + // Use this to enforce strict json output. + // The only caveat is that nil value is ALWAYS written as null (never as "null") + MapKeyAsString bool + + // _ uint64 // padding (cache line) + + // Note: below, we store hardly-used items e.g. RawBytesExt. + // These values below may straddle a cache line, but they are hardly-used, + // so shouldn't contribute to false-sharing except in rare cases. + + // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. + // If not configured, raw bytes are encoded to/from base64 text. + RawBytesExt InterfaceExt + + // TimeFormat is an array of strings representing a time.Time format, with each one being either + // a layout that honor the time.Time.Format specification. + // In addition, at most one of the set below (unix, unixmilli, unixmicro, unixnana) can be specified + // supporting encoding and decoding time as a number relative to the time epoch of Jan 1, 1970. + // + // During encode of a time.Time, the first entry in the array is used (defaults to RFC 3339). + // + // During decode, + // - if a string, then each of the layout formats will be tried in order until a time.Time is decoded. + // - if a number, then the sole unix entry is used. + TimeFormat []string + + // BytesFormat is an array of strings representing how bytes are encoded. + // + // Supported values are base64 (default), base64url, base32, base32hex, base16 (synonymous with hex) and array. 
+ // + // array is a special value configuring that bytes are encoded as a sequence of numbers. + // + // During encode of a []byte, the first entry is used (defaults to base64 if none specified). + // + // During decode + // - if a string, then attempt decoding using each format in sequence until successful. + // - if an array, then decode normally + BytesFormat []string +} + +func (h *JsonHandle) isJson() bool { return true } + +// Name returns the name of the handle: json +func (h *JsonHandle) Name() string { return "json" } + +// func (h *JsonHandle) desc(bd byte) string { return str4byte(bd) } +func (h *JsonHandle) desc(bd byte) string { return string(bd) } + +func (h *JsonHandle) typical() bool { + return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L' +} + +// SetInterfaceExt sets an extension +func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, makeExt(ext)) +} + +func jsonFloatStrconvFmtPrec64(f float64) (fmt byte, prec int8) { + fmt = 'f' + prec = -1 + fbits := math.Float64bits(f) + abs := math.Float64frombits(fbits &^ (1 << 63)) + if abs == 0 || abs == 1 { + prec = 1 + } else if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } else if noFrac64(fbits) { + prec = 1 + } + return +} + +func jsonFloatStrconvFmtPrec32(f float32) (fmt byte, prec int8) { + fmt = 'f' + prec = -1 + // directly handle Modf (to get fractions) and Abs (to get absolute) + fbits := math.Float32bits(f) + abs := math.Float32frombits(fbits &^ (1 << 31)) + if abs == 0 || abs == 1 { + prec = 1 + } else if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } else if noFrac32(fbits) { + prec = 1 + } + return +} + +var errJsonNoBd = errors.New("descBd unsupported in json") diff --git a/vendor/github.com/ugorji/go/codec/json.fastpath.mono.generated.go b/vendor/github.com/ugorji/go/codec/json.fastpath.mono.generated.go new file mode 100644 index 000000000..b873b0aef --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/json.fastpath.mono.generated.go @@ -0,0 +1,12482 @@ +//go:build !notmono && !codec.notmono && !notfastpath && !codec.notfastpath + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
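TimeFormat and BytesFormat are ordinary string slices on JsonHandle, so the option resolution in jsonHandleOpts.reset above can be driven entirely from handle configuration. A small sketch of setting them; the field names come from the struct above, and the exact output depends on the remaining handle defaults:

package main

import (
	"fmt"
	"time"

	"github.com/ugorji/go/codec"
)

type rec struct {
	At  time.Time `codec:"at"`
	Raw []byte    `codec:"raw"`
}

func main() {
	var h codec.JsonHandle
	// Encode time.Time as unix milliseconds; fall back to RFC3339Nano when decoding strings.
	h.TimeFormat = []string{"unixmilli", time.RFC3339Nano}
	// Encode []byte as a JSON array of numbers instead of the default base64 string.
	h.BytesFormat = []string{"array"}

	var out []byte
	v := rec{At: time.Unix(0, 0).UTC(), Raw: []byte{1, 2}}
	if err := codec.NewEncoderBytes(&out, &h).Encode(v); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // e.g. {"at":0,"raw":[1,2]}
}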
+ +package codec + +import ( + "reflect" + "slices" + "sort" +) + +type fastpathEJsonBytes struct { + rtid uintptr + rt reflect.Type + encfn func(*encoderJsonBytes, *encFnInfo, reflect.Value) +} +type fastpathDJsonBytes struct { + rtid uintptr + rt reflect.Type + decfn func(*decoderJsonBytes, *decFnInfo, reflect.Value) +} +type fastpathEsJsonBytes [56]fastpathEJsonBytes +type fastpathDsJsonBytes [56]fastpathDJsonBytes +type fastpathETJsonBytes struct{} +type fastpathDTJsonBytes struct{} + +func (helperEncDriverJsonBytes) fastpathEList() *fastpathEsJsonBytes { + var i uint = 0 + var s fastpathEsJsonBytes + fn := func(v interface{}, fe func(*encoderJsonBytes, *encFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathEJsonBytes{rt2id(xrt), xrt, fe} + i++ + } + + fn([]interface{}(nil), (*encoderJsonBytes).fastpathEncSliceIntfR) + fn([]string(nil), (*encoderJsonBytes).fastpathEncSliceStringR) + fn([][]byte(nil), (*encoderJsonBytes).fastpathEncSliceBytesR) + fn([]float32(nil), (*encoderJsonBytes).fastpathEncSliceFloat32R) + fn([]float64(nil), (*encoderJsonBytes).fastpathEncSliceFloat64R) + fn([]uint8(nil), (*encoderJsonBytes).fastpathEncSliceUint8R) + fn([]uint64(nil), (*encoderJsonBytes).fastpathEncSliceUint64R) + fn([]int(nil), (*encoderJsonBytes).fastpathEncSliceIntR) + fn([]int32(nil), (*encoderJsonBytes).fastpathEncSliceInt32R) + fn([]int64(nil), (*encoderJsonBytes).fastpathEncSliceInt64R) + fn([]bool(nil), (*encoderJsonBytes).fastpathEncSliceBoolR) + + fn(map[string]interface{}(nil), (*encoderJsonBytes).fastpathEncMapStringIntfR) + fn(map[string]string(nil), (*encoderJsonBytes).fastpathEncMapStringStringR) + fn(map[string][]byte(nil), (*encoderJsonBytes).fastpathEncMapStringBytesR) + fn(map[string]uint8(nil), (*encoderJsonBytes).fastpathEncMapStringUint8R) + fn(map[string]uint64(nil), (*encoderJsonBytes).fastpathEncMapStringUint64R) + fn(map[string]int(nil), (*encoderJsonBytes).fastpathEncMapStringIntR) + fn(map[string]int32(nil), (*encoderJsonBytes).fastpathEncMapStringInt32R) + fn(map[string]float64(nil), (*encoderJsonBytes).fastpathEncMapStringFloat64R) + fn(map[string]bool(nil), (*encoderJsonBytes).fastpathEncMapStringBoolR) + fn(map[uint8]interface{}(nil), (*encoderJsonBytes).fastpathEncMapUint8IntfR) + fn(map[uint8]string(nil), (*encoderJsonBytes).fastpathEncMapUint8StringR) + fn(map[uint8][]byte(nil), (*encoderJsonBytes).fastpathEncMapUint8BytesR) + fn(map[uint8]uint8(nil), (*encoderJsonBytes).fastpathEncMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*encoderJsonBytes).fastpathEncMapUint8Uint64R) + fn(map[uint8]int(nil), (*encoderJsonBytes).fastpathEncMapUint8IntR) + fn(map[uint8]int32(nil), (*encoderJsonBytes).fastpathEncMapUint8Int32R) + fn(map[uint8]float64(nil), (*encoderJsonBytes).fastpathEncMapUint8Float64R) + fn(map[uint8]bool(nil), (*encoderJsonBytes).fastpathEncMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*encoderJsonBytes).fastpathEncMapUint64IntfR) + fn(map[uint64]string(nil), (*encoderJsonBytes).fastpathEncMapUint64StringR) + fn(map[uint64][]byte(nil), (*encoderJsonBytes).fastpathEncMapUint64BytesR) + fn(map[uint64]uint8(nil), (*encoderJsonBytes).fastpathEncMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*encoderJsonBytes).fastpathEncMapUint64Uint64R) + fn(map[uint64]int(nil), (*encoderJsonBytes).fastpathEncMapUint64IntR) + fn(map[uint64]int32(nil), (*encoderJsonBytes).fastpathEncMapUint64Int32R) + fn(map[uint64]float64(nil), (*encoderJsonBytes).fastpathEncMapUint64Float64R) + fn(map[uint64]bool(nil), (*encoderJsonBytes).fastpathEncMapUint64BoolR) + 
fn(map[int]interface{}(nil), (*encoderJsonBytes).fastpathEncMapIntIntfR) + fn(map[int]string(nil), (*encoderJsonBytes).fastpathEncMapIntStringR) + fn(map[int][]byte(nil), (*encoderJsonBytes).fastpathEncMapIntBytesR) + fn(map[int]uint8(nil), (*encoderJsonBytes).fastpathEncMapIntUint8R) + fn(map[int]uint64(nil), (*encoderJsonBytes).fastpathEncMapIntUint64R) + fn(map[int]int(nil), (*encoderJsonBytes).fastpathEncMapIntIntR) + fn(map[int]int32(nil), (*encoderJsonBytes).fastpathEncMapIntInt32R) + fn(map[int]float64(nil), (*encoderJsonBytes).fastpathEncMapIntFloat64R) + fn(map[int]bool(nil), (*encoderJsonBytes).fastpathEncMapIntBoolR) + fn(map[int32]interface{}(nil), (*encoderJsonBytes).fastpathEncMapInt32IntfR) + fn(map[int32]string(nil), (*encoderJsonBytes).fastpathEncMapInt32StringR) + fn(map[int32][]byte(nil), (*encoderJsonBytes).fastpathEncMapInt32BytesR) + fn(map[int32]uint8(nil), (*encoderJsonBytes).fastpathEncMapInt32Uint8R) + fn(map[int32]uint64(nil), (*encoderJsonBytes).fastpathEncMapInt32Uint64R) + fn(map[int32]int(nil), (*encoderJsonBytes).fastpathEncMapInt32IntR) + fn(map[int32]int32(nil), (*encoderJsonBytes).fastpathEncMapInt32Int32R) + fn(map[int32]float64(nil), (*encoderJsonBytes).fastpathEncMapInt32Float64R) + fn(map[int32]bool(nil), (*encoderJsonBytes).fastpathEncMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperDecDriverJsonBytes) fastpathDList() *fastpathDsJsonBytes { + var i uint = 0 + var s fastpathDsJsonBytes + fn := func(v interface{}, fd func(*decoderJsonBytes, *decFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathDJsonBytes{rt2id(xrt), xrt, fd} + i++ + } + + fn([]interface{}(nil), (*decoderJsonBytes).fastpathDecSliceIntfR) + fn([]string(nil), (*decoderJsonBytes).fastpathDecSliceStringR) + fn([][]byte(nil), (*decoderJsonBytes).fastpathDecSliceBytesR) + fn([]float32(nil), (*decoderJsonBytes).fastpathDecSliceFloat32R) + fn([]float64(nil), (*decoderJsonBytes).fastpathDecSliceFloat64R) + fn([]uint8(nil), (*decoderJsonBytes).fastpathDecSliceUint8R) + fn([]uint64(nil), (*decoderJsonBytes).fastpathDecSliceUint64R) + fn([]int(nil), (*decoderJsonBytes).fastpathDecSliceIntR) + fn([]int32(nil), (*decoderJsonBytes).fastpathDecSliceInt32R) + fn([]int64(nil), (*decoderJsonBytes).fastpathDecSliceInt64R) + fn([]bool(nil), (*decoderJsonBytes).fastpathDecSliceBoolR) + + fn(map[string]interface{}(nil), (*decoderJsonBytes).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*decoderJsonBytes).fastpathDecMapStringStringR) + fn(map[string][]byte(nil), (*decoderJsonBytes).fastpathDecMapStringBytesR) + fn(map[string]uint8(nil), (*decoderJsonBytes).fastpathDecMapStringUint8R) + fn(map[string]uint64(nil), (*decoderJsonBytes).fastpathDecMapStringUint64R) + fn(map[string]int(nil), (*decoderJsonBytes).fastpathDecMapStringIntR) + fn(map[string]int32(nil), (*decoderJsonBytes).fastpathDecMapStringInt32R) + fn(map[string]float64(nil), (*decoderJsonBytes).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*decoderJsonBytes).fastpathDecMapStringBoolR) + fn(map[uint8]interface{}(nil), (*decoderJsonBytes).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*decoderJsonBytes).fastpathDecMapUint8StringR) + fn(map[uint8][]byte(nil), (*decoderJsonBytes).fastpathDecMapUint8BytesR) + fn(map[uint8]uint8(nil), (*decoderJsonBytes).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*decoderJsonBytes).fastpathDecMapUint8Uint64R) + fn(map[uint8]int(nil), (*decoderJsonBytes).fastpathDecMapUint8IntR) + 
fn(map[uint8]int32(nil), (*decoderJsonBytes).fastpathDecMapUint8Int32R) + fn(map[uint8]float64(nil), (*decoderJsonBytes).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*decoderJsonBytes).fastpathDecMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*decoderJsonBytes).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*decoderJsonBytes).fastpathDecMapUint64StringR) + fn(map[uint64][]byte(nil), (*decoderJsonBytes).fastpathDecMapUint64BytesR) + fn(map[uint64]uint8(nil), (*decoderJsonBytes).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*decoderJsonBytes).fastpathDecMapUint64Uint64R) + fn(map[uint64]int(nil), (*decoderJsonBytes).fastpathDecMapUint64IntR) + fn(map[uint64]int32(nil), (*decoderJsonBytes).fastpathDecMapUint64Int32R) + fn(map[uint64]float64(nil), (*decoderJsonBytes).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*decoderJsonBytes).fastpathDecMapUint64BoolR) + fn(map[int]interface{}(nil), (*decoderJsonBytes).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*decoderJsonBytes).fastpathDecMapIntStringR) + fn(map[int][]byte(nil), (*decoderJsonBytes).fastpathDecMapIntBytesR) + fn(map[int]uint8(nil), (*decoderJsonBytes).fastpathDecMapIntUint8R) + fn(map[int]uint64(nil), (*decoderJsonBytes).fastpathDecMapIntUint64R) + fn(map[int]int(nil), (*decoderJsonBytes).fastpathDecMapIntIntR) + fn(map[int]int32(nil), (*decoderJsonBytes).fastpathDecMapIntInt32R) + fn(map[int]float64(nil), (*decoderJsonBytes).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*decoderJsonBytes).fastpathDecMapIntBoolR) + fn(map[int32]interface{}(nil), (*decoderJsonBytes).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*decoderJsonBytes).fastpathDecMapInt32StringR) + fn(map[int32][]byte(nil), (*decoderJsonBytes).fastpathDecMapInt32BytesR) + fn(map[int32]uint8(nil), (*decoderJsonBytes).fastpathDecMapInt32Uint8R) + fn(map[int32]uint64(nil), (*decoderJsonBytes).fastpathDecMapInt32Uint64R) + fn(map[int32]int(nil), (*decoderJsonBytes).fastpathDecMapInt32IntR) + fn(map[int32]int32(nil), (*decoderJsonBytes).fastpathDecMapInt32Int32R) + fn(map[int32]float64(nil), (*decoderJsonBytes).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*decoderJsonBytes).fastpathDecMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperEncDriverJsonBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderJsonBytes) bool { + var ft fastpathETJsonBytes + switch v := iv.(type) { + case []interface{}: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntfV(v, e) + } + case []string: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceStringV(v, e) + } + case [][]byte: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBytesV(v, e) + } + case []float32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat32V(v, e) + } + case []float64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat64V(v, e) + } + case []uint8: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint8V(v, e) + } + case []uint64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint64V(v, e) + } + case []int: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntV(v, e) + } + case []int32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt32V(v, e) + } + case []int64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt64V(v, e) + } + case []bool: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBoolV(v, e) + } + case 
map[string]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntfV(v, e) + } + case map[string]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringStringV(v, e) + } + case map[string][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBytesV(v, e) + } + case map[string]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint8V(v, e) + } + case map[string]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint64V(v, e) + } + case map[string]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntV(v, e) + } + case map[string]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringInt32V(v, e) + } + case map[string]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringFloat64V(v, e) + } + case map[string]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBoolV(v, e) + } + case map[uint8]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntfV(v, e) + } + case map[uint8]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8StringV(v, e) + } + case map[uint8][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BytesV(v, e) + } + case map[uint8]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint8V(v, e) + } + case map[uint8]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint64V(v, e) + } + case map[uint8]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntV(v, e) + } + case map[uint8]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Int32V(v, e) + } + case map[uint8]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Float64V(v, e) + } + case map[uint8]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BoolV(v, e) + } + case map[uint64]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntfV(v, e) + } + case map[uint64]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64StringV(v, e) + } + case map[uint64][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BytesV(v, e) + } + case map[uint64]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint8V(v, e) + } + case map[uint64]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint64V(v, e) + } + case map[uint64]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntV(v, e) + } + case map[uint64]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Int32V(v, e) + } + case map[uint64]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Float64V(v, e) + } + case map[uint64]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BoolV(v, e) + } + case map[int]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntfV(v, e) + } + case map[int]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntStringV(v, e) + } + case map[int][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBytesV(v, e) + } + case map[int]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint8V(v, e) + } + case map[int]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint64V(v, e) + } + case map[int]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntV(v, e) + } + case map[int]int32: + if v == nil { + e.e.writeNilMap() + } else { + 
ft.EncMapIntInt32V(v, e) + } + case map[int]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntFloat64V(v, e) + } + case map[int]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBoolV(v, e) + } + case map[int32]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntfV(v, e) + } + case map[int32]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32StringV(v, e) + } + case map[int32][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BytesV(v, e) + } + case map[int32]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint8V(v, e) + } + case map[int32]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint64V(v, e) + } + case map[int32]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntV(v, e) + } + case map[int32]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Int32V(v, e) + } + case map[int32]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Float64V(v, e) + } + case map[int32]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BoolV(v, e) + } + default: + _ = v + return false + } + return true +} + +func (e *encoderJsonBytes) fastpathEncSliceIntfR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonBytes + var v []interface{} + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]interface{}) + } + if f.ti.mbs { + ft.EncAsMapSliceIntfV(v, e) + return + } + ft.EncSliceIntfV(v, e) +} +func (fastpathETJsonBytes) EncSliceIntfV(v []interface{}, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonBytes) EncAsMapSliceIntfV(v []interface{}, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonBytes) fastpathEncSliceStringR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonBytes + var v []string + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]string) + } + if f.ti.mbs { + ft.EncAsMapSliceStringV(v, e) + return + } + ft.EncSliceStringV(v, e) +} +func (fastpathETJsonBytes) EncSliceStringV(v []string, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonBytes) EncAsMapSliceStringV(v []string, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonBytes) fastpathEncSliceBytesR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonBytes + var v [][]byte + if rv.Kind() == reflect.Array { + 
rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([][]byte) + } + if f.ti.mbs { + ft.EncAsMapSliceBytesV(v, e) + return + } + ft.EncSliceBytesV(v, e) +} +func (fastpathETJsonBytes) EncSliceBytesV(v [][]byte, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonBytes) EncAsMapSliceBytesV(v [][]byte, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonBytes) fastpathEncSliceFloat32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonBytes + var v []float32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float32) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat32V(v, e) + return + } + ft.EncSliceFloat32V(v, e) +} +func (fastpathETJsonBytes) EncSliceFloat32V(v []float32, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonBytes) EncAsMapSliceFloat32V(v []float32, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonBytes) fastpathEncSliceFloat64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonBytes + var v []float64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float64) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat64V(v, e) + return + } + ft.EncSliceFloat64V(v, e) +} +func (fastpathETJsonBytes) EncSliceFloat64V(v []float64, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonBytes) EncAsMapSliceFloat64V(v []float64, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonBytes) fastpathEncSliceUint8R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonBytes + var v []uint8 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint8) + } + if f.ti.mbs { + ft.EncAsMapSliceUint8V(v, e) + return + } + ft.EncSliceUint8V(v, e) +} +func (fastpathETJsonBytes) EncSliceUint8V(v []uint8, e *encoderJsonBytes) { + e.e.EncodeStringBytesRaw(v) +} +func (fastpathETJsonBytes) EncAsMapSliceUint8V(v []uint8, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + 
for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(uint64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonBytes) fastpathEncSliceUint64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonBytes + var v []uint64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint64) + } + if f.ti.mbs { + ft.EncAsMapSliceUint64V(v, e) + return + } + ft.EncSliceUint64V(v, e) +} +func (fastpathETJsonBytes) EncSliceUint64V(v []uint64, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonBytes) EncAsMapSliceUint64V(v []uint64, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonBytes) fastpathEncSliceIntR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonBytes + var v []int + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int) + } + if f.ti.mbs { + ft.EncAsMapSliceIntV(v, e) + return + } + ft.EncSliceIntV(v, e) +} +func (fastpathETJsonBytes) EncSliceIntV(v []int, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonBytes) EncAsMapSliceIntV(v []int, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonBytes) fastpathEncSliceInt32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonBytes + var v []int32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int32) + } + if f.ti.mbs { + ft.EncAsMapSliceInt32V(v, e) + return + } + ft.EncSliceInt32V(v, e) +} +func (fastpathETJsonBytes) EncSliceInt32V(v []int32, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonBytes) EncAsMapSliceInt32V(v []int32, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonBytes) fastpathEncSliceInt64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonBytes + var v []int64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int64) + } + if f.ti.mbs { + ft.EncAsMapSliceInt64V(v, e) + return + } + 
ft.EncSliceInt64V(v, e) +} +func (fastpathETJsonBytes) EncSliceInt64V(v []int64, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonBytes) EncAsMapSliceInt64V(v []int64, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonBytes) fastpathEncSliceBoolR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonBytes + var v []bool + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]bool) + } + if f.ti.mbs { + ft.EncAsMapSliceBoolV(v, e) + return + } + ft.EncSliceBoolV(v, e) +} +func (fastpathETJsonBytes) EncSliceBoolV(v []bool, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonBytes) EncAsMapSliceBoolV(v []bool, e *encoderJsonBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonBytes) fastpathEncMapStringIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (fastpathETJsonBytes) EncMapStringIntfV(v map[string]interface{}, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapStringStringR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (fastpathETJsonBytes) EncMapStringStringV(v map[string]string, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapStringBytesR(f *encFnInfo, rv reflect.Value) { + 
fastpathETJsonBytes{}.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e) +} +func (fastpathETJsonBytes) EncMapStringBytesV(v map[string][]byte, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapStringUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (fastpathETJsonBytes) EncMapStringUint8V(v map[string]uint8, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapStringUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (fastpathETJsonBytes) EncMapStringUint64V(v map[string]uint64, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapStringIntR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (fastpathETJsonBytes) EncMapStringIntV(v map[string]int, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapStringInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (fastpathETJsonBytes) EncMapStringInt32V(v map[string]int32, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapStringFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (fastpathETJsonBytes) EncMapStringFloat64V(v map[string]float64, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapStringBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (fastpathETJsonBytes) EncMapStringBoolV(v map[string]bool, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint8IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (fastpathETJsonBytes) EncMapUint8IntfV(v map[uint8]interface{}, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint8StringR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (fastpathETJsonBytes) EncMapUint8StringV(v map[uint8]string, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey 
+ e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint8BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e) +} +func (fastpathETJsonBytes) EncMapUint8BytesV(v map[uint8][]byte, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint8Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (fastpathETJsonBytes) EncMapUint8Uint8V(v map[uint8]uint8, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint8Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (fastpathETJsonBytes) EncMapUint8Uint64V(v map[uint8]uint64, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint8IntR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (fastpathETJsonBytes) EncMapUint8IntV(v map[uint8]int, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + 
e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint8Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (fastpathETJsonBytes) EncMapUint8Int32V(v map[uint8]int32, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint8Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (fastpathETJsonBytes) EncMapUint8Float64V(v map[uint8]float64, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint8BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (fastpathETJsonBytes) EncMapUint8BoolV(v map[uint8]bool, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint64IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (fastpathETJsonBytes) EncMapUint64IntfV(v map[uint64]interface{}, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint64StringR(f 
*encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (fastpathETJsonBytes) EncMapUint64StringV(v map[uint64]string, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint64BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e) +} +func (fastpathETJsonBytes) EncMapUint64BytesV(v map[uint64][]byte, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint64Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (fastpathETJsonBytes) EncMapUint64Uint8V(v map[uint64]uint8, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint64Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (fastpathETJsonBytes) EncMapUint64Uint64V(v map[uint64]uint64, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint64IntR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (fastpathETJsonBytes) EncMapUint64IntV(v map[uint64]int, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + 
e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint64Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (fastpathETJsonBytes) EncMapUint64Int32V(v map[uint64]int32, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint64Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (fastpathETJsonBytes) EncMapUint64Float64V(v map[uint64]float64, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapUint64BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (fastpathETJsonBytes) EncMapUint64BoolV(v map[uint64]bool, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapIntIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (fastpathETJsonBytes) EncMapIntIntfV(v map[int]interface{}, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if 
!e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapIntStringR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (fastpathETJsonBytes) EncMapIntStringV(v map[int]string, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapIntBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e) +} +func (fastpathETJsonBytes) EncMapIntBytesV(v map[int][]byte, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapIntUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (fastpathETJsonBytes) EncMapIntUint8V(v map[int]uint8, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapIntUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (fastpathETJsonBytes) EncMapIntUint64V(v map[int]uint64, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + 
} + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapIntIntR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (fastpathETJsonBytes) EncMapIntIntV(v map[int]int, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapIntInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (fastpathETJsonBytes) EncMapIntInt32V(v map[int]int32, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapIntFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (fastpathETJsonBytes) EncMapIntFloat64V(v map[int]float64, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapIntBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (fastpathETJsonBytes) EncMapIntBoolV(v map[int]bool, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapInt32IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (fastpathETJsonBytes) EncMapInt32IntfV(v map[int32]interface{}, e *encoderJsonBytes) { + if 
len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapInt32StringR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (fastpathETJsonBytes) EncMapInt32StringV(v map[int32]string, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapInt32BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e) +} +func (fastpathETJsonBytes) EncMapInt32BytesV(v map[int32][]byte, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapInt32Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (fastpathETJsonBytes) EncMapInt32Uint8V(v map[int32]uint8, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapInt32Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (fastpathETJsonBytes) EncMapInt32Uint64V(v map[int32]uint64, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v 
{ + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapInt32IntR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (fastpathETJsonBytes) EncMapInt32IntV(v map[int32]int, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapInt32Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (fastpathETJsonBytes) EncMapInt32Int32V(v map[int32]int32, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapInt32Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (fastpathETJsonBytes) EncMapInt32Float64V(v map[int32]float64, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonBytes) fastpathEncMapInt32BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonBytes{}.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (fastpathETJsonBytes) EncMapInt32BoolV(v map[int32]bool, e *encoderJsonBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = 
containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (helperDecDriverJsonBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderJsonBytes) bool { + var ft fastpathDTJsonBytes + var changed bool + var containerLen int + switch v := iv.(type) { + case []interface{}: + ft.DecSliceIntfN(v, d) + case *[]interface{}: + var v2 []interface{} + if v2, changed = ft.DecSliceIntfY(*v, d); changed { + *v = v2 + } + case []string: + ft.DecSliceStringN(v, d) + case *[]string: + var v2 []string + if v2, changed = ft.DecSliceStringY(*v, d); changed { + *v = v2 + } + case [][]byte: + ft.DecSliceBytesN(v, d) + case *[][]byte: + var v2 [][]byte + if v2, changed = ft.DecSliceBytesY(*v, d); changed { + *v = v2 + } + case []float32: + ft.DecSliceFloat32N(v, d) + case *[]float32: + var v2 []float32 + if v2, changed = ft.DecSliceFloat32Y(*v, d); changed { + *v = v2 + } + case []float64: + ft.DecSliceFloat64N(v, d) + case *[]float64: + var v2 []float64 + if v2, changed = ft.DecSliceFloat64Y(*v, d); changed { + *v = v2 + } + case []uint8: + ft.DecSliceUint8N(v, d) + case *[]uint8: + var v2 []uint8 + if v2, changed = ft.DecSliceUint8Y(*v, d); changed { + *v = v2 + } + case []uint64: + ft.DecSliceUint64N(v, d) + case *[]uint64: + var v2 []uint64 + if v2, changed = ft.DecSliceUint64Y(*v, d); changed { + *v = v2 + } + case []int: + ft.DecSliceIntN(v, d) + case *[]int: + var v2 []int + if v2, changed = ft.DecSliceIntY(*v, d); changed { + *v = v2 + } + case []int32: + ft.DecSliceInt32N(v, d) + case *[]int32: + var v2 []int32 + if v2, changed = ft.DecSliceInt32Y(*v, d); changed { + *v = v2 + } + case []int64: + ft.DecSliceInt64N(v, d) + case *[]int64: + var v2 []int64 + if v2, changed = ft.DecSliceInt64Y(*v, d); changed { + *v = v2 + } + case []bool: + ft.DecSliceBoolN(v, d) + case *[]bool: + var v2 []bool + if v2, changed = ft.DecSliceBoolY(*v, d); changed { + *v = v2 + } + case map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*v, 
containerLen, d) + } + d.mapEnd() + } + case map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = 
make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]float64: + if containerLen = 
d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if 
containerLen != 0 { + ft.DecMapUint64IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint8: + if 
containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*v, containerLen, d) + } + 
d.mapEnd() + } + case map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]float64, decInferLen(containerLen, 
d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*v, containerLen, d) + } + d.mapEnd() + } + default: + _ = v + return false + } + return true +} + +func (d *decoderJsonBytes) fastpathDecSliceIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]interface{}) + if vv, changed := ft.DecSliceIntfY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []interface{} + rvGetSlice4Array(rv, &v) + ft.DecSliceIntfN(v, d) + default: + ft.DecSliceIntfN(rv2i(rv).([]interface{}), d) + } +} +func (fastpathDTJsonBytes) DecSliceIntfY(v []interface{}, d *decoderJsonBytes) (v2 []interface{}, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []interface{}) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]interface{}, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + d.decode(&v[uint(j)]) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]interface{}{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonBytes) DecSliceIntfN(v []interface{}, d *decoderJsonBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + d.decode(&v[uint(j)]) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonBytes) fastpathDecSliceStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]string) + if vv, changed := ft.DecSliceStringY(*v, 
d); changed { + *v = vv + } + case reflect.Array: + var v []string + rvGetSlice4Array(rv, &v) + ft.DecSliceStringN(v, d) + default: + ft.DecSliceStringN(rv2i(rv).([]string), d) + } +} +func (fastpathDTJsonBytes) DecSliceStringY(v []string, d *decoderJsonBytes) (v2 []string, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []string) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]string, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, "")) + } + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]string{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonBytes) DecSliceStringN(v []string, d *decoderJsonBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonBytes) fastpathDecSliceBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[][]byte) + if vv, changed := ft.DecSliceBytesY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v [][]byte + rvGetSlice4Array(rv, &v) + ft.DecSliceBytesN(v, d) + default: + ft.DecSliceBytesN(rv2i(rv).([][]byte), d) + } +} +func (fastpathDTJsonBytes) DecSliceBytesY(v [][]byte, d *decoderJsonBytes) (v2 [][]byte, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst [][]byte) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } 
else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 24)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([][]byte, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([][]byte{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonBytes) DecSliceBytesN(v [][]byte, d *decoderJsonBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonBytes) fastpathDecSliceFloat32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float32) + if vv, changed := ft.DecSliceFloat32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float32 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat32N(v, d) + default: + ft.DecSliceFloat32N(rv2i(rv).([]float32), d) + } +} +func (fastpathDTJsonBytes) DecSliceFloat32Y(v []float32, d *decoderJsonBytes) (v2 []float32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = float32(d.d.DecodeFloat32()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonBytes) DecSliceFloat32N(v []float32, d *decoderJsonBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == 
valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = float32(d.d.DecodeFloat32()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonBytes) fastpathDecSliceFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float64) + if vv, changed := ft.DecSliceFloat64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float64 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat64N(v, d) + default: + ft.DecSliceFloat64N(rv2i(rv).([]float64), d) + } +} +func (fastpathDTJsonBytes) DecSliceFloat64Y(v []float64, d *decoderJsonBytes) (v2 []float64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeFloat64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonBytes) DecSliceFloat64N(v []float64, d *decoderJsonBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeFloat64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonBytes) fastpathDecSliceUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint8) + if vv, changed := ft.DecSliceUint8Y(*v, d); changed { + *v 
= vv + } + case reflect.Array: + var v []uint8 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint8N(v, d) + default: + ft.DecSliceUint8N(rv2i(rv).([]uint8), d) + } +} +func (fastpathDTJsonBytes) DecSliceUint8Y(v []uint8, d *decoderJsonBytes) (v2 []uint8, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + if ctyp != valueTypeMap { + var dbi dBytesIntoState + v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false) + return v2, dbi != dBytesIntoParamOut + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint8) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint8, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint8{}) + } + d.mapEnd() + return v, changed +} +func (fastpathDTJsonBytes) DecSliceUint8N(v []uint8, d *decoderJsonBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + if ctyp != valueTypeMap { + d.decodeBytesInto(v[:len(v):len(v)], true) + return + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + d.mapEnd() +} + +func (d *decoderJsonBytes) fastpathDecSliceUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint64) + if vv, changed := ft.DecSliceUint64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint64 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint64N(v, d) + default: + ft.DecSliceUint64N(rv2i(rv).([]uint64), d) + } +} +func (fastpathDTJsonBytes) DecSliceUint64Y(v []uint64, d *decoderJsonBytes) (v2 []uint64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeUint64() + } + if 
j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonBytes) DecSliceUint64N(v []uint64, d *decoderJsonBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeUint64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonBytes) fastpathDecSliceIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int) + if vv, changed := ft.DecSliceIntY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int + rvGetSlice4Array(rv, &v) + ft.DecSliceIntN(v, d) + default: + ft.DecSliceIntN(rv2i(rv).([]int), d) + } +} +func (fastpathDTJsonBytes) DecSliceIntY(v []int, d *decoderJsonBytes) (v2 []int, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonBytes) DecSliceIntN(v []int, d *decoderJsonBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } else { + 
d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonBytes) fastpathDecSliceInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int32) + if vv, changed := ft.DecSliceInt32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int32 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt32N(v, d) + default: + ft.DecSliceInt32N(rv2i(rv).([]int32), d) + } +} +func (fastpathDTJsonBytes) DecSliceInt32Y(v []int32, d *decoderJsonBytes) (v2 []int32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonBytes) DecSliceInt32N(v []int32, d *decoderJsonBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonBytes) fastpathDecSliceInt64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int64) + if vv, changed := ft.DecSliceInt64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int64 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt64N(v, d) + default: + ft.DecSliceInt64N(rv2i(rv).([]int64), d) + } +} +func (fastpathDTJsonBytes) DecSliceInt64Y(v []int64, d *decoderJsonBytes) (v2 []int64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = 
d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeInt64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonBytes) DecSliceInt64N(v []int64, d *decoderJsonBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeInt64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonBytes) fastpathDecSliceBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]bool) + if vv, changed := ft.DecSliceBoolY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []bool + rvGetSlice4Array(rv, &v) + ft.DecSliceBoolN(v, d) + default: + ft.DecSliceBoolN(rv2i(rv).([]bool), d) + } +} +func (fastpathDTJsonBytes) DecSliceBoolY(v []bool, d *decoderJsonBytes) (v2 []bool, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []bool) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]bool, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, false)) + } + v[uint(j)] = d.d.DecodeBool() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]bool{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + 
return v, changed +} +func (fastpathDTJsonBytes) DecSliceBoolN(v []bool, d *decoderJsonBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeBool() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} +func (d *decoderJsonBytes) fastpathDecMapStringIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]interface{}) + if *vp == nil { + *vp = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntfL(rv2i(rv).(map[string]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapStringIntfL(v map[string]interface{}, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderJsonBytes) fastpathDecMapStringStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]string) + if *vp == nil { + *vp = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringStringL(rv2i(rv).(map[string]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapStringStringL(v map[string]string, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderJsonBytes) fastpathDecMapStringBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string][]byte) + if *vp == nil { + *vp = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBytesL(rv2i(rv).(map[string][]byte), containerLen, d) + } + 
d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapStringBytesL(v map[string][]byte, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderJsonBytes) fastpathDecMapStringUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint8) + if *vp == nil { + *vp = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint8L(rv2i(rv).(map[string]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapStringUint8L(v map[string]uint8, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderJsonBytes) fastpathDecMapStringUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint64) + if *vp == nil { + *vp = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint64L(rv2i(rv).(map[string]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapStringUint64L(v map[string]uint64, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderJsonBytes) fastpathDecMapStringIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int) + if *vp == nil { + *vp = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntL(rv2i(rv).(map[string]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapStringIntL(v map[string]int, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), 
intBitsize)) + } +} +func (d *decoderJsonBytes) fastpathDecMapStringInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int32) + if *vp == nil { + *vp = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringInt32L(rv2i(rv).(map[string]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapStringInt32L(v map[string]int32, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderJsonBytes) fastpathDecMapStringFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]float64) + if *vp == nil { + *vp = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringFloat64L(rv2i(rv).(map[string]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapStringFloat64L(v map[string]float64, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderJsonBytes) fastpathDecMapStringBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]bool) + if *vp == nil { + *vp = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBoolL(rv2i(rv).(map[string]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapStringBoolL(v map[string]bool, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderJsonBytes) fastpathDecMapUint8IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]interface{}) + if *vp == nil { + *vp = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntfL(rv2i(rv).(map[uint8]interface{}), containerLen, d) + } + d.mapEnd() +} +func 
(fastpathDTJsonBytes) DecMapUint8IntfL(v map[uint8]interface{}, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderJsonBytes) fastpathDecMapUint8StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]string) + if *vp == nil { + *vp = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8StringL(rv2i(rv).(map[uint8]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint8StringL(v map[uint8]string, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderJsonBytes) fastpathDecMapUint8BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8][]byte) + if *vp == nil { + *vp = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BytesL(rv2i(rv).(map[uint8][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint8BytesL(v map[uint8][]byte, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderJsonBytes) fastpathDecMapUint8Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint8) + if *vp == nil { + *vp = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint8L(rv2i(rv).(map[uint8]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint8Uint8L(v map[uint8]uint8, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 
0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderJsonBytes) fastpathDecMapUint8Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint64) + if *vp == nil { + *vp = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint64L(rv2i(rv).(map[uint8]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint8Uint64L(v map[uint8]uint64, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderJsonBytes) fastpathDecMapUint8IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int) + if *vp == nil { + *vp = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntL(rv2i(rv).(map[uint8]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint8IntL(v map[uint8]int, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderJsonBytes) fastpathDecMapUint8Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int32) + if *vp == nil { + *vp = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Int32L(rv2i(rv).(map[uint8]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint8Int32L(v map[uint8]int32, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderJsonBytes) fastpathDecMapUint8Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]float64) + if *vp == nil { + *vp = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 
{ + ft.DecMapUint8Float64L(rv2i(rv).(map[uint8]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint8Float64L(v map[uint8]float64, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderJsonBytes) fastpathDecMapUint8BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]bool) + if *vp == nil { + *vp = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BoolL(rv2i(rv).(map[uint8]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint8BoolL(v map[uint8]bool, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderJsonBytes) fastpathDecMapUint64IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]interface{}) + if *vp == nil { + *vp = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntfL(rv2i(rv).(map[uint64]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint64IntfL(v map[uint64]interface{}, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderJsonBytes) fastpathDecMapUint64StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]string) + if *vp == nil { + *vp = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64StringL(rv2i(rv).(map[uint64]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint64StringL(v map[uint64]string, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + 
d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderJsonBytes) fastpathDecMapUint64BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64][]byte) + if *vp == nil { + *vp = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BytesL(rv2i(rv).(map[uint64][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint64BytesL(v map[uint64][]byte, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderJsonBytes) fastpathDecMapUint64Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint8) + if *vp == nil { + *vp = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint8L(rv2i(rv).(map[uint64]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint64Uint8L(v map[uint64]uint8, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderJsonBytes) fastpathDecMapUint64Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint64) + if *vp == nil { + *vp = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint64L(rv2i(rv).(map[uint64]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint64Uint64L(v map[uint64]uint64, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderJsonBytes) fastpathDecMapUint64IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int) + if *vp == nil { + *vp = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + 
ft.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint64IntL(v map[uint64]int, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderJsonBytes) fastpathDecMapUint64Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int32) + if *vp == nil { + *vp = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint64Int32L(v map[uint64]int32, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderJsonBytes) fastpathDecMapUint64Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]float64) + if *vp == nil { + *vp = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderJsonBytes) fastpathDecMapUint64BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]bool) + if *vp == nil { + *vp = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderJsonBytes) fastpathDecMapIntIntfR(f *decFnInfo, rv reflect.Value) { + var ft 
fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]interface{}) + if *vp == nil { + *vp = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderJsonBytes) fastpathDecMapIntStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]string) + if *vp == nil { + *vp = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapIntStringL(v map[int]string, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderJsonBytes) fastpathDecMapIntBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int][]byte) + if *vp == nil { + *vp = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapIntBytesL(v map[int][]byte, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderJsonBytes) fastpathDecMapIntUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint8) + if *vp == nil { + *vp = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + 
ft.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapIntUint8L(v map[int]uint8, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderJsonBytes) fastpathDecMapIntUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint64) + if *vp == nil { + *vp = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapIntUint64L(v map[int]uint64, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderJsonBytes) fastpathDecMapIntIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int) + if *vp == nil { + *vp = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapIntIntL(v map[int]int, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderJsonBytes) fastpathDecMapIntInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int32) + if *vp == nil { + *vp = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapIntInt32L(v map[int]int32, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderJsonBytes) fastpathDecMapIntFloat64R(f 
*decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]float64) + if *vp == nil { + *vp = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapIntFloat64L(v map[int]float64, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderJsonBytes) fastpathDecMapIntBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]bool) + if *vp == nil { + *vp = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapIntBoolL(v map[int]bool, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderJsonBytes) fastpathDecMapInt32IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]interface{}) + if *vp == nil { + *vp = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderJsonBytes) fastpathDecMapInt32StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]string) + if *vp == nil { + *vp = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d) + } + d.mapEnd() +} +func 
(fastpathDTJsonBytes) DecMapInt32StringL(v map[int32]string, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderJsonBytes) fastpathDecMapInt32BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32][]byte) + if *vp == nil { + *vp = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderJsonBytes) fastpathDecMapInt32Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint8) + if *vp == nil { + *vp = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderJsonBytes) fastpathDecMapInt32Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint64) + if *vp == nil { + *vp = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d 
*decoderJsonBytes) fastpathDecMapInt32IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int) + if *vp == nil { + *vp = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapInt32IntL(v map[int32]int, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderJsonBytes) fastpathDecMapInt32Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int32) + if *vp == nil { + *vp = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapInt32Int32L(v map[int32]int32, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderJsonBytes) fastpathDecMapInt32Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]float64) + if *vp == nil { + *vp = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderJsonBytes) fastpathDecMapInt32BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]bool) + if *vp == nil { + *vp = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonBytes) DecMapInt32BoolL(v 
map[int32]bool, containerLen int, d *decoderJsonBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} + +type fastpathEJsonIO struct { + rtid uintptr + rt reflect.Type + encfn func(*encoderJsonIO, *encFnInfo, reflect.Value) +} +type fastpathDJsonIO struct { + rtid uintptr + rt reflect.Type + decfn func(*decoderJsonIO, *decFnInfo, reflect.Value) +} +type fastpathEsJsonIO [56]fastpathEJsonIO +type fastpathDsJsonIO [56]fastpathDJsonIO +type fastpathETJsonIO struct{} +type fastpathDTJsonIO struct{} + +func (helperEncDriverJsonIO) fastpathEList() *fastpathEsJsonIO { + var i uint = 0 + var s fastpathEsJsonIO + fn := func(v interface{}, fe func(*encoderJsonIO, *encFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathEJsonIO{rt2id(xrt), xrt, fe} + i++ + } + + fn([]interface{}(nil), (*encoderJsonIO).fastpathEncSliceIntfR) + fn([]string(nil), (*encoderJsonIO).fastpathEncSliceStringR) + fn([][]byte(nil), (*encoderJsonIO).fastpathEncSliceBytesR) + fn([]float32(nil), (*encoderJsonIO).fastpathEncSliceFloat32R) + fn([]float64(nil), (*encoderJsonIO).fastpathEncSliceFloat64R) + fn([]uint8(nil), (*encoderJsonIO).fastpathEncSliceUint8R) + fn([]uint64(nil), (*encoderJsonIO).fastpathEncSliceUint64R) + fn([]int(nil), (*encoderJsonIO).fastpathEncSliceIntR) + fn([]int32(nil), (*encoderJsonIO).fastpathEncSliceInt32R) + fn([]int64(nil), (*encoderJsonIO).fastpathEncSliceInt64R) + fn([]bool(nil), (*encoderJsonIO).fastpathEncSliceBoolR) + + fn(map[string]interface{}(nil), (*encoderJsonIO).fastpathEncMapStringIntfR) + fn(map[string]string(nil), (*encoderJsonIO).fastpathEncMapStringStringR) + fn(map[string][]byte(nil), (*encoderJsonIO).fastpathEncMapStringBytesR) + fn(map[string]uint8(nil), (*encoderJsonIO).fastpathEncMapStringUint8R) + fn(map[string]uint64(nil), (*encoderJsonIO).fastpathEncMapStringUint64R) + fn(map[string]int(nil), (*encoderJsonIO).fastpathEncMapStringIntR) + fn(map[string]int32(nil), (*encoderJsonIO).fastpathEncMapStringInt32R) + fn(map[string]float64(nil), (*encoderJsonIO).fastpathEncMapStringFloat64R) + fn(map[string]bool(nil), (*encoderJsonIO).fastpathEncMapStringBoolR) + fn(map[uint8]interface{}(nil), (*encoderJsonIO).fastpathEncMapUint8IntfR) + fn(map[uint8]string(nil), (*encoderJsonIO).fastpathEncMapUint8StringR) + fn(map[uint8][]byte(nil), (*encoderJsonIO).fastpathEncMapUint8BytesR) + fn(map[uint8]uint8(nil), (*encoderJsonIO).fastpathEncMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*encoderJsonIO).fastpathEncMapUint8Uint64R) + fn(map[uint8]int(nil), (*encoderJsonIO).fastpathEncMapUint8IntR) + fn(map[uint8]int32(nil), (*encoderJsonIO).fastpathEncMapUint8Int32R) + fn(map[uint8]float64(nil), (*encoderJsonIO).fastpathEncMapUint8Float64R) + fn(map[uint8]bool(nil), (*encoderJsonIO).fastpathEncMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*encoderJsonIO).fastpathEncMapUint64IntfR) + fn(map[uint64]string(nil), (*encoderJsonIO).fastpathEncMapUint64StringR) + fn(map[uint64][]byte(nil), (*encoderJsonIO).fastpathEncMapUint64BytesR) + fn(map[uint64]uint8(nil), (*encoderJsonIO).fastpathEncMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*encoderJsonIO).fastpathEncMapUint64Uint64R) + fn(map[uint64]int(nil), (*encoderJsonIO).fastpathEncMapUint64IntR) + fn(map[uint64]int32(nil), 
(*encoderJsonIO).fastpathEncMapUint64Int32R) + fn(map[uint64]float64(nil), (*encoderJsonIO).fastpathEncMapUint64Float64R) + fn(map[uint64]bool(nil), (*encoderJsonIO).fastpathEncMapUint64BoolR) + fn(map[int]interface{}(nil), (*encoderJsonIO).fastpathEncMapIntIntfR) + fn(map[int]string(nil), (*encoderJsonIO).fastpathEncMapIntStringR) + fn(map[int][]byte(nil), (*encoderJsonIO).fastpathEncMapIntBytesR) + fn(map[int]uint8(nil), (*encoderJsonIO).fastpathEncMapIntUint8R) + fn(map[int]uint64(nil), (*encoderJsonIO).fastpathEncMapIntUint64R) + fn(map[int]int(nil), (*encoderJsonIO).fastpathEncMapIntIntR) + fn(map[int]int32(nil), (*encoderJsonIO).fastpathEncMapIntInt32R) + fn(map[int]float64(nil), (*encoderJsonIO).fastpathEncMapIntFloat64R) + fn(map[int]bool(nil), (*encoderJsonIO).fastpathEncMapIntBoolR) + fn(map[int32]interface{}(nil), (*encoderJsonIO).fastpathEncMapInt32IntfR) + fn(map[int32]string(nil), (*encoderJsonIO).fastpathEncMapInt32StringR) + fn(map[int32][]byte(nil), (*encoderJsonIO).fastpathEncMapInt32BytesR) + fn(map[int32]uint8(nil), (*encoderJsonIO).fastpathEncMapInt32Uint8R) + fn(map[int32]uint64(nil), (*encoderJsonIO).fastpathEncMapInt32Uint64R) + fn(map[int32]int(nil), (*encoderJsonIO).fastpathEncMapInt32IntR) + fn(map[int32]int32(nil), (*encoderJsonIO).fastpathEncMapInt32Int32R) + fn(map[int32]float64(nil), (*encoderJsonIO).fastpathEncMapInt32Float64R) + fn(map[int32]bool(nil), (*encoderJsonIO).fastpathEncMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperDecDriverJsonIO) fastpathDList() *fastpathDsJsonIO { + var i uint = 0 + var s fastpathDsJsonIO + fn := func(v interface{}, fd func(*decoderJsonIO, *decFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathDJsonIO{rt2id(xrt), xrt, fd} + i++ + } + + fn([]interface{}(nil), (*decoderJsonIO).fastpathDecSliceIntfR) + fn([]string(nil), (*decoderJsonIO).fastpathDecSliceStringR) + fn([][]byte(nil), (*decoderJsonIO).fastpathDecSliceBytesR) + fn([]float32(nil), (*decoderJsonIO).fastpathDecSliceFloat32R) + fn([]float64(nil), (*decoderJsonIO).fastpathDecSliceFloat64R) + fn([]uint8(nil), (*decoderJsonIO).fastpathDecSliceUint8R) + fn([]uint64(nil), (*decoderJsonIO).fastpathDecSliceUint64R) + fn([]int(nil), (*decoderJsonIO).fastpathDecSliceIntR) + fn([]int32(nil), (*decoderJsonIO).fastpathDecSliceInt32R) + fn([]int64(nil), (*decoderJsonIO).fastpathDecSliceInt64R) + fn([]bool(nil), (*decoderJsonIO).fastpathDecSliceBoolR) + + fn(map[string]interface{}(nil), (*decoderJsonIO).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*decoderJsonIO).fastpathDecMapStringStringR) + fn(map[string][]byte(nil), (*decoderJsonIO).fastpathDecMapStringBytesR) + fn(map[string]uint8(nil), (*decoderJsonIO).fastpathDecMapStringUint8R) + fn(map[string]uint64(nil), (*decoderJsonIO).fastpathDecMapStringUint64R) + fn(map[string]int(nil), (*decoderJsonIO).fastpathDecMapStringIntR) + fn(map[string]int32(nil), (*decoderJsonIO).fastpathDecMapStringInt32R) + fn(map[string]float64(nil), (*decoderJsonIO).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*decoderJsonIO).fastpathDecMapStringBoolR) + fn(map[uint8]interface{}(nil), (*decoderJsonIO).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*decoderJsonIO).fastpathDecMapUint8StringR) + fn(map[uint8][]byte(nil), (*decoderJsonIO).fastpathDecMapUint8BytesR) + fn(map[uint8]uint8(nil), (*decoderJsonIO).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*decoderJsonIO).fastpathDecMapUint8Uint64R) + fn(map[uint8]int(nil), 
(*decoderJsonIO).fastpathDecMapUint8IntR) + fn(map[uint8]int32(nil), (*decoderJsonIO).fastpathDecMapUint8Int32R) + fn(map[uint8]float64(nil), (*decoderJsonIO).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*decoderJsonIO).fastpathDecMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*decoderJsonIO).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*decoderJsonIO).fastpathDecMapUint64StringR) + fn(map[uint64][]byte(nil), (*decoderJsonIO).fastpathDecMapUint64BytesR) + fn(map[uint64]uint8(nil), (*decoderJsonIO).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*decoderJsonIO).fastpathDecMapUint64Uint64R) + fn(map[uint64]int(nil), (*decoderJsonIO).fastpathDecMapUint64IntR) + fn(map[uint64]int32(nil), (*decoderJsonIO).fastpathDecMapUint64Int32R) + fn(map[uint64]float64(nil), (*decoderJsonIO).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*decoderJsonIO).fastpathDecMapUint64BoolR) + fn(map[int]interface{}(nil), (*decoderJsonIO).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*decoderJsonIO).fastpathDecMapIntStringR) + fn(map[int][]byte(nil), (*decoderJsonIO).fastpathDecMapIntBytesR) + fn(map[int]uint8(nil), (*decoderJsonIO).fastpathDecMapIntUint8R) + fn(map[int]uint64(nil), (*decoderJsonIO).fastpathDecMapIntUint64R) + fn(map[int]int(nil), (*decoderJsonIO).fastpathDecMapIntIntR) + fn(map[int]int32(nil), (*decoderJsonIO).fastpathDecMapIntInt32R) + fn(map[int]float64(nil), (*decoderJsonIO).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*decoderJsonIO).fastpathDecMapIntBoolR) + fn(map[int32]interface{}(nil), (*decoderJsonIO).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*decoderJsonIO).fastpathDecMapInt32StringR) + fn(map[int32][]byte(nil), (*decoderJsonIO).fastpathDecMapInt32BytesR) + fn(map[int32]uint8(nil), (*decoderJsonIO).fastpathDecMapInt32Uint8R) + fn(map[int32]uint64(nil), (*decoderJsonIO).fastpathDecMapInt32Uint64R) + fn(map[int32]int(nil), (*decoderJsonIO).fastpathDecMapInt32IntR) + fn(map[int32]int32(nil), (*decoderJsonIO).fastpathDecMapInt32Int32R) + fn(map[int32]float64(nil), (*decoderJsonIO).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*decoderJsonIO).fastpathDecMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperEncDriverJsonIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderJsonIO) bool { + var ft fastpathETJsonIO + switch v := iv.(type) { + case []interface{}: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntfV(v, e) + } + case []string: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceStringV(v, e) + } + case [][]byte: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBytesV(v, e) + } + case []float32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat32V(v, e) + } + case []float64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat64V(v, e) + } + case []uint8: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint8V(v, e) + } + case []uint64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint64V(v, e) + } + case []int: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntV(v, e) + } + case []int32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt32V(v, e) + } + case []int64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt64V(v, e) + } + case []bool: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBoolV(v, e) + } + case map[string]interface{}: + if v == nil { + e.e.writeNilMap() + } else { 
+ ft.EncMapStringIntfV(v, e) + } + case map[string]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringStringV(v, e) + } + case map[string][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBytesV(v, e) + } + case map[string]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint8V(v, e) + } + case map[string]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint64V(v, e) + } + case map[string]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntV(v, e) + } + case map[string]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringInt32V(v, e) + } + case map[string]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringFloat64V(v, e) + } + case map[string]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBoolV(v, e) + } + case map[uint8]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntfV(v, e) + } + case map[uint8]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8StringV(v, e) + } + case map[uint8][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BytesV(v, e) + } + case map[uint8]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint8V(v, e) + } + case map[uint8]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint64V(v, e) + } + case map[uint8]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntV(v, e) + } + case map[uint8]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Int32V(v, e) + } + case map[uint8]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Float64V(v, e) + } + case map[uint8]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BoolV(v, e) + } + case map[uint64]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntfV(v, e) + } + case map[uint64]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64StringV(v, e) + } + case map[uint64][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BytesV(v, e) + } + case map[uint64]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint8V(v, e) + } + case map[uint64]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint64V(v, e) + } + case map[uint64]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntV(v, e) + } + case map[uint64]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Int32V(v, e) + } + case map[uint64]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Float64V(v, e) + } + case map[uint64]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BoolV(v, e) + } + case map[int]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntfV(v, e) + } + case map[int]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntStringV(v, e) + } + case map[int][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBytesV(v, e) + } + case map[int]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint8V(v, e) + } + case map[int]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint64V(v, e) + } + case map[int]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntV(v, e) + } + case map[int]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntInt32V(v, e) + } + case map[int]float64: + if v == nil { + 
e.e.writeNilMap() + } else { + ft.EncMapIntFloat64V(v, e) + } + case map[int]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBoolV(v, e) + } + case map[int32]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntfV(v, e) + } + case map[int32]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32StringV(v, e) + } + case map[int32][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BytesV(v, e) + } + case map[int32]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint8V(v, e) + } + case map[int32]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint64V(v, e) + } + case map[int32]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntV(v, e) + } + case map[int32]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Int32V(v, e) + } + case map[int32]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Float64V(v, e) + } + case map[int32]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BoolV(v, e) + } + default: + _ = v + return false + } + return true +} + +func (e *encoderJsonIO) fastpathEncSliceIntfR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonIO + var v []interface{} + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]interface{}) + } + if f.ti.mbs { + ft.EncAsMapSliceIntfV(v, e) + return + } + ft.EncSliceIntfV(v, e) +} +func (fastpathETJsonIO) EncSliceIntfV(v []interface{}, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonIO) EncAsMapSliceIntfV(v []interface{}, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonIO) fastpathEncSliceStringR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonIO + var v []string + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]string) + } + if f.ti.mbs { + ft.EncAsMapSliceStringV(v, e) + return + } + ft.EncSliceStringV(v, e) +} +func (fastpathETJsonIO) EncSliceStringV(v []string, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonIO) EncAsMapSliceStringV(v []string, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonIO) fastpathEncSliceBytesR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonIO + var v [][]byte + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([][]byte) + } + if f.ti.mbs { + ft.EncAsMapSliceBytesV(v, e) + 
return + } + ft.EncSliceBytesV(v, e) +} +func (fastpathETJsonIO) EncSliceBytesV(v [][]byte, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonIO) EncAsMapSliceBytesV(v [][]byte, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonIO) fastpathEncSliceFloat32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonIO + var v []float32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float32) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat32V(v, e) + return + } + ft.EncSliceFloat32V(v, e) +} +func (fastpathETJsonIO) EncSliceFloat32V(v []float32, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonIO) EncAsMapSliceFloat32V(v []float32, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonIO) fastpathEncSliceFloat64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonIO + var v []float64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float64) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat64V(v, e) + return + } + ft.EncSliceFloat64V(v, e) +} +func (fastpathETJsonIO) EncSliceFloat64V(v []float64, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonIO) EncAsMapSliceFloat64V(v []float64, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonIO) fastpathEncSliceUint8R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonIO + var v []uint8 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint8) + } + if f.ti.mbs { + ft.EncAsMapSliceUint8V(v, e) + return + } + ft.EncSliceUint8V(v, e) +} +func (fastpathETJsonIO) EncSliceUint8V(v []uint8, e *encoderJsonIO) { + e.e.EncodeStringBytesRaw(v) +} +func (fastpathETJsonIO) EncAsMapSliceUint8V(v []uint8, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(uint64(v[j])) + } + e.c = 0 + 
e.e.WriteMapEnd() +} + +func (e *encoderJsonIO) fastpathEncSliceUint64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonIO + var v []uint64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint64) + } + if f.ti.mbs { + ft.EncAsMapSliceUint64V(v, e) + return + } + ft.EncSliceUint64V(v, e) +} +func (fastpathETJsonIO) EncSliceUint64V(v []uint64, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonIO) EncAsMapSliceUint64V(v []uint64, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonIO) fastpathEncSliceIntR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonIO + var v []int + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int) + } + if f.ti.mbs { + ft.EncAsMapSliceIntV(v, e) + return + } + ft.EncSliceIntV(v, e) +} +func (fastpathETJsonIO) EncSliceIntV(v []int, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonIO) EncAsMapSliceIntV(v []int, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonIO) fastpathEncSliceInt32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonIO + var v []int32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int32) + } + if f.ti.mbs { + ft.EncAsMapSliceInt32V(v, e) + return + } + ft.EncSliceInt32V(v, e) +} +func (fastpathETJsonIO) EncSliceInt32V(v []int32, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonIO) EncAsMapSliceInt32V(v []int32, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonIO) fastpathEncSliceInt64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonIO + var v []int64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int64) + } + if f.ti.mbs { + ft.EncAsMapSliceInt64V(v, e) + return + } + ft.EncSliceInt64V(v, e) +} +func (fastpathETJsonIO) EncSliceInt64V(v []int64, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + 
e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonIO) EncAsMapSliceInt64V(v []int64, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonIO) fastpathEncSliceBoolR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETJsonIO + var v []bool + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]bool) + } + if f.ti.mbs { + ft.EncAsMapSliceBoolV(v, e) + return + } + ft.EncSliceBoolV(v, e) +} +func (fastpathETJsonIO) EncSliceBoolV(v []bool, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETJsonIO) EncAsMapSliceBoolV(v []bool, e *encoderJsonIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonIO) fastpathEncMapStringIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (fastpathETJsonIO) EncMapStringIntfV(v map[string]interface{}, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapStringStringR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (fastpathETJsonIO) EncMapStringStringV(v map[string]string, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapStringBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e) +} +func (fastpathETJsonIO) EncMapStringBytesV(v map[string][]byte, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, 
len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapStringUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (fastpathETJsonIO) EncMapStringUint8V(v map[string]uint8, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapStringUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (fastpathETJsonIO) EncMapStringUint64V(v map[string]uint64, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapStringIntR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (fastpathETJsonIO) EncMapStringIntV(v map[string]int, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapStringInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (fastpathETJsonIO) EncMapStringInt32V(v map[string]int32, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + 
e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapStringFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (fastpathETJsonIO) EncMapStringFloat64V(v map[string]float64, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapStringBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (fastpathETJsonIO) EncMapStringBoolV(v map[string]bool, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint8IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (fastpathETJsonIO) EncMapUint8IntfV(v map[uint8]interface{}, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint8StringR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (fastpathETJsonIO) EncMapUint8StringV(v map[uint8]string, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint8BytesR(f 
*encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e) +} +func (fastpathETJsonIO) EncMapUint8BytesV(v map[uint8][]byte, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint8Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (fastpathETJsonIO) EncMapUint8Uint8V(v map[uint8]uint8, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint8Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (fastpathETJsonIO) EncMapUint8Uint64V(v map[uint8]uint64, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint8IntR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (fastpathETJsonIO) EncMapUint8IntV(v map[uint8]int, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint8Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (fastpathETJsonIO) EncMapUint8Int32V(v map[uint8]int32, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint8Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (fastpathETJsonIO) EncMapUint8Float64V(v map[uint8]float64, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint8BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (fastpathETJsonIO) EncMapUint8BoolV(v map[uint8]bool, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint64IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (fastpathETJsonIO) EncMapUint64IntfV(v map[uint64]interface{}, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint64StringR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (fastpathETJsonIO) EncMapUint64StringV(v map[uint64]string, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) 
+ e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint64BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e) +} +func (fastpathETJsonIO) EncMapUint64BytesV(v map[uint64][]byte, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint64Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (fastpathETJsonIO) EncMapUint64Uint8V(v map[uint64]uint8, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint64Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (fastpathETJsonIO) EncMapUint64Uint64V(v map[uint64]uint64, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint64IntR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (fastpathETJsonIO) EncMapUint64IntV(v map[uint64]int, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) 
fastpathEncMapUint64Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (fastpathETJsonIO) EncMapUint64Int32V(v map[uint64]int32, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint64Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (fastpathETJsonIO) EncMapUint64Float64V(v map[uint64]float64, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapUint64BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (fastpathETJsonIO) EncMapUint64BoolV(v map[uint64]bool, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapIntIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (fastpathETJsonIO) EncMapIntIntfV(v map[int]interface{}, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapIntStringR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (fastpathETJsonIO) EncMapIntStringV(v map[int]string, e *encoderJsonIO) { + if len(v) == 0 { + 
e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapIntBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e) +} +func (fastpathETJsonIO) EncMapIntBytesV(v map[int][]byte, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapIntUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (fastpathETJsonIO) EncMapIntUint8V(v map[int]uint8, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapIntUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (fastpathETJsonIO) EncMapIntUint64V(v map[int]uint64, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapIntIntR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (fastpathETJsonIO) EncMapIntIntV(v map[int]int, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 
+ for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapIntInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (fastpathETJsonIO) EncMapIntInt32V(v map[int]int32, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapIntFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (fastpathETJsonIO) EncMapIntFloat64V(v map[int]float64, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapIntBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (fastpathETJsonIO) EncMapIntBoolV(v map[int]bool, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapInt32IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (fastpathETJsonIO) EncMapInt32IntfV(v map[int32]interface{}, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) 
fastpathEncMapInt32StringR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (fastpathETJsonIO) EncMapInt32StringV(v map[int32]string, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapInt32BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e) +} +func (fastpathETJsonIO) EncMapInt32BytesV(v map[int32][]byte, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapInt32Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (fastpathETJsonIO) EncMapInt32Uint8V(v map[int32]uint8, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapInt32Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (fastpathETJsonIO) EncMapInt32Uint64V(v map[int32]uint64, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapInt32IntR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (fastpathETJsonIO) EncMapInt32IntV(v map[int32]int, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) 
+ if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapInt32Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (fastpathETJsonIO) EncMapInt32Int32V(v map[int32]int32, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapInt32Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (fastpathETJsonIO) EncMapInt32Float64V(v map[int32]float64, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderJsonIO) fastpathEncMapInt32BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETJsonIO{}.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (fastpathETJsonIO) EncMapInt32BoolV(v map[int32]bool, e *encoderJsonIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (helperDecDriverJsonIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderJsonIO) bool { + var ft fastpathDTJsonIO + var changed bool + var containerLen int + switch v := iv.(type) { + case []interface{}: + ft.DecSliceIntfN(v, d) + case *[]interface{}: + var v2 []interface{} + if v2, changed = ft.DecSliceIntfY(*v, d); changed { + *v = v2 + } + case []string: + ft.DecSliceStringN(v, d) + case *[]string: + var v2 []string + if v2, changed = ft.DecSliceStringY(*v, d); changed { + *v = v2 + } + case [][]byte: + ft.DecSliceBytesN(v, d) + case *[][]byte: + var v2 [][]byte + if v2, changed = 
ft.DecSliceBytesY(*v, d); changed { + *v = v2 + } + case []float32: + ft.DecSliceFloat32N(v, d) + case *[]float32: + var v2 []float32 + if v2, changed = ft.DecSliceFloat32Y(*v, d); changed { + *v = v2 + } + case []float64: + ft.DecSliceFloat64N(v, d) + case *[]float64: + var v2 []float64 + if v2, changed = ft.DecSliceFloat64Y(*v, d); changed { + *v = v2 + } + case []uint8: + ft.DecSliceUint8N(v, d) + case *[]uint8: + var v2 []uint8 + if v2, changed = ft.DecSliceUint8Y(*v, d); changed { + *v = v2 + } + case []uint64: + ft.DecSliceUint64N(v, d) + case *[]uint64: + var v2 []uint64 + if v2, changed = ft.DecSliceUint64Y(*v, d); changed { + *v = v2 + } + case []int: + ft.DecSliceIntN(v, d) + case *[]int: + var v2 []int + if v2, changed = ft.DecSliceIntY(*v, d); changed { + *v = v2 + } + case []int32: + ft.DecSliceInt32N(v, d) + case *[]int32: + var v2 []int32 + if v2, changed = ft.DecSliceInt32Y(*v, d); changed { + *v = v2 + } + case []int64: + ft.DecSliceInt64N(v, d) + case *[]int64: + var v2 []int64 + if v2, changed = ft.DecSliceInt64Y(*v, d); changed { + *v = v2 + } + case []bool: + ft.DecSliceBoolN(v, d) + case *[]bool: + var v2 []bool + if v2, changed = ft.DecSliceBoolY(*v, d); changed { + *v = v2 + } + case map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint64L(v, containerLen, d) + } + d.mapEnd() + } + case 
*map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); 
containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*v, containerLen, d) + } + d.mapEnd() + } + 
case map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int32, 
decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == 
containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32][]byte: + if 
containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*v, containerLen, d) + } + d.mapEnd() + } + default: + _ = v + return false + } + return true +} + +func (d *decoderJsonIO) fastpathDecSliceIntfR(f *decFnInfo, rv reflect.Value) { + var ft 
fastpathDTJsonIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]interface{}) + if vv, changed := ft.DecSliceIntfY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []interface{} + rvGetSlice4Array(rv, &v) + ft.DecSliceIntfN(v, d) + default: + ft.DecSliceIntfN(rv2i(rv).([]interface{}), d) + } +} +func (fastpathDTJsonIO) DecSliceIntfY(v []interface{}, d *decoderJsonIO) (v2 []interface{}, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []interface{}) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]interface{}, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + d.decode(&v[uint(j)]) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]interface{}{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonIO) DecSliceIntfN(v []interface{}, d *decoderJsonIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + d.decode(&v[uint(j)]) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonIO) fastpathDecSliceStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]string) + if vv, changed := ft.DecSliceStringY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []string + rvGetSlice4Array(rv, &v) + ft.DecSliceStringN(v, d) + default: + ft.DecSliceStringN(rv2i(rv).([]string), d) + } +} +func (fastpathDTJsonIO) DecSliceStringY(v []string, d *decoderJsonIO) (v2 []string, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []string) { v, changed = dst, true } + for ; 
d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]string, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, "")) + } + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]string{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonIO) DecSliceStringN(v []string, d *decoderJsonIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonIO) fastpathDecSliceBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[][]byte) + if vv, changed := ft.DecSliceBytesY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v [][]byte + rvGetSlice4Array(rv, &v) + ft.DecSliceBytesN(v, d) + default: + ft.DecSliceBytesN(rv2i(rv).([][]byte), d) + } +} +func (fastpathDTJsonIO) DecSliceBytesY(v [][]byte, d *decoderJsonIO) (v2 [][]byte, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst [][]byte) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 24)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([][]byte, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([][]byte{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonIO) DecSliceBytesN(v [][]byte, d *decoderJsonIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + 
return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonIO) fastpathDecSliceFloat32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float32) + if vv, changed := ft.DecSliceFloat32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float32 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat32N(v, d) + default: + ft.DecSliceFloat32N(rv2i(rv).([]float32), d) + } +} +func (fastpathDTJsonIO) DecSliceFloat32Y(v []float32, d *decoderJsonIO) (v2 []float32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = float32(d.d.DecodeFloat32()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonIO) DecSliceFloat32N(v []float32, d *decoderJsonIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = float32(d.d.DecodeFloat32()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonIO) fastpathDecSliceFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + switch rv.Kind() { + case reflect.Ptr: + v := 
rv2i(rv).(*[]float64) + if vv, changed := ft.DecSliceFloat64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float64 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat64N(v, d) + default: + ft.DecSliceFloat64N(rv2i(rv).([]float64), d) + } +} +func (fastpathDTJsonIO) DecSliceFloat64Y(v []float64, d *decoderJsonIO) (v2 []float64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeFloat64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonIO) DecSliceFloat64N(v []float64, d *decoderJsonIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeFloat64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonIO) fastpathDecSliceUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint8) + if vv, changed := ft.DecSliceUint8Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint8 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint8N(v, d) + default: + ft.DecSliceUint8N(rv2i(rv).([]uint8), d) + } +} +func (fastpathDTJsonIO) DecSliceUint8Y(v []uint8, d *decoderJsonIO) (v2 []uint8, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + if ctyp != valueTypeMap { + var dbi dBytesIntoState + v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false) + return v2, dbi != dBytesIntoParamOut + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint8) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, 
d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint8, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint8{}) + } + d.mapEnd() + return v, changed +} +func (fastpathDTJsonIO) DecSliceUint8N(v []uint8, d *decoderJsonIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + if ctyp != valueTypeMap { + d.decodeBytesInto(v[:len(v):len(v)], true) + return + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + d.mapEnd() +} + +func (d *decoderJsonIO) fastpathDecSliceUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint64) + if vv, changed := ft.DecSliceUint64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint64 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint64N(v, d) + default: + ft.DecSliceUint64N(rv2i(rv).([]uint64), d) + } +} +func (fastpathDTJsonIO) DecSliceUint64Y(v []uint64, d *decoderJsonIO) (v2 []uint64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeUint64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonIO) DecSliceUint64N(v []uint64, d *decoderJsonIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = 
d.d.DecodeUint64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonIO) fastpathDecSliceIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int) + if vv, changed := ft.DecSliceIntY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int + rvGetSlice4Array(rv, &v) + ft.DecSliceIntN(v, d) + default: + ft.DecSliceIntN(rv2i(rv).([]int), d) + } +} +func (fastpathDTJsonIO) DecSliceIntY(v []int, d *decoderJsonIO) (v2 []int, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonIO) DecSliceIntN(v []int, d *decoderJsonIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonIO) fastpathDecSliceInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int32) + if vv, changed := ft.DecSliceInt32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int32 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt32N(v, d) + default: + ft.DecSliceInt32N(rv2i(rv).([]int32), d) + } +} +func (fastpathDTJsonIO) DecSliceInt32Y(v []int32, d *decoderJsonIO) (v2 []int32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = 
d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonIO) DecSliceInt32N(v []int32, d *decoderJsonIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonIO) fastpathDecSliceInt64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int64) + if vv, changed := ft.DecSliceInt64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int64 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt64N(v, d) + default: + ft.DecSliceInt64N(rv2i(rv).([]int64), d) + } +} +func (fastpathDTJsonIO) DecSliceInt64Y(v []int64, d *decoderJsonIO) (v2 []int64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeInt64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int64{}) + } + if isArray { + 
d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonIO) DecSliceInt64N(v []int64, d *decoderJsonIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeInt64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderJsonIO) fastpathDecSliceBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]bool) + if vv, changed := ft.DecSliceBoolY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []bool + rvGetSlice4Array(rv, &v) + ft.DecSliceBoolN(v, d) + default: + ft.DecSliceBoolN(rv2i(rv).([]bool), d) + } +} +func (fastpathDTJsonIO) DecSliceBoolY(v []bool, d *decoderJsonIO) (v2 []bool, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []bool) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]bool, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, false)) + } + v[uint(j)] = d.d.DecodeBool() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]bool{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTJsonIO) DecSliceBoolN(v []bool, d *decoderJsonIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeBool() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} +func (d *decoderJsonIO) fastpathDecMapStringIntfR(f *decFnInfo, rv 
reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]interface{}) + if *vp == nil { + *vp = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntfL(rv2i(rv).(map[string]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapStringIntfL(v map[string]interface{}, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderJsonIO) fastpathDecMapStringStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]string) + if *vp == nil { + *vp = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringStringL(rv2i(rv).(map[string]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapStringStringL(v map[string]string, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderJsonIO) fastpathDecMapStringBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string][]byte) + if *vp == nil { + *vp = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBytesL(rv2i(rv).(map[string][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapStringBytesL(v map[string][]byte, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderJsonIO) fastpathDecMapStringUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint8) + if *vp == nil { + *vp = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*vp, containerLen, d) 
+ } + } else if containerLen != 0 { + ft.DecMapStringUint8L(rv2i(rv).(map[string]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapStringUint8L(v map[string]uint8, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderJsonIO) fastpathDecMapStringUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint64) + if *vp == nil { + *vp = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint64L(rv2i(rv).(map[string]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapStringUint64L(v map[string]uint64, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderJsonIO) fastpathDecMapStringIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int) + if *vp == nil { + *vp = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntL(rv2i(rv).(map[string]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapStringIntL(v map[string]int, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderJsonIO) fastpathDecMapStringInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int32) + if *vp == nil { + *vp = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringInt32L(rv2i(rv).(map[string]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapStringInt32L(v map[string]int32, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} 
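The generated map fastpaths above all follow one pattern: the R method pre-sizes a nil destination map with decInferLen (capped by the handle's MaxInitLen) and the L method then walks mapElemKey/mapElemValue for each entry. These internals are only reached through the public codec API; the following is a minimal, hypothetical sketch (not part of the vendored change — the input literals and variable names are invented for illustration) of the calls that exercise fastpaths such as DecMapStringInt32L and DecSliceFloat64Y:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var jh codec.JsonHandle // handle whose generated JSON fastpaths appear in this diff

	// A typed map destination routes through a generated map fastpath
	// (here DecMapStringInt32L), which pre-sizes the map from the stream
	// length hint before filling it key by key.
	m := map[string]int32{}
	if err := codec.NewDecoderBytes([]byte(`{"a":1,"b":2}`), &jh).Decode(&m); err != nil {
		panic(err)
	}

	// A typed slice destination routes through a slice fastpath
	// (here DecSliceFloat64Y), which reslices or reallocates the
	// destination as elements arrive.
	var fs []float64
	if err := codec.NewDecoderBytes([]byte(`[1.5,2.5]`), &jh).Decode(&fs); err != nil {
		panic(err)
	}

	fmt.Println(m, fs)
}

Typed maps and slices outside the generated set fall back to the slower reflection-based decode path.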
+func (d *decoderJsonIO) fastpathDecMapStringFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]float64) + if *vp == nil { + *vp = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringFloat64L(rv2i(rv).(map[string]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapStringFloat64L(v map[string]float64, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderJsonIO) fastpathDecMapStringBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]bool) + if *vp == nil { + *vp = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBoolL(rv2i(rv).(map[string]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapStringBoolL(v map[string]bool, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderJsonIO) fastpathDecMapUint8IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]interface{}) + if *vp == nil { + *vp = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntfL(rv2i(rv).(map[uint8]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint8IntfL(v map[uint8]interface{}, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderJsonIO) fastpathDecMapUint8StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]string) + if *vp == nil { + *vp = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + 
ft.DecMapUint8StringL(rv2i(rv).(map[uint8]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint8StringL(v map[uint8]string, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderJsonIO) fastpathDecMapUint8BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8][]byte) + if *vp == nil { + *vp = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BytesL(rv2i(rv).(map[uint8][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint8BytesL(v map[uint8][]byte, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderJsonIO) fastpathDecMapUint8Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint8) + if *vp == nil { + *vp = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint8L(rv2i(rv).(map[uint8]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint8Uint8L(v map[uint8]uint8, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderJsonIO) fastpathDecMapUint8Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint64) + if *vp == nil { + *vp = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint64L(rv2i(rv).(map[uint8]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint8Uint64L(v map[uint8]uint64, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + 
d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderJsonIO) fastpathDecMapUint8IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int) + if *vp == nil { + *vp = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntL(rv2i(rv).(map[uint8]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint8IntL(v map[uint8]int, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderJsonIO) fastpathDecMapUint8Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int32) + if *vp == nil { + *vp = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Int32L(rv2i(rv).(map[uint8]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint8Int32L(v map[uint8]int32, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderJsonIO) fastpathDecMapUint8Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]float64) + if *vp == nil { + *vp = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Float64L(rv2i(rv).(map[uint8]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint8Float64L(v map[uint8]float64, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderJsonIO) fastpathDecMapUint8BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]bool) + if *vp == nil { + *vp = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BoolL(rv2i(rv).(map[uint8]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) 
DecMapUint8BoolL(v map[uint8]bool, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderJsonIO) fastpathDecMapUint64IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]interface{}) + if *vp == nil { + *vp = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntfL(rv2i(rv).(map[uint64]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint64IntfL(v map[uint64]interface{}, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderJsonIO) fastpathDecMapUint64StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]string) + if *vp == nil { + *vp = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64StringL(rv2i(rv).(map[uint64]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint64StringL(v map[uint64]string, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderJsonIO) fastpathDecMapUint64BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64][]byte) + if *vp == nil { + *vp = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BytesL(rv2i(rv).(map[uint64][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint64BytesL(v map[uint64][]byte, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, 
false) + } +} +func (d *decoderJsonIO) fastpathDecMapUint64Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint8) + if *vp == nil { + *vp = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint8L(rv2i(rv).(map[uint64]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint64Uint8L(v map[uint64]uint8, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderJsonIO) fastpathDecMapUint64Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint64) + if *vp == nil { + *vp = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint64L(rv2i(rv).(map[uint64]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint64Uint64L(v map[uint64]uint64, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderJsonIO) fastpathDecMapUint64IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int) + if *vp == nil { + *vp = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint64IntL(v map[uint64]int, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderJsonIO) fastpathDecMapUint64Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int32) + if *vp == nil { + *vp = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint64Int32L(v map[uint64]int32, containerLen int, d *decoderJsonIO) { + if v == 
nil { + halt.errorInt("cannot decode into nil map[uint64]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderJsonIO) fastpathDecMapUint64Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]float64) + if *vp == nil { + *vp = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderJsonIO) fastpathDecMapUint64BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]bool) + if *vp == nil { + *vp = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderJsonIO) fastpathDecMapIntIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]interface{}) + if *vp == nil { + *vp = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderJsonIO) fastpathDecMapIntStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := 
rv2i(rv).(*map[int]string) + if *vp == nil { + *vp = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapIntStringL(v map[int]string, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderJsonIO) fastpathDecMapIntBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int][]byte) + if *vp == nil { + *vp = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapIntBytesL(v map[int][]byte, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderJsonIO) fastpathDecMapIntUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint8) + if *vp == nil { + *vp = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapIntUint8L(v map[int]uint8, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderJsonIO) fastpathDecMapIntUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint64) + if *vp == nil { + *vp = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapIntUint64L(v map[int]uint64, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint64 given stream length: ", 
int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderJsonIO) fastpathDecMapIntIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int) + if *vp == nil { + *vp = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapIntIntL(v map[int]int, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderJsonIO) fastpathDecMapIntInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int32) + if *vp == nil { + *vp = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapIntInt32L(v map[int]int32, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderJsonIO) fastpathDecMapIntFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]float64) + if *vp == nil { + *vp = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapIntFloat64L(v map[int]float64, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderJsonIO) fastpathDecMapIntBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]bool) + if *vp == nil { + *vp = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*vp, containerLen, d) + } 
+ } else if containerLen != 0 { + ft.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapIntBoolL(v map[int]bool, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderJsonIO) fastpathDecMapInt32IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]interface{}) + if *vp == nil { + *vp = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderJsonIO) fastpathDecMapInt32StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]string) + if *vp == nil { + *vp = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapInt32StringL(v map[int32]string, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderJsonIO) fastpathDecMapInt32BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32][]byte) + if *vp == nil { + *vp = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + 
d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderJsonIO) fastpathDecMapInt32Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint8) + if *vp == nil { + *vp = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderJsonIO) fastpathDecMapInt32Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint64) + if *vp == nil { + *vp = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderJsonIO) fastpathDecMapInt32IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int) + if *vp == nil { + *vp = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapInt32IntL(v map[int32]int, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderJsonIO) fastpathDecMapInt32Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int32) + if *vp == nil { + *vp = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*vp, containerLen, d) + } + } else if 
containerLen != 0 { + ft.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapInt32Int32L(v map[int32]int32, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderJsonIO) fastpathDecMapInt32Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]float64) + if *vp == nil { + *vp = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderJsonIO) fastpathDecMapInt32BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTJsonIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]bool) + if *vp == nil { + *vp = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTJsonIO) DecMapInt32BoolL(v map[int32]bool, containerLen int, d *decoderJsonIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go index 569b0cc9e..ba0e55005 100644 --- a/vendor/github.com/ugorji/go/codec/json.go +++ b/vendor/github.com/ugorji/go/codec/json.go @@ -1,3 +1,5 @@ +//go:build notmono || codec.notmono + // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. @@ -18,6 +20,7 @@ package codec import ( "encoding/base64" + "io" "math" "reflect" "strconv" @@ -27,154 +30,36 @@ import ( "unicode/utf8" ) -//-------------------------------- +type jsonEncDriver[T encWriter] struct { + noBuiltInTypes + h *JsonHandle + e *encoderBase + s *bitset256 // safe set for characters (taking h.HTMLAsIs into consideration) -// jsonLits and jsonLitb are defined at the package level, -// so they are guaranteed to be stored efficiently, making -// for better append/string comparison/etc. 
-// -// (anecdotal evidence from some benchmarking on go 1.20 devel in 20220104) -const jsonLits = `"true"false"null"` + w T + // se interfaceExtWrapper -var jsonLitb = []byte(jsonLits) + enc encoderI -const ( - jsonLitT = 1 - jsonLitF = 6 - jsonLitN = 12 -) + timeFmtLayout string + byteFmter jsonBytesFmter + // ---- cpu cache line boundary??? -const jsonEncodeUintSmallsString = "" + - "00010203040506070809" + - "10111213141516171819" + - "20212223242526272829" + - "30313233343536373839" + - "40414243444546474849" + - "50515253545556575859" + - "60616263646566676869" + - "70717273747576777879" + - "80818283848586878889" + - "90919293949596979899" + // bytes2Arr bool + // time2Num bool + timeFmt jsonTimeFmt + bytesFmt jsonBytesFmt -var jsonEncodeUintSmallsStringBytes = []byte(jsonEncodeUintSmallsString) - -const ( - jsonU4Chk2 = '0' - jsonU4Chk1 = 'a' - 10 - jsonU4Chk0 = 'A' - 10 -) - -const ( - // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: - // - If we see first character of null, false or true, - // do not validate subsequent characters. - // - e.g. if we see a n, assume null and skip next 3 characters, - // and do not validate they are ull. - // P.S. Do not expect a significant decoding boost from this. - jsonValidateSymbols = true - - // jsonEscapeMultiByteUnicodeSep controls whether some unicode characters - // that are valid json but may bomb in some contexts are escaped during encoeing. - // - // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. - // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally. - jsonEscapeMultiByteUnicodeSep = true - - // jsonRecognizeBoolNullInQuotedStr is used during decoding into a blank interface{} - // to control whether we detect quoted values of bools and null where a map key is expected, - // and treat as nil, true or false. - jsonNakedBoolNullInQuotedStr = true - - // jsonManualInlineDecRdInHotZones controls whether we manually inline some decReader calls. - // - // encode performance is at par with libraries that just iterate over bytes directly, - // because encWr (with inlined bytesEncAppender calls) is inlined. - // Conversely, decode performance suffers because decRd (with inlined bytesDecReader calls) - // isn't inlinable. - // - // To improve decode performamnce from json: - // - readn1 is only called for \u - // - consequently, to optimize json decoding, we specifically need inlining - // for bytes use-case of some other decReader methods: - // - jsonReadAsisChars, skipWhitespace (advance) and jsonReadNum - // - AND THEN readn3, readn4 (for ull, rue and alse). - // - (readn1 is only called when a char is escaped). - // - without inlining, we still pay the cost of a method invocationK, and this dominates time - // - To mitigate, we manually inline in hot zones - // *excluding places where used sparingly (e.g. nextValueBytes, and other atypical cases)*. 
- // - jsonReadAsisChars *only* called in: appendStringAsBytes - // - advance called: everywhere - // - jsonReadNum: decNumBytes, DecodeNaked - // - From running go test (our anecdotal findings): - // - calling jsonReadAsisChars in appendStringAsBytes: 23431 - // - calling jsonReadNum in decNumBytes: 15251 - // - calling jsonReadNum in DecodeNaked: 612 - // Consequently, we manually inline jsonReadAsisChars (in appendStringAsBytes) - // and jsonReadNum (in decNumbytes) - jsonManualInlineDecRdInHotZones = true - - jsonSpacesOrTabsLen = 128 - - // jsonAlwaysReturnInternString = false -) - -var ( - // jsonTabs and jsonSpaces are used as caches for indents - jsonTabs, jsonSpaces [jsonSpacesOrTabsLen]byte - - jsonCharHtmlSafeSet bitset256 - jsonCharSafeSet bitset256 -) - -func init() { - var i byte - for i = 0; i < jsonSpacesOrTabsLen; i++ { - jsonSpaces[i] = ' ' - jsonTabs[i] = '\t' - } - - // populate the safe values as true: note: ASCII control characters are (0-31) - // jsonCharSafeSet: all true except (0-31) " \ - // jsonCharHtmlSafeSet: all true except (0-31) " \ < > & - for i = 32; i < utf8.RuneSelf; i++ { - switch i { - case '"', '\\': - case '<', '>', '&': - jsonCharSafeSet.set(i) // = true - default: - jsonCharSafeSet.set(i) - jsonCharHtmlSafeSet.set(i) - } - } -} - -// ---------------- - -type jsonEncState struct { di int8 // indent per: if negative, use tabs d bool // indenting? dl uint16 // indent level -} - -func (x jsonEncState) captureState() interface{} { return x } -func (x *jsonEncState) restoreState(v interface{}) { *x = v.(jsonEncState) } - -type jsonEncDriver struct { - noBuiltInTypes - h *JsonHandle - - // se interfaceExtWrapper - - // ---- cpu cache line boundary? - jsonEncState ks bool // map key as string is byte // integer as string typical bool - rawext bool // rawext configured on the handle - s *bitset256 // safe set for characters (taking h.HTMLAsIs into consideration) + rawext bool // rawext configured on the handle // buf *[]byte // used mostly for encoding []byte @@ -190,137 +75,132 @@ type jsonEncDriver struct { // Consequently, 35 characters should be sufficient for encoding time, integers or floats. // We use up all the remaining bytes to make this use full cache lines. 
b [48]byte - - e Encoder } -func (e *jsonEncDriver) encoder() *Encoder { return &e.e } - -func (e *jsonEncDriver) writeIndent() { - e.e.encWr.writen1('\n') +func (e *jsonEncDriver[T]) writeIndent() { + e.w.writen1('\n') x := int(e.di) * int(e.dl) if e.di < 0 { x = -x - for x > jsonSpacesOrTabsLen { - e.e.encWr.writeb(jsonTabs[:]) - x -= jsonSpacesOrTabsLen + for x > len(jsonTabs) { + e.w.writeb(jsonTabs[:]) + x -= len(jsonTabs) } - e.e.encWr.writeb(jsonTabs[:x]) + e.w.writeb(jsonTabs[:x]) } else { - for x > jsonSpacesOrTabsLen { - e.e.encWr.writeb(jsonSpaces[:]) - x -= jsonSpacesOrTabsLen + for x > len(jsonSpaces) { + e.w.writeb(jsonSpaces[:]) + x -= len(jsonSpaces) } - e.e.encWr.writeb(jsonSpaces[:x]) + e.w.writeb(jsonSpaces[:x]) } } -func (e *jsonEncDriver) WriteArrayElem() { - if e.e.c != containerArrayStart { - e.e.encWr.writen1(',') +func (e *jsonEncDriver[T]) WriteArrayElem(firstTime bool) { + if !firstTime { + e.w.writen1(',') } if e.d { e.writeIndent() } } -func (e *jsonEncDriver) WriteMapElemKey() { - if e.e.c != containerMapStart { - e.e.encWr.writen1(',') +func (e *jsonEncDriver[T]) WriteMapElemKey(firstTime bool) { + if !firstTime { + e.w.writen1(',') } if e.d { e.writeIndent() } } -func (e *jsonEncDriver) WriteMapElemValue() { +func (e *jsonEncDriver[T]) WriteMapElemValue() { if e.d { - e.e.encWr.writen2(':', ' ') + e.w.writen2(':', ' ') } else { - e.e.encWr.writen1(':') + e.w.writen1(':') } } -func (e *jsonEncDriver) EncodeNil() { +func (e *jsonEncDriver[T]) EncodeNil() { // We always encode nil as just null (never in quotes) // so we can easily decode if a nil in the json stream ie if initial token is n. - e.e.encWr.writestr(jsonLits[jsonLitN : jsonLitN+4]) + // e.w.writestr(jsonLits[jsonLitN : jsonLitN+4]) + e.w.writeb(jsonNull) } -func (e *jsonEncDriver) EncodeTime(t time.Time) { +func (e *jsonEncDriver[T]) encodeIntAsUint(v int64, quotes bool) { + neg := v < 0 + if neg { + v = -v + } + e.encodeUint(neg, quotes, uint64(v)) +} + +func (e *jsonEncDriver[T]) EncodeTime(t time.Time) { // Do NOT use MarshalJSON, as it allocates internally. 
// instead, we call AppendFormat directly, using our scratch buffer (e.b) if t.IsZero() { e.EncodeNil() - } else { + return + } + switch e.timeFmt { + case jsonTimeFmtStringLayout: e.b[0] = '"' - b := fmtTime(t, time.RFC3339Nano, e.b[1:1]) + b := t.AppendFormat(e.b[1:1], e.timeFmtLayout) e.b[len(b)+1] = '"' - e.e.encWr.writeb(e.b[:len(b)+2]) + e.w.writeb(e.b[:len(b)+2]) + case jsonTimeFmtUnix: + e.encodeIntAsUint(t.Unix(), false) + case jsonTimeFmtUnixMilli: + e.encodeIntAsUint(t.UnixMilli(), false) + case jsonTimeFmtUnixMicro: + e.encodeIntAsUint(t.UnixMicro(), false) + case jsonTimeFmtUnixNano: + e.encodeIntAsUint(t.UnixNano(), false) } } -func (e *jsonEncDriver) EncodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { +func (e *jsonEncDriver[T]) EncodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { if ext == SelfExt { - e.e.encodeValue(baseRV(rv), e.h.fnNoExt(basetype)) + e.enc.encodeAs(rv, basetype, false) } else if v := ext.ConvertExt(rv); v == nil { - e.EncodeNil() + e.writeNilBytes() } else { - e.e.encode(v) + e.enc.encodeI(v) } } -func (e *jsonEncDriver) EncodeRawExt(re *RawExt) { - // only encodes re.Value (never re.Data) - if re.Value == nil { - e.EncodeNil() +func (e *jsonEncDriver[T]) EncodeRawExt(re *RawExt) { + if re.Data != nil { + e.w.writeb(re.Data) + } else if re.Value != nil { + e.enc.encodeI(re.Value) } else { - e.e.encode(re.Value) + e.EncodeNil() } } -var jsonEncBoolStrs = [2][2]string{ - {jsonLits[jsonLitF : jsonLitF+5], jsonLits[jsonLitT : jsonLitT+4]}, - {jsonLits[jsonLitF-1 : jsonLitF+6], jsonLits[jsonLitT-1 : jsonLitT+5]}, +func (e *jsonEncDriver[T]) EncodeBool(b bool) { + e.w.writestr(jsonEncBoolStrs[bool2int(e.ks && e.e.c == containerMapKey)%2][bool2int(b)%2]) } -func (e *jsonEncDriver) EncodeBool(b bool) { - e.e.encWr.writestr( - jsonEncBoolStrs[bool2int(e.ks && e.e.c == containerMapKey)%2][bool2int(b)%2]) -} - -// func (e *jsonEncDriver) EncodeBool(b bool) { -// if e.ks && e.e.c == containerMapKey { -// if b { -// e.e.encWr.writestr(jsonLits[jsonLitT-1 : jsonLitT+5]) -// } else { -// e.e.encWr.writestr(jsonLits[jsonLitF-1 : jsonLitF+6]) -// } -// } else { -// if b { -// e.e.encWr.writestr(jsonLits[jsonLitT : jsonLitT+4]) -// } else { -// e.e.encWr.writestr(jsonLits[jsonLitF : jsonLitF+5]) -// } -// } -// } - -func (e *jsonEncDriver) encodeFloat(f float64, bitsize, fmt byte, prec int8) { +func (e *jsonEncDriver[T]) encodeFloat(f float64, bitsize, fmt byte, prec int8) { var blen uint if e.ks && e.e.c == containerMapKey { blen = 2 + uint(len(strconv.AppendFloat(e.b[1:1], f, fmt, int(prec), int(bitsize)))) // _ = e.b[:blen] e.b[0] = '"' e.b[blen-1] = '"' - e.e.encWr.writeb(e.b[:blen]) + e.w.writeb(e.b[:blen]) } else { - e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], f, fmt, int(prec), int(bitsize))) + e.w.writeb(strconv.AppendFloat(e.b[:0], f, fmt, int(prec), int(bitsize))) } } -func (e *jsonEncDriver) EncodeFloat64(f float64) { +func (e *jsonEncDriver[T]) EncodeFloat64(f float64) { if math.IsNaN(f) || math.IsInf(f, 0) { e.EncodeNil() return @@ -329,7 +209,7 @@ func (e *jsonEncDriver) EncodeFloat64(f float64) { e.encodeFloat(f, 64, fmt, prec) } -func (e *jsonEncDriver) EncodeFloat32(f float32) { +func (e *jsonEncDriver[T]) EncodeFloat32(f float32) { if math.IsNaN(float64(f)) || math.IsInf(float64(f), 0) { e.EncodeNil() return @@ -338,60 +218,11 @@ func (e *jsonEncDriver) EncodeFloat32(f float32) { e.encodeFloat(float64(f), 32, fmt, prec) } -func (e *jsonEncDriver) encodeUint(neg bool, quotes bool, u uint64) { - // copied mostly 
from std library: strconv - // this should only be called on 64bit OS. - - // const smallsString = jsonEncodeUintSmallsString - var ss = jsonEncodeUintSmallsStringBytes - - // typically, 19 or 20 bytes sufficient for decimal encoding a uint64 - // var a [24]byte - var a = e.b[0:24] - var i = uint(len(a)) - - if quotes { - i-- - setByteAt(a, i, '"') - // a[i] = '"' - } - // u guaranteed to fit into a uint (as we are not 32bit OS) - var is uint - var us = uint(u) - for us >= 100 { - is = us % 100 * 2 - us /= 100 - i -= 2 - setByteAt(a, i+1, byteAt(ss, is+1)) - setByteAt(a, i, byteAt(ss, is)) - // a[i+1] = smallsString[is+1] - // a[i+0] = smallsString[is+0] - } - - // us < 100 - is = us * 2 - i-- - setByteAt(a, i, byteAt(ss, is+1)) - // a[i] = smallsString[is+1] - if us >= 10 { - i-- - setByteAt(a, i, byteAt(ss, is)) - // a[i] = smallsString[is] - } - if neg { - i-- - setByteAt(a, i, '-') - // a[i] = '-' - } - if quotes { - i-- - setByteAt(a, i, '"') - // a[i] = '"' - } - e.e.encWr.writeb(a[i:]) +func (e *jsonEncDriver[T]) encodeUint(neg bool, quotes bool, u uint64) { + e.w.writeb(jsonEncodeUint(neg, quotes, u, &e.b)) } -func (e *jsonEncDriver) EncodeInt(v int64) { +func (e *jsonEncDriver[T]) EncodeInt(v int64) { quotes := e.is == 'A' || e.is == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.ks && e.e.c == containerMapKey) @@ -400,9 +231,9 @@ func (e *jsonEncDriver) EncodeInt(v int64) { blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10)) e.b[0] = '"' e.b[blen-1] = '"' - e.e.encWr.writeb(e.b[:blen]) + e.w.writeb(e.b[:blen]) } else { - e.e.encWr.writeb(strconv.AppendInt(e.b[:0], v, 10)) + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) } return } @@ -414,7 +245,7 @@ func (e *jsonEncDriver) EncodeInt(v int64) { } } -func (e *jsonEncDriver) EncodeUint(v uint64) { +func (e *jsonEncDriver[T]) EncodeUint(v uint64) { quotes := e.is == 'A' || e.is == 'L' && v > 1<<53 || (e.ks && e.e.c == containerMapKey) @@ -424,9 +255,9 @@ func (e *jsonEncDriver) EncodeUint(v uint64) { blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10)) e.b[0] = '"' e.b[blen-1] = '"' - e.e.encWr.writeb(e.b[:blen]) + e.w.writeb(e.b[:blen]) } else { - e.e.encWr.writeb(strconv.AppendUint(e.b[:0], v, 10)) + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) } return } @@ -434,7 +265,7 @@ func (e *jsonEncDriver) EncodeUint(v uint64) { e.encodeUint(false, quotes, v) } -func (e *jsonEncDriver) EncodeString(v string) { +func (e *jsonEncDriver[T]) EncodeString(v string) { if e.h.StringToRaw { e.EncodeStringBytesRaw(bytesView(v)) return @@ -442,35 +273,80 @@ func (e *jsonEncDriver) EncodeString(v string) { e.quoteStr(v) } -func (e *jsonEncDriver) EncodeStringBytesRaw(v []byte) { - // if encoding raw bytes and RawBytesExt is configured, use it to encode - if v == nil { - e.EncodeNil() - return - } +func (e *jsonEncDriver[T]) EncodeStringNoEscape4Json(v string) { e.w.writeqstr(v) } +func (e *jsonEncDriver[T]) EncodeStringBytesRaw(v []byte) { if e.rawext { - iv := e.h.RawBytesExt.ConvertExt(v) + // explicitly convert v to interface{} so that v doesn't escape to heap + iv := e.h.RawBytesExt.ConvertExt(any(v)) if iv == nil { e.EncodeNil() } else { - e.e.encode(iv) + e.enc.encodeI(iv) } return } - slen := base64.StdEncoding.EncodedLen(len(v)) + 2 + if e.bytesFmt == jsonBytesFmtArray { + e.WriteArrayStart(len(v)) + for j := range v { + e.WriteArrayElem(j == 0) + e.encodeUint(false, false, uint64(v[j])) + } + e.WriteArrayEnd() + return + } + + // hardcode base64, so we call direct (not via interface) and hopefully inline + var slen int + if e.bytesFmt == 
jsonBytesFmtBase64 { + slen = base64.StdEncoding.EncodedLen(len(v)) + } else { + slen = e.byteFmter.EncodedLen(len(v)) + } + slen += 2 // bs := e.e.blist.check(*e.buf, n)[:slen] // *e.buf = bs - bs := e.e.blist.peek(slen, false) - bs = bs[:slen] + bs := e.e.blist.peek(slen, false)[:slen] + + if e.bytesFmt == jsonBytesFmtBase64 { + base64.StdEncoding.Encode(bs[1:], v) + } else { + e.byteFmter.Encode(bs[1:], v) + } - base64.StdEncoding.Encode(bs[1:], v) bs[len(bs)-1] = '"' bs[0] = '"' - e.e.encWr.writeb(bs) + e.w.writeb(bs) +} + +func (e *jsonEncDriver[T]) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *jsonEncDriver[T]) writeNilOr(v []byte) { + if !e.h.NilCollectionToZeroLength { + v = jsonNull + } + e.w.writeb(v) +} + +func (e *jsonEncDriver[T]) writeNilBytes() { + e.writeNilOr(jsonArrayEmpty) +} + +func (e *jsonEncDriver[T]) writeNilArray() { + e.writeNilOr(jsonArrayEmpty) +} + +func (e *jsonEncDriver[T]) writeNilMap() { + e.writeNilOr(jsonMapEmpty) } // indent is done as below: @@ -478,78 +354,89 @@ func (e *jsonEncDriver) EncodeStringBytesRaw(v []byte) { // - newline and indent are added before each ending, // except there was no entry (so we can have {} or []) -func (e *jsonEncDriver) WriteArrayStart(length int) { +func (e *jsonEncDriver[T]) WriteArrayEmpty() { + e.w.writen2('[', ']') +} + +func (e *jsonEncDriver[T]) WriteMapEmpty() { + e.w.writen2('{', '}') +} + +func (e *jsonEncDriver[T]) WriteArrayStart(length int) { if e.d { e.dl++ } - e.e.encWr.writen1('[') + e.w.writen1('[') } -func (e *jsonEncDriver) WriteArrayEnd() { +func (e *jsonEncDriver[T]) WriteArrayEnd() { if e.d { e.dl-- + // No need as encoder handles zero-len already + // if e.e.c != containerArrayStart { e.writeIndent() } - e.e.encWr.writen1(']') + e.w.writen1(']') } -func (e *jsonEncDriver) WriteMapStart(length int) { +func (e *jsonEncDriver[T]) WriteMapStart(length int) { if e.d { e.dl++ } - e.e.encWr.writen1('{') + e.w.writen1('{') } -func (e *jsonEncDriver) WriteMapEnd() { +func (e *jsonEncDriver[T]) WriteMapEnd() { if e.d { e.dl-- - if e.e.c != containerMapStart { - e.writeIndent() - } + // No need as encoder handles zero-len already + // if e.e.c != containerMapStart { + e.writeIndent() } - e.e.encWr.writen1('}') + e.w.writen1('}') } -func (e *jsonEncDriver) quoteStr(s string) { +func (e *jsonEncDriver[T]) quoteStr(s string) { // adapted from std pkg encoding/json const hex = "0123456789abcdef" - w := e.e.w() - w.writen1('"') + e.w.writen1('"') var i, start uint for i < uint(len(s)) { // encode all bytes < 0x20 (except \r, \n). // also encode < > & to prevent security holes when served to some browsers. - // We optimize for ascii, by assumining that most characters are in the BMP + // We optimize for ascii, by assuming that most characters are in the BMP // and natively consumed by json without much computation. 
// if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { // if (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) { - if e.s.isset(s[i]) { + b := s[i] + if e.s.isset(b) { i++ continue } - // b := s[i] - if s[i] < utf8.RuneSelf { + if b < utf8.RuneSelf { if start < i { - w.writestr(s[start:i]) + e.w.writestr(s[start:i]) } - switch s[i] { - case '\\', '"': - w.writen2('\\', s[i]) + switch b { + case '\\': + e.w.writen2('\\', '\\') + case '"': + e.w.writen2('\\', '"') case '\n': - w.writen2('\\', 'n') - case '\r': - w.writen2('\\', 'r') - case '\b': - w.writen2('\\', 'b') - case '\f': - w.writen2('\\', 'f') + e.w.writen2('\\', 'n') case '\t': - w.writen2('\\', 't') + e.w.writen2('\\', 't') + case '\r': + e.w.writen2('\\', 'r') + case '\b': + e.w.writen2('\\', 'b') + case '\f': + e.w.writen2('\\', 'f') default: - w.writestr(`\u00`) - w.writen2(hex[s[i]>>4], hex[s[i]&0xF]) + e.w.writestr(`\u00`) + e.w.writen2(hex[b>>4], hex[b&0xF]) } i++ start = i @@ -558,9 +445,9 @@ func (e *jsonEncDriver) quoteStr(s string) { c, size := utf8.DecodeRuneInString(s[i:]) if c == utf8.RuneError && size == 1 { // meaning invalid encoding (so output as-is) if start < i { - w.writestr(s[start:i]) + e.w.writestr(s[start:i]) } - w.writestr(`\uFFFD`) + e.w.writestr(`\uFFFD`) i++ start = i continue @@ -569,10 +456,10 @@ func (e *jsonEncDriver) quoteStr(s string) { // Both technically valid JSON, but bomb on JSONP, so fix here *unconditionally*. if jsonEscapeMultiByteUnicodeSep && (c == '\u2028' || c == '\u2029') { if start < i { - w.writestr(s[start:i]) + e.w.writestr(s[start:i]) } - w.writestr(`\u202`) - w.writen1(hex[c&0xF]) + e.w.writestr(`\u202`) + e.w.writen1(hex[c&0xF]) i += uint(size) start = i continue @@ -580,80 +467,72 @@ func (e *jsonEncDriver) quoteStr(s string) { i += uint(size) } if start < uint(len(s)) { - w.writestr(s[start:]) + e.w.writestr(s[start:]) } - w.writen1('"') + e.w.writen1('"') } -func (e *jsonEncDriver) atEndOfEncode() { +func (e *jsonEncDriver[T]) atEndOfEncode() { if e.h.TermWhitespace { var c byte = ' ' // default is that scalar is written, so output space if e.e.c != 0 { c = '\n' // for containers (map/list), output a newline } - e.e.encWr.writen1(c) + e.w.writen1(c) } } // ---------- -type jsonDecState struct { - rawext bool // rawext configured on the handle +type jsonDecDriver[T decReader] struct { + noBuiltInTypes + decDriverNoopNumberHelper + h *JsonHandle + d *decoderBase + + r T + + // scratch buffer used for base64 decoding (DecodeBytes in reuseBuf mode), + // or reading doubleQuoted string (DecodeStringAsBytes, DecodeNaked) + buf []byte tok uint8 // used to store the token read right after skipWhiteSpace _ bool // found null _ byte // padding bstr [4]byte // scratch used for string \UXXX parsing - // scratch buffer used for base64 decoding (DecodeBytes in reuseBuf mode), - // or reading doubleQuoted string (DecodeStringAsBytes, DecodeNaked) - buf *[]byte -} - -func (x jsonDecState) captureState() interface{} { return x } -func (x *jsonDecState) restoreState(v interface{}) { *x = v.(jsonDecState) } - -type jsonDecDriver struct { - noBuiltInTypes - decDriverNoopNumberHelper - h *JsonHandle - - jsonDecState + jsonHandleOpts // se interfaceExtWrapper // ---- cpu cache line boundary? 
- d Decoder + // bytes bool + + dec decoderI } -func (d *jsonDecDriver) descBd() (s string) { panic("descBd unsupported") } - -func (d *jsonDecDriver) decoder() *Decoder { - return &d.d -} - -func (d *jsonDecDriver) ReadMapStart() int { +func (d *jsonDecDriver[T]) ReadMapStart() int { d.advance() if d.tok == 'n' { - d.checkLit3([3]byte{'u', 'l', 'l'}, d.d.decRd.readn3()) + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) return containerLenNil } if d.tok != '{' { - d.d.errorf("read map - expect char '%c' but got char '%c'", '{', d.tok) + halt.errorByte("read map - expect char '{' but got char: ", d.tok) } d.tok = 0 return containerLenUnknown } -func (d *jsonDecDriver) ReadArrayStart() int { +func (d *jsonDecDriver[T]) ReadArrayStart() int { d.advance() if d.tok == 'n' { - d.checkLit3([3]byte{'u', 'l', 'l'}, d.d.decRd.readn3()) + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) return containerLenNil } if d.tok != '[' { - d.d.errorf("read array - expect char '%c' but got char '%c'", '[', d.tok) + halt.errorByte("read array - expect char '[' but got char ", d.tok) } d.tok = 0 return containerLenUnknown @@ -665,24 +544,12 @@ func (d *jsonDecDriver) ReadArrayStart() int { // However, this forces CheckBreak to always incur a function call if there was whitespace, // with no clear benefit. -func (d *jsonDecDriver) CheckBreak() bool { +func (d *jsonDecDriver[T]) CheckBreak() bool { d.advance() return d.tok == '}' || d.tok == ']' } -func (d *jsonDecDriver) ReadArrayElem() { - const xc uint8 = ',' - if d.d.c != containerArrayStart { - d.advance() - if d.tok != xc { - d.readDelimError(xc) - } - d.tok = 0 - } -} - -func (d *jsonDecDriver) ReadArrayEnd() { - const xc uint8 = ']' +func (d *jsonDecDriver[T]) checkSep(xc byte) { d.advance() if d.tok != xc { d.readDelimError(xc) @@ -690,118 +557,96 @@ func (d *jsonDecDriver) ReadArrayEnd() { d.tok = 0 } -func (d *jsonDecDriver) ReadMapElemKey() { - const xc uint8 = ',' - if d.d.c != containerMapStart { - d.advance() - if d.tok != xc { - d.readDelimError(xc) - } - d.tok = 0 +func (d *jsonDecDriver[T]) ReadArrayElem(firstTime bool) { + if !firstTime { + d.checkSep(',') } } -func (d *jsonDecDriver) ReadMapElemValue() { - const xc uint8 = ':' - d.advance() - if d.tok != xc { - d.readDelimError(xc) - } - d.tok = 0 +func (d *jsonDecDriver[T]) ReadArrayEnd() { + d.checkSep(']') } -func (d *jsonDecDriver) ReadMapEnd() { - const xc uint8 = '}' - d.advance() - if d.tok != xc { - d.readDelimError(xc) - } - d.tok = 0 +func (d *jsonDecDriver[T]) ReadMapElemKey(firstTime bool) { + d.ReadArrayElem(firstTime) } -func (d *jsonDecDriver) readDelimError(xc uint8) { - d.d.errorf("read json delimiter - expect char '%c' but got char '%c'", xc, d.tok) +func (d *jsonDecDriver[T]) ReadMapElemValue() { + d.checkSep(':') +} + +func (d *jsonDecDriver[T]) ReadMapEnd() { + d.checkSep('}') +} + +//go:inline +func (d *jsonDecDriver[T]) readDelimError(xc uint8) { + halt.errorf("read json delimiter - expect char '%c' but got char '%c'", xc, d.tok) } // MARKER: checkLit takes the readn(3|4) result as a parameter so they can be inlined. // We pass the array directly to errorf, as passing slice pushes past inlining threshold, // and passing slice also might cause allocation of the bs array on the heap. 
-func (d *jsonDecDriver) checkLit3(got, expect [3]byte) { - d.tok = 0 +func (d *jsonDecDriver[T]) checkLit3(got, expect [3]byte) { if jsonValidateSymbols && got != expect { - d.d.errorf("expecting %s: got %s", expect, got) + jsonCheckLitErr3(got, expect) } -} - -func (d *jsonDecDriver) checkLit4(got, expect [4]byte) { d.tok = 0 +} + +func (d *jsonDecDriver[T]) checkLit4(got, expect [4]byte) { if jsonValidateSymbols && got != expect { - d.d.errorf("expecting %s: got %s", expect, got) + jsonCheckLitErr4(got, expect) } + d.tok = 0 } -func (d *jsonDecDriver) skipWhitespace() { - d.tok = d.d.decRd.skipWhitespace() +func (d *jsonDecDriver[T]) skipWhitespace() { + d.tok = d.r.skipWhitespace() } -func (d *jsonDecDriver) advance() { - if d.tok == 0 { +func (d *jsonDecDriver[T]) advance() { + // handles jsonReadNum returning possibly non-printable value as tok + if d.tok < 33 { // d.tok == 0 { d.skipWhitespace() } } -func (d *jsonDecDriver) nextValueBytes(v []byte) []byte { - v, cursor := d.nextValueBytesR(v) - decNextValueBytesHelper{d: &d.d}.bytesRdV(&v, cursor) - return v -} - -func (d *jsonDecDriver) nextValueBytesR(v0 []byte) (v []byte, cursor uint) { - v = v0 - var h = decNextValueBytesHelper{d: &d.d} - dr := &d.d.decRd - +func (d *jsonDecDriver[T]) nextValueBytes() []byte { consumeString := func() { TOP: - bs := dr.jsonReadAsisChars() - h.appendN(&v, bs...) - if bs[len(bs)-1] != '"' { - // last char is '\', so consume next one and try again - h.append1(&v, dr.readn1()) + _, c := d.r.jsonReadAsisChars() + if c == '\\' { // consume next one and try again + d.r.readn1() goto TOP } } - d.advance() // ignore leading whitespace - cursor = d.d.rb.c - 1 // cursor starts just before non-whitespace token + d.advance() // ignore leading whitespace + d.r.startRecording() + // cursor = d.d.rb.c - 1 // cursor starts just before non-whitespace token switch d.tok { default: - h.appendN(&v, dr.jsonReadNum()...) + _, d.tok = d.r.jsonReadNum() case 'n': - d.checkLit3([3]byte{'u', 'l', 'l'}, d.d.decRd.readn3()) - h.appendS(&v, jsonLits[jsonLitN:jsonLitN+4]) + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) case 'f': - d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.d.decRd.readn4()) - h.appendS(&v, jsonLits[jsonLitF:jsonLitF+5]) + d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.r.readn4()) case 't': - d.checkLit3([3]byte{'r', 'u', 'e'}, d.d.decRd.readn3()) - h.appendS(&v, jsonLits[jsonLitT:jsonLitT+4]) + d.checkLit3([3]byte{'r', 'u', 'e'}, d.r.readn3()) case '"': - h.append1(&v, '"') consumeString() + d.tok = 0 case '{', '[': var elem struct{} var stack []struct{} stack = append(stack, elem) - h.append1(&v, d.tok) - for len(stack) != 0 { - c := dr.readn1() - h.append1(&v, c) + c := d.r.readn1() switch c { case '"': consumeString() @@ -811,64 +656,91 @@ func (d *jsonDecDriver) nextValueBytesR(v0 []byte) (v []byte, cursor uint) { stack = stack[:len(stack)-1] } } + d.tok = 0 } - d.tok = 0 - return + return d.r.stopRecording() } -func (d *jsonDecDriver) TryNil() bool { +func (d *jsonDecDriver[T]) TryNil() bool { d.advance() - // we shouldn't try to see if quoted "null" was here, right? - // only the plain string: `null` denotes a nil (ie not quotes) + // we don't try to see if quoted "null" was here. 
+ // only the plain string: null denotes a nil (ie not quotes) if d.tok == 'n' { - d.checkLit3([3]byte{'u', 'l', 'l'}, d.d.decRd.readn3()) + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) return true } return false } -func (d *jsonDecDriver) DecodeBool() (v bool) { +func (d *jsonDecDriver[T]) DecodeBool() (v bool) { d.advance() // bool can be in quotes if and only if it's a map key fquot := d.d.c == containerMapKey && d.tok == '"' if fquot { - d.tok = d.d.decRd.readn1() + d.tok = d.r.readn1() } switch d.tok { case 'f': - d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.d.decRd.readn4()) + d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.r.readn4()) // v = false case 't': - d.checkLit3([3]byte{'r', 'u', 'e'}, d.d.decRd.readn3()) + d.checkLit3([3]byte{'r', 'u', 'e'}, d.r.readn3()) v = true case 'n': - d.checkLit3([3]byte{'u', 'l', 'l'}, d.d.decRd.readn3()) + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) // v = false default: - d.d.errorf("decode bool: got first char %c", d.tok) + halt.errorByte("decode bool: got first char: ", d.tok) // v = false // "unreachable" } if fquot { - d.d.decRd.readn1() + d.r.readn1() } return } -func (d *jsonDecDriver) DecodeTime() (t time.Time) { +func (d *jsonDecDriver[T]) DecodeTime() (t time.Time) { // read string, and pass the string into json.unmarshal d.advance() if d.tok == 'n' { - d.checkLit3([3]byte{'u', 'l', 'l'}, d.d.decRd.readn3()) + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) return } - d.ensureReadingString() - bs := d.readUnescapedString() - t, err := time.Parse(time.RFC3339, stringView(bs)) - d.d.onerror(err) + var bs []byte + // if a number, use the timeFmtNum + if d.tok != '"' { + bs, d.tok = d.r.jsonReadNum() + i := d.parseInt64(bs) + switch d.timeFmtNum { + case jsonTimeFmtUnix: + t = time.Unix(i, 0) + case jsonTimeFmtUnixMilli: + t = time.UnixMilli(i) + case jsonTimeFmtUnixMicro: + t = time.UnixMicro(i) + case jsonTimeFmtUnixNano: + t = time.Unix(0, i) + default: + halt.errorStr("invalid timeFmtNum") + } + return + } + + // d.tok is now '"' + // d.ensureReadingString() + bs = d.readUnescapedString() + var err error + for _, v := range d.timeFmtLayouts { + t, err = time.Parse(v, stringView(bs)) + if err == nil { + return + } + } + halt.errorStr("error decoding time") return } -func (d *jsonDecDriver) ContainerType() (vt valueType) { +func (d *jsonDecDriver[T]) ContainerType() (vt valueType) { // check container type by checking the first char d.advance() @@ -882,7 +754,7 @@ func (d *jsonDecDriver) ContainerType() (vt valueType) { } else if d.tok == '[' { return valueTypeArray } else if d.tok == 'n' { - d.checkLit3([3]byte{'u', 'l', 'l'}, d.d.decRd.readn3()) + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) return valueTypeNil } else if d.tok == '"' { return valueTypeString @@ -890,48 +762,42 @@ func (d *jsonDecDriver) ContainerType() (vt valueType) { return valueTypeUnset } -func (d *jsonDecDriver) decNumBytes() (bs []byte) { +func (d *jsonDecDriver[T]) decNumBytes() (bs []byte) { d.advance() - dr := &d.d.decRd if d.tok == '"' { - bs = dr.readUntil('"') + bs = d.r.jsonReadUntilDblQuote() + d.tok = 0 } else if d.tok == 'n' { - d.checkLit3([3]byte{'u', 'l', 'l'}, dr.readn3()) + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) } else { - if jsonManualInlineDecRdInHotZones { - if dr.bytes { - bs = dr.rb.jsonReadNum() - } else { - bs = dr.ri.jsonReadNum() - } - } else { - bs = dr.jsonReadNum() - } + bs, d.tok = d.r.jsonReadNum() } - d.tok = 0 return } -func (d *jsonDecDriver) DecodeUint64() (u uint64) { +func (d *jsonDecDriver[T]) 
DecodeUint64() (u uint64) { b := d.decNumBytes() u, neg, ok := parseInteger_bytes(b) if neg { - d.d.errorf("negative number cannot be decoded as uint64") + halt.errorf("negative number cannot be decoded as uint64: %s", any(b)) } if !ok { - d.d.onerror(strconvParseErr(b, "ParseUint")) + halt.onerror(strconvParseErr(b, "ParseUint")) } return } -func (d *jsonDecDriver) DecodeInt64() (v int64) { - b := d.decNumBytes() +func (d *jsonDecDriver[T]) DecodeInt64() (v int64) { + return d.parseInt64(d.decNumBytes()) +} + +func (d *jsonDecDriver[T]) parseInt64(b []byte) (v int64) { u, neg, ok := parseInteger_bytes(b) if !ok { - d.d.onerror(strconvParseErr(b, "ParseInt")) + halt.onerror(strconvParseErr(b, "ParseInt")) } if chkOvf.Uint2Int(u, neg) { - d.d.errorf("overflow decoding number from %s", b) + halt.errorBytes("overflow decoding number from ", b) } if neg { v = -int64(u) @@ -941,85 +807,95 @@ func (d *jsonDecDriver) DecodeInt64() (v int64) { return } -func (d *jsonDecDriver) DecodeFloat64() (f float64) { +func (d *jsonDecDriver[T]) DecodeFloat64() (f float64) { var err error bs := d.decNumBytes() if len(bs) == 0 { return } f, err = parseFloat64(bs) - d.d.onerror(err) + halt.onerror(err) return } -func (d *jsonDecDriver) DecodeFloat32() (f float32) { +func (d *jsonDecDriver[T]) DecodeFloat32() (f float32) { var err error bs := d.decNumBytes() if len(bs) == 0 { return } f, err = parseFloat32(bs) - d.d.onerror(err) + halt.onerror(err) return } -func (d *jsonDecDriver) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { +func (d *jsonDecDriver[T]) advanceNil() (ok bool) { d.advance() if d.tok == 'n' { - d.checkLit3([3]byte{'u', 'l', 'l'}, d.d.decRd.readn3()) + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return true + } + return false +} + +func (d *jsonDecDriver[T]) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + if d.advanceNil() { return } - if ext == nil { - re := rv.(*RawExt) - re.Tag = xtag - d.d.decode(&re.Value) - } else if ext == SelfExt { - d.d.decodeValue(baseRV(rv), d.h.fnNoExt(basetype)) + if ext == SelfExt { + d.dec.decodeAs(rv, basetype, false) } else { - d.d.interfaceExtConvertAndDecode(rv, ext) + d.dec.interfaceExtConvertAndDecode(rv, ext) } } -func (d *jsonDecDriver) decBytesFromArray(bs []byte) []byte { - if bs != nil { - bs = bs[:0] +func (d *jsonDecDriver[T]) DecodeRawExt(re *RawExt) { + if d.advanceNil() { + return + } + d.dec.decode(&re.Value) +} + +func (d *jsonDecDriver[T]) decBytesFromArray(bs []byte) []byte { + d.advance() + if d.tok != ']' { + bs = append(bs, uint8(d.DecodeUint64())) + d.advance() } - d.tok = 0 - bs = append(bs, uint8(d.DecodeUint64())) - d.tok = d.d.decRd.skipWhitespace() // skip(&whitespaceCharBitset) for d.tok != ']' { if d.tok != ',' { - d.d.errorf("read array element - expect char '%c' but got char '%c'", ',', d.tok) + halt.errorByte("read array element - expect char ',' but got char: ", d.tok) } d.tok = 0 bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) - d.tok = d.d.decRd.skipWhitespace() // skip(&whitespaceCharBitset) + d.advance() } d.tok = 0 return bs } -func (d *jsonDecDriver) DecodeBytes(bs []byte) (bsOut []byte) { - d.d.decByteState = decByteStateNone +func (d *jsonDecDriver[T]) DecodeBytes() (bs []byte, state dBytesAttachState) { d.advance() + state = dBytesDetach if d.tok == 'n' { - d.checkLit3([3]byte{'u', 'l', 'l'}, d.d.decRd.readn3()) - return nil + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return } + state = dBytesAttachBuffer // if decoding into raw bytes, and 
the RawBytesExt is configured, use it to decode. if d.rawext { - bsOut = bs - d.d.interfaceExtConvertAndDecode(&bsOut, d.h.RawBytesExt) + d.buf = d.buf[:0] + d.dec.interfaceExtConvertAndDecode(&d.buf, d.h.RawBytesExt) + bs = d.buf return } // check if an "array" of uint8's (see ContainerType for how to infer if an array) if d.tok == '[' { + d.tok = 0 // bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) - if bs == nil { - d.d.decByteState = decByteStateReuseBuf - bs = d.d.b[:] - } - return d.decBytesFromArray(bs) + bs = d.decBytesFromArray(d.buf[:0]) + d.buf = bs + return } // base64 encodes []byte{} as "", and we encode nil []byte as null. @@ -1027,115 +903,88 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte) (bsOut []byte) { d.ensureReadingString() bs1 := d.readUnescapedString() + // base64 is most compact of supported formats; it's decodedlen is sufficient for all slen := base64.StdEncoding.DecodedLen(len(bs1)) if slen == 0 { - bsOut = []byte{} - } else if slen <= cap(bs) { - bsOut = bs[:slen] - } else if bs == nil { - d.d.decByteState = decByteStateReuseBuf - bsOut = d.d.blist.check(*d.buf, slen) - bsOut = bsOut[:slen] - *d.buf = bsOut + bs = zeroByteSlice + state = dBytesDetach + } else if slen <= cap(d.buf) { + bs = d.buf[:slen] } else { - bsOut = make([]byte, slen) + d.buf = d.d.blist.putGet(d.buf, slen)[:slen] + bs = d.buf } - slen2, err := base64.StdEncoding.Decode(bsOut, bs1) - if err != nil { - d.d.errorf("error decoding base64 binary '%s': %v", bs1, err) - } - if slen != slen2 { - bsOut = bsOut[:slen2] + var err error + for _, v := range d.byteFmters { + // slen := v.DecodedLen(len(bs1)) + slen, err = v.Decode(bs, bs1) + if err == nil { + bs = bs[:slen] + return + } } + halt.errorf("error decoding byte string '%s': %v", any(bs1), err) return } -func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) { - d.d.decByteState = decByteStateNone +func (d *jsonDecDriver[T]) DecodeStringAsBytes() (bs []byte, state dBytesAttachState) { d.advance() + var cond bool // common case - hoist outside the switch statement if d.tok == '"' { - return d.dblQuoteStringAsBytes() + d.tok = 0 + bs, cond = d.dblQuoteStringAsBytes() + state = d.d.attachState(cond) + return } + state = dBytesDetach // handle non-string scalar: null, true, false or a number switch d.tok { case 'n': - d.checkLit3([3]byte{'u', 'l', 'l'}, d.d.decRd.readn3()) - return nil // []byte{} + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + // out = nil // []byte{} case 'f': - d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.d.decRd.readn4()) - return jsonLitb[jsonLitF : jsonLitF+5] + d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.r.readn4()) + bs = jsonLitb[jsonLitF : jsonLitF+5] case 't': - d.checkLit3([3]byte{'r', 'u', 'e'}, d.d.decRd.readn3()) - return jsonLitb[jsonLitT : jsonLitT+4] + d.checkLit3([3]byte{'r', 'u', 'e'}, d.r.readn3()) + bs = jsonLitb[jsonLitT : jsonLitT+4] default: // try to parse a valid number - d.tok = 0 - return d.d.decRd.jsonReadNum() + bs, d.tok = d.r.jsonReadNum() + state = d.d.attachState(!d.d.bytes) } + return } -func (d *jsonDecDriver) ensureReadingString() { +func (d *jsonDecDriver[T]) ensureReadingString() { if d.tok != '"' { - d.d.errorf("expecting string starting with '\"'; got '%c'", d.tok) + halt.errorByte("expecting string starting with '\"'; got ", d.tok) } } -func (d *jsonDecDriver) readUnescapedString() (bs []byte) { +func (d *jsonDecDriver[T]) readUnescapedString() (bs []byte) { // d.ensureReadingString() - bs = d.d.decRd.readUntil('"') + bs = d.r.jsonReadUntilDblQuote() d.tok = 0 return } -func 
(d *jsonDecDriver) dblQuoteStringAsBytes() (buf []byte) { - checkUtf8 := d.h.ValidateUnicode - d.d.decByteState = decByteStateNone - // use a local buf variable, so we don't do pointer chasing within loop - buf = (*d.buf)[:0] - dr := &d.d.decRd - d.tok = 0 +func (d *jsonDecDriver[T]) dblQuoteStringAsBytes() (buf []byte, usingBuf bool) { + bs, c := d.r.jsonReadAsisChars() + if c == '"' { + return bs, !d.d.bytes + } + buf = append(d.buf[:0], bs...) - var bs []byte - var c byte - var firstTime bool = true + checkUtf8 := d.h.ValidateUnicode + usingBuf = true for { - if firstTime { - firstTime = false - if dr.bytes { - bs = dr.rb.jsonReadAsisChars() - if bs[len(bs)-1] == '"' { - d.d.decByteState = decByteStateZerocopy - return bs[:len(bs)-1] - } - goto APPEND - } - } - - if jsonManualInlineDecRdInHotZones { - if dr.bytes { - bs = dr.rb.jsonReadAsisChars() - } else { - bs = dr.ri.jsonReadAsisChars() - } - } else { - bs = dr.jsonReadAsisChars() - } - - APPEND: - _ = bs[0] // bounds check hint - slice must be > 0 elements - buf = append(buf, bs[:len(bs)-1]...) - c = bs[len(bs)-1] - - if c == '"' { - break - } - // c is now '\' - c = dr.readn1() + c = d.r.readn1() switch c { case '"', '\\', '/', '\'': @@ -1153,30 +1002,35 @@ func (d *jsonDecDriver) dblQuoteStringAsBytes() (buf []byte) { case 'u': rr := d.appendStringAsBytesSlashU() if checkUtf8 && rr == unicode.ReplacementChar { - d.d.errorf("invalid UTF-8 character found after: %s", buf) + d.buf = buf + halt.errorBytes("invalid UTF-8 character found after: ", buf) } buf = append(buf, d.bstr[:utf8.EncodeRune(d.bstr[:], rr)]...) default: - *d.buf = buf - d.d.errorf("unsupported escaped value: %c", c) + d.buf = buf + halt.errorByte("unsupported escaped value: ", c) + } + + bs, c = d.r.jsonReadAsisChars() + buf = append(buf, bs...) 
+ if c == '"' { + break } } - *d.buf = buf - d.d.decByteState = decByteStateReuseBuf + d.buf = buf return } -func (d *jsonDecDriver) appendStringAsBytesSlashU() (r rune) { +func (d *jsonDecDriver[T]) appendStringAsBytesSlashU() (r rune) { var rr uint32 - var csu [2]byte - var cs [4]byte = d.d.decRd.readn4() + cs := d.r.readn4() if rr = jsonSlashURune(cs); rr == unicode.ReplacementChar { return unicode.ReplacementChar } r = rune(rr) if utf16.IsSurrogate(r) { - csu = d.d.decRd.readn2() - cs = d.d.decRd.readn4() + csu := d.r.readn2() + cs = d.r.readn4() if csu[0] == '\\' && csu[1] == 'u' { if rr = jsonSlashURune(cs); rr == unicode.ReplacementChar { return unicode.ReplacementChar @@ -1188,49 +1042,22 @@ func (d *jsonDecDriver) appendStringAsBytesSlashU() (r rune) { return } -func jsonSlashURune(cs [4]byte) (rr uint32) { - for _, c := range cs { - // best to use explicit if-else - // - not a table, etc which involve memory loads, array lookup with bounds checks, etc - if c >= '0' && c <= '9' { - rr = rr*16 + uint32(c-jsonU4Chk2) - } else if c >= 'a' && c <= 'f' { - rr = rr*16 + uint32(c-jsonU4Chk1) - } else if c >= 'A' && c <= 'F' { - rr = rr*16 + uint32(c-jsonU4Chk0) - } else { - return unicode.ReplacementChar - } - } - return -} - -func (d *jsonDecDriver) nakedNum(z *fauxUnion, bs []byte) (err error) { - // Note: nakedNum is NEVER called with a zero-length []byte - if d.h.PreferFloat { - z.v = valueTypeFloat - z.f, err = parseFloat64(bs) - } else { - err = parseNumber(bs, z, d.h.SignedInteger) - } - return -} - -func (d *jsonDecDriver) DecodeNaked() { +func (d *jsonDecDriver[T]) DecodeNaked() { z := d.d.naked() d.advance() var bs []byte + var err error switch d.tok { case 'n': - d.checkLit3([3]byte{'u', 'l', 'l'}, d.d.decRd.readn3()) + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) z.v = valueTypeNil case 'f': - d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.d.decRd.readn4()) + d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.r.readn4()) z.v = valueTypeBool z.b = false case 't': - d.checkLit3([3]byte{'r', 'u', 'e'}, d.d.decRd.readn3()) + d.checkLit3([3]byte{'r', 'u', 'e'}, d.r.readn3()) z.v = valueTypeBool z.b = true case '{': @@ -1239,8 +1066,10 @@ func (d *jsonDecDriver) DecodeNaked() { z.v = valueTypeArray // don't consume. 
kInterfaceNaked will call ReadArrayStart case '"': // if a string, and MapKeyAsString, then try to decode it as a bool or number first - bs = d.dblQuoteStringAsBytes() - if jsonNakedBoolNullInQuotedStr && + d.tok = 0 + bs, z.b = d.dblQuoteStringAsBytes() + att := d.d.attachState(z.b) + if jsonNakedBoolNumInQuotedStr && d.h.MapKeyAsString && len(bs) > 0 && d.d.c == containerMapKey { switch string(bs) { // case "null": // nil is never quoted @@ -1251,213 +1080,146 @@ func (d *jsonDecDriver) DecodeNaked() { case "false": z.v = valueTypeBool z.b = false - default: - // check if a number: float, int or uint - if err := d.nakedNum(z, bs); err != nil { + default: // check if a number: float, int or uint + if err = jsonNakedNum(z, bs, d.h.PreferFloat, d.h.SignedInteger); err != nil { z.v = valueTypeString - z.s = d.d.stringZC(bs) + z.s = d.d.detach2Str(bs, att) } } } else { z.v = valueTypeString - z.s = d.d.stringZC(bs) + z.s = d.d.detach2Str(bs, att) } default: // number - bs = d.d.decRd.jsonReadNum() - d.tok = 0 + bs, d.tok = d.r.jsonReadNum() if len(bs) == 0 { - d.d.errorf("decode number from empty string") + halt.errorStr("decode number from empty string") } - if err := d.nakedNum(z, bs); err != nil { - d.d.errorf("decode number from %s: %v", bs, err) + if err = jsonNakedNum(z, bs, d.h.PreferFloat, d.h.SignedInteger); err != nil { + halt.errorf("decode number from %s: %v", any(bs), err) } } } -//---------------------- - -// JsonHandle is a handle for JSON encoding format. -// -// Json is comprehensively supported: -// - decodes numbers into interface{} as int, uint or float64 -// based on how the number looks and some config parameters e.g. PreferFloat, SignedInt, etc. -// - decode integers from float formatted numbers e.g. 1.27e+8 -// - decode any json value (numbers, bool, etc) from quoted strings -// - configurable way to encode/decode []byte . -// by default, encodes and decodes []byte using base64 Std Encoding -// - UTF-8 support for encoding and decoding -// -// It has better performance than the json library in the standard library, -// by leveraging the performance improvements of the codec library. -// -// In addition, it doesn't read more bytes than necessary during a decode, which allows -// reading multiple values from a stream containing json and non-json content. -// For example, a user can read a json value, then a cbor value, then a msgpack value, -// all from the same stream in sequence. -// -// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are -// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD. -// -// Note also that the float values for NaN, +Inf or -Inf are encoded as null, -// as suggested by NOTE 4 of the ECMA-262 ECMAScript Language Specification 5.1 edition. -// see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf . -// -// Note the following behaviour differences vs std-library encoding/json package: -// - struct field names matched in case-sensitive manner -type JsonHandle struct { - textEncodingType - BasicHandle - - // Indent indicates how a value is encoded. - // - If positive, indent by that number of spaces. - // - If negative, indent by that number of tabs. - Indent int8 - - // IntegerAsString controls how integers (signed and unsigned) are encoded. - // - // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. - // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. 
- // This can be mitigated by configuring how to encode integers. - // - // IntegerAsString interpretes the following values: - // - if 'L', then encode integers > 2^53 as a json string. - // - if 'A', then encode all integers as a json string - // containing the exact integer representation as a decimal. - // - else encode all integers as a json number (default) - IntegerAsString byte - - // HTMLCharsAsIs controls how to encode some special characters to html: < > & - // - // By default, we encode them as \uXXX - // to prevent security holes when served from some browsers. - HTMLCharsAsIs bool - - // PreferFloat says that we will default to decoding a number as a float. - // If not set, we will examine the characters of the number and decode as an - // integer type if it doesn't have any of the characters [.eE]. - PreferFloat bool - - // TermWhitespace says that we add a whitespace character - // at the end of an encoding. - // - // The whitespace is important, especially if using numbers in a context - // where multiple items are written to a stream. - TermWhitespace bool - - // MapKeyAsString says to encode all map keys as strings. - // - // Use this to enforce strict json output. - // The only caveat is that nil value is ALWAYS written as null (never as "null") - MapKeyAsString bool - - // _ uint64 // padding (cache line) - - // Note: below, we store hardly-used items e.g. RawBytesExt. - // These values below may straddle a cache line, but they are hardly-used, - // so shouldn't contribute to false-sharing except in rare cases. - - // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. - // If not configured, raw bytes are encoded to/from base64 text. - RawBytesExt InterfaceExt -} - -func (h *JsonHandle) isJson() bool { return true } - -// Name returns the name of the handle: json -func (h *JsonHandle) Name() string { return "json" } - -func (h *JsonHandle) desc(bd byte) string { return string(bd) } - -func (h *JsonHandle) typical() bool { - return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L' -} - -func (h *JsonHandle) newEncDriver() encDriver { - var e = &jsonEncDriver{h: h} - // var x []byte - // e.buf = &x - e.e.e = e - e.e.js = true - e.e.init(h) - e.reset() - return e -} - -func (h *JsonHandle) newDecDriver() decDriver { - var d = &jsonDecDriver{h: h} - var x []byte - d.buf = &x - d.d.d = d - d.d.js = true - d.d.jsms = h.MapKeyAsString - d.d.init(h) - d.reset() - return d -} - -func (e *jsonEncDriver) resetState() { +func (e *jsonEncDriver[T]) reset() { e.dl = 0 -} - -func (e *jsonEncDriver) reset() { - e.resetState() + // e.resetState() // (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) // cache values from the handle e.typical = e.h.typical() if e.h.HTMLCharsAsIs { - e.s = &jsonCharSafeSet + e.s = &jsonCharSafeBitset } else { - e.s = &jsonCharHtmlSafeSet + e.s = &jsonCharHtmlSafeBitset } - e.rawext = e.h.RawBytesExt != nil e.di = int8(e.h.Indent) e.d = e.h.Indent != 0 e.ks = e.h.MapKeyAsString e.is = e.h.IntegerAsString + + var ho jsonHandleOpts + ho.reset(e.h) + e.timeFmt = ho.timeFmt + e.bytesFmt = ho.bytesFmt + e.timeFmtLayout = "" + e.byteFmter = nil + if len(ho.timeFmtLayouts) > 0 { + e.timeFmtLayout = ho.timeFmtLayouts[0] + } + if len(ho.byteFmters) > 0 { + e.byteFmter = ho.byteFmters[0] + } + e.rawext = ho.rawext } -func (d *jsonDecDriver) resetState() { - *d.buf = d.d.blist.check(*d.buf, 256) +func (d *jsonDecDriver[T]) reset() { + d.buf = d.d.blist.check(d.buf, 256) 
d.tok = 0 + // d.resetState() + d.jsonHandleOpts.reset(d.h) } -func (d *jsonDecDriver) reset() { - d.resetState() - d.rawext = d.h.RawBytesExt != nil -} +// ---- +// +// The following below are similar across all format files (except for the format name). +// +// We keep them together here, so that we can easily copy and compare. -func jsonFloatStrconvFmtPrec64(f float64) (fmt byte, prec int8) { - fmt = 'f' - prec = -1 - fbits := math.Float64bits(f) - abs := math.Float64frombits(fbits &^ (1 << 63)) - if abs == 0 || abs == 1 { - prec = 1 - } else if abs < 1e-6 || abs >= 1e21 { - fmt = 'e' - } else if noFrac64(fbits) { - prec = 1 +// ---- + +func (d *jsonEncDriver[T]) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*JsonHandle) + d.e = shared + if shared.bytes { + fp = jsonFpEncBytes + } else { + fp = jsonFpEncIO } + // d.w.init() + d.init2(enc) return } -func jsonFloatStrconvFmtPrec32(f float32) (fmt byte, prec int8) { - fmt = 'f' - prec = -1 - // directly handle Modf (to get fractions) and Abs (to get absolute) - fbits := math.Float32bits(f) - abs := math.Float32frombits(fbits &^ (1 << 31)) - if abs == 0 || abs == 1 { - prec = 1 - } else if abs < 1e-6 || abs >= 1e21 { - fmt = 'e' - } else if noFrac32(fbits) { - prec = 1 +func (e *jsonEncDriver[T]) writeBytesAsis(b []byte) { e.w.writeb(b) } + +// func (e *jsonEncDriver[T]) writeStringAsisDblQuoted(v string) { e.w.writeqstr(v) } +func (e *jsonEncDriver[T]) writerEnd() { e.w.end() } + +func (e *jsonEncDriver[T]) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *jsonEncDriver[T]) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +// ---- + +func (d *jsonDecDriver[T]) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*JsonHandle) + d.d = shared + if shared.bytes { + fp = jsonFpDecBytes + } else { + fp = jsonFpDecIO } + // d.r.init() + d.init2(dec) return } -var _ decDriverContainerTracker = (*jsonDecDriver)(nil) -var _ encDriverContainerTracker = (*jsonEncDriver)(nil) -var _ decDriver = (*jsonDecDriver)(nil) -var _ encDriver = (*jsonEncDriver)(nil) +func (d *jsonDecDriver[T]) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *jsonDecDriver[T]) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *jsonDecDriver[T]) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +// ---- (custom stanza) + +func (d *jsonDecDriver[T]) descBd() (s string) { + halt.onerror(errJsonNoBd) + return +} + +func (d *jsonEncDriver[T]) init2(enc encoderI) { + d.enc = enc + // d.e.js = true +} + +func (d *jsonDecDriver[T]) init2(dec decoderI) { + d.dec = dec + // var x []byte + // d.buf = &x + // d.buf = new([]byte) + d.buf = d.buf[:0] + // d.d.js = true + d.d.jsms = d.h.MapKeyAsString +} diff --git a/vendor/github.com/ugorji/go/codec/json.mono.generated.go b/vendor/github.com/ugorji/go/codec/json.mono.generated.go new file mode 100644 index 000000000..12da25c30 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/json.mono.generated.go @@ -0,0 +1,8339 @@ +//go:build !notmono && !codec.notmono + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "encoding" + + "encoding/base64" + "io" + "math" + "reflect" + "slices" + "sort" + "strconv" + "sync" + "time" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +type helperEncDriverJsonBytes struct{} +type encFnJsonBytes struct { + i encFnInfo + fe func(*encoderJsonBytes, *encFnInfo, reflect.Value) +} +type encRtidFnJsonBytes struct { + rtid uintptr + fn *encFnJsonBytes +} +type encoderJsonBytes struct { + dh helperEncDriverJsonBytes + fp *fastpathEsJsonBytes + e jsonEncDriverBytes + encoderBase +} +type helperDecDriverJsonBytes struct{} +type decFnJsonBytes struct { + i decFnInfo + fd func(*decoderJsonBytes, *decFnInfo, reflect.Value) +} +type decRtidFnJsonBytes struct { + rtid uintptr + fn *decFnJsonBytes +} +type decoderJsonBytes struct { + dh helperDecDriverJsonBytes + fp *fastpathDsJsonBytes + d jsonDecDriverBytes + decoderBase +} +type jsonEncDriverBytes struct { + noBuiltInTypes + h *JsonHandle + e *encoderBase + s *bitset256 + + w bytesEncAppender + + enc encoderI + + timeFmtLayout string + byteFmter jsonBytesFmter + + timeFmt jsonTimeFmt + bytesFmt jsonBytesFmt + + di int8 + d bool + dl uint16 + + ks bool + is byte + + typical bool + + rawext bool + + b [48]byte +} +type jsonDecDriverBytes struct { + noBuiltInTypes + decDriverNoopNumberHelper + h *JsonHandle + d *decoderBase + + r bytesDecReader + + buf []byte + + tok uint8 + _ bool + _ byte + bstr [4]byte + + jsonHandleOpts + + dec decoderI +} + +func (e *encoderJsonBytes) rawExt(_ *encFnInfo, rv reflect.Value) { + if re := rv2i(rv).(*RawExt); re == nil { + e.e.EncodeNil() + } else { + e.e.EncodeRawExt(re) + } +} + +func (e *encoderJsonBytes) ext(f *encFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (e *encoderJsonBytes) selferMarshal(_ *encFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(&Encoder{e}) +} + +func (e *encoderJsonBytes) binaryMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) +} + +func (e *encoderJsonBytes) textMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) +} + +func (e *encoderJsonBytes) jsonMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) +} + +func (e *encoderJsonBytes) raw(_ *encFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *encoderJsonBytes) encodeComplex64(v complex64) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat32(real(v)) +} + +func (e *encoderJsonBytes) encodeComplex128(v complex128) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat64(real(v)) +} + +func (e *encoderJsonBytes) kBool(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeBool(rvGetBool(rv)) +} + +func (e *encoderJsonBytes) kTime(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeTime(rvGetTime(rv)) +} + +func (e *encoderJsonBytes) kString(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeString(rvGetString(rv)) +} + +func (e *encoderJsonBytes) kFloat32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(rvGetFloat32(rv)) +} + +func (e *encoderJsonBytes) kFloat64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rvGetFloat64(rv)) +} + +func (e *encoderJsonBytes) kComplex64(_ *encFnInfo, rv reflect.Value) { + 
e.encodeComplex64(rvGetComplex64(rv)) +} + +func (e *encoderJsonBytes) kComplex128(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex128(rvGetComplex128(rv)) +} + +func (e *encoderJsonBytes) kInt(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt(rv))) +} + +func (e *encoderJsonBytes) kInt8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt8(rv))) +} + +func (e *encoderJsonBytes) kInt16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt16(rv))) +} + +func (e *encoderJsonBytes) kInt32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt32(rv))) +} + +func (e *encoderJsonBytes) kInt64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt64(rv))) +} + +func (e *encoderJsonBytes) kUint(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint(rv))) +} + +func (e *encoderJsonBytes) kUint8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint8(rv))) +} + +func (e *encoderJsonBytes) kUint16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint16(rv))) +} + +func (e *encoderJsonBytes) kUint32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint32(rv))) +} + +func (e *encoderJsonBytes) kUint64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint64(rv))) +} + +func (e *encoderJsonBytes) kUintptr(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUintptr(rv))) +} + +func (e *encoderJsonBytes) kSeqFn(rt reflect.Type) (fn *encFnJsonBytes) { + + if rt = baseRT(rt); rt.Kind() != reflect.Interface { + fn = e.fn(rt) + } + return +} + +func (e *encoderJsonBytes) kArrayWMbs(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(l) + e.mapStart(l >> 1) + + var fn *encFnJsonBytes + builtin := ti.tielem.flagEncBuiltin + if !builtin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerMapKey + e.e.WriteMapElemKey(true) + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(false) + } else { + e.mapElemValue() + } + } + e.c = 0 + e.e.WriteMapEnd() + +} + +func (e *encoderJsonBytes) kArrayW(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l <= 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(l) + + var fn *encFnJsonBytes + if !ti.tielem.flagEncBuiltin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerArrayElem + e.e.WriteArrayElem(true) + builtin := ti.tielem.flagEncBuiltin + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + e.c = containerArrayElem + e.e.WriteArrayElem(false) + } + + e.c = 0 + e.e.WriteArrayEnd() +} + +func (e *encoderJsonBytes) kChan(f *encFnInfo, rv reflect.Value) { + if f.ti.chandir&uint8(reflect.RecvDir) == 0 { + halt.errorStr("send-only channel cannot be encoded") + } + if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) { + e.kSliceBytesChan(rv) + return + } + rtslice := reflect.SliceOf(f.ti.elem) + rv = chanToSlice(rv, rtslice, e.h.ChanRecvTimeout) + ti := e.h.getTypeInfo(rt2id(rtslice), rtslice) + if f.ti.mbs { + e.kArrayWMbs(rv, ti, true) + } else { + e.kArrayW(rv, ti, true) + } +} + +func (e *encoderJsonBytes) 
kSlice(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, true) + } else if f.ti.rtid == uint8SliceTypId || uint8TypId == rt2id(f.ti.elem) { + + e.e.EncodeBytes(rvGetBytes(rv)) + } else { + e.kArrayW(rv, f.ti, true) + } +} + +func (e *encoderJsonBytes) kArray(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, false) + } else if handleBytesWithinKArray && uint8TypId == rt2id(f.ti.elem) { + e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, nil)) + } else { + e.kArrayW(rv, f.ti, false) + } +} + +func (e *encoderJsonBytes) kSliceBytesChan(rv reflect.Value) { + + bs0 := e.blist.peek(32, true) + bs := bs0 + + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + +L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + + break L1 + } + } + default: + for b := range ch { + bs = append(bs, b) + } + } + + e.e.EncodeBytes(bs) + e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.blist.put(bs0) + } +} + +func (e *encoderJsonBytes) kStructFieldKey(keyType valueType, encName string) { + + if keyType == valueTypeString { + e.e.EncodeString(encName) + } else if keyType == valueTypeInt { + e.e.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + e.e.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + e.e.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64))) + } else { + halt.errorStr2("invalid struct key type: ", keyType.String()) + } + +} + +func (e *encoderJsonBytes) kStructSimple(f *encFnInfo, rv reflect.Value) { + _ = e.e + tisfi := f.ti.sfi.source() + + chkCirRef := e.h.CheckCircularRef + var si *structFieldInfo + var j int + + if f.ti.toArray || e.h.StructToArray { + if len(tisfi) == 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } else { + if len(tisfi) == 0 { + e.e.WriteMapEmpty() + return + } + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + e.mapStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.EncodeStringNoEscape4Json(si.encName) + e.mapElemValue() + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteMapEnd() + } +} + +func (e *encoderJsonBytes) kStruct(f *encFnInfo, rv reflect.Value) { + _ = e.e + ti := f.ti + toMap := !(ti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if ti.flagMissingFielder { + toMap = true + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + } else if ti.flagMissingFielderPtr { + toMap = true + if rv.CanAddr() { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder).CodecMissingFields() + } else { + mf = rv2i(e.addrRV(rv, ti.rt, ti.ptr)).(MissingFielder).CodecMissingFields() + } + } + newlen := len(mf) + tisfi := ti.sfi.source() + newlen += len(tisfi) + + var fkvs = e.slist.get(newlen)[:newlen] + + recur := e.h.RecursiveEmptyCheck + chkCirRef := e.h.CheckCircularRef + + var xlen int + + var kv sfiRv + var j int + var sf 
encStructFieldObj + if toMap { + newlen = 0 + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + for _, si := range tisfi { + + if si.omitEmpty { + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyValue(kv.r, e.h.TypeInfos, recur) { + continue + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[newlen] = kv + newlen++ + } + + var mf2s []stringIntf + if len(mf) != 0 { + mf2s = make([]stringIntf, 0, len(mf)) + for k, v := range mf { + if k == "" { + continue + } + if ti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur) { + continue + } + mf2s = append(mf2s, stringIntf{k, v}) + } + } + + xlen = newlen + len(mf2s) + if xlen == 0 { + e.e.WriteMapEmpty() + goto END + } + + e.mapStart(xlen) + + if len(mf2s) != 0 && e.h.Canonical { + mf2w := make([]encStructFieldObj, newlen+len(mf2s)) + for j = 0; j < newlen; j++ { + kv = fkvs[j] + mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, true, + !kv.v.encNameEscape4Json, kv.v.encBuiltin} + } + for _, v := range mf2s { + mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false, false} + j++ + } + sort.Sort((encStructFieldObjSlice)(mf2w)) + for j, sf = range mf2w { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && sf.noEsc4json { + e.e.EncodeStringNoEscape4Json(sf.key) + } else { + e.kStructFieldKey(ti.keyType, sf.key) + } + e.mapElemValue() + if sf.isRv { + if sf.builtin { + e.encodeIB(rv2i(baseRVRV(sf.rv))) + } else { + e.encodeValue(sf.rv, nil) + } + } else { + if !e.encodeBuiltin(sf.intf) { + e.encodeR(reflect.ValueOf(sf.intf)) + } + + } + } + } else { + keytyp := ti.keyType + for j = 0; j < newlen; j++ { + kv = fkvs[j] + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && !kv.v.encNameEscape4Json { + e.e.EncodeStringNoEscape4Json(kv.v.encName) + } else { + e.kStructFieldKey(keytyp, kv.v.encName) + } + e.mapElemValue() + if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + for _, v := range mf2s { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.kStructFieldKey(keytyp, v.v) + e.mapElemValue() + if !e.encodeBuiltin(v.i) { + e.encodeR(reflect.ValueOf(v.i)) + } + + j++ + } + } + + e.c = 0 + e.e.WriteMapEnd() + } else { + newlen = len(tisfi) + for i, si := range tisfi { + + if si.omitEmpty { + + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyContainerValue(kv.r, e.h.TypeInfos, recur) { + kv.r = reflect.Value{} + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[i] = kv + } + + if newlen == 0 { + e.e.WriteArrayEmpty() + goto END + } + + e.arrayStart(newlen) + for j = 0; j < newlen; j++ { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + kv = fkvs[j] + if !kv.r.IsValid() { + e.e.EncodeNil() + } else if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } + +END: + + e.slist.put(fkvs) +} + +func (e *encoderJsonBytes) kMap(f *encFnInfo, rv reflect.Value) { + _ = e.e + l := rvLenMap(rv) + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.mapStart(l) + + var keyFn, valFn *encFnJsonBytes + + ktypeKind := reflect.Kind(f.ti.keykind) + vtypeKind := reflect.Kind(f.ti.elemkind) + + rtval := f.ti.elem + rtvalkind := vtypeKind + for rtvalkind == reflect.Ptr { + rtval = rtval.Elem() + rtvalkind = rtval.Kind() + } + if rtvalkind != reflect.Interface { + valFn = e.fn(rtval) + } + + var rvv = mapAddrLoopvarRV(f.ti.elem, 
vtypeKind) + + rtkey := f.ti.key + var keyTypeIsString = stringTypId == rt2id(rtkey) + if keyTypeIsString { + keyFn = e.fn(rtkey) + } else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + keyFn = e.fn(rtkey) + } + } + + if e.h.Canonical { + e.kMapCanonical(f.ti, rv, rvv, keyFn, valFn) + e.c = 0 + e.e.WriteMapEnd() + return + } + + var rvk = mapAddrLoopvarRV(f.ti.key, ktypeKind) + + var it mapIter + mapRange(&it, rv, rvk, rvv, true) + + kbuiltin := f.ti.tikey.flagEncBuiltin + vbuiltin := f.ti.tielem.flagEncBuiltin + for j := 0; it.Next(); j++ { + rv = it.Key() + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if keyTypeIsString { + e.e.EncodeString(rvGetString(rv)) + } else if kbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(rv, keyFn) + } + e.mapElemValue() + rv = it.Value() + if vbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(it.Value(), valFn) + } + } + it.Done() + + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonBytes) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valFn *encFnJsonBytes) { + _ = e.e + + rtkey := ti.key + rtkeydecl := rtkey.PkgPath() == "" && rtkey.Name() != "" + + mks := rv.MapKeys() + rtkeyKind := rtkey.Kind() + mparams := getMapReqParams(ti) + + switch rtkeyKind { + case reflect.Bool: + + if len(mks) == 2 && mks[0].Bool() { + mks[0], mks[1] = mks[1], mks[0] + } + for i := range mks { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeBool(mks[i].Bool()) + } else { + e.encodeValueNonNil(mks[i], keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mks[i], rvv, mparams), valFn) + } + case reflect.String: + mksv := make([]orderedRv[string], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rvGetString(k) + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeString(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]orderedRv[uint64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeUint(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]orderedRv[int64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeInt(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float32: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat32(float32(mksv[i].v)) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + 
e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float64: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat64(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + default: + if rtkey == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + slices.SortFunc(mksv, cmpTimeRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeTime(mksv[i].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + break + } + + bs0 := e.blist.get(len(mks) * 16) + mksv := bs0 + mksbv := make([]bytesRv, len(mks)) + + sideEncode(e.hh, &e.h.sideEncPool, func(se encoderI) { + se.ResetBytes(&mksv) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + se.setContainerState(containerMapKey) + se.encodeR(baseRVRV(k)) + se.atEndOfEncode() + se.writerEnd() + v.r = k + v.v = mksv[l:] + } + }) + + slices.SortFunc(mksbv, cmpBytesRv) + for j := range mksbv { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.writeBytesAsis(mksbv[j].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksbv[j].r, rvv, mparams), valFn) + } + e.blist.put(mksv) + if !byteSliceSameData(bs0, mksv) { + e.blist.put(bs0) + } + } +} + +func (e *encoderJsonBytes) init(h Handle) { + initHandle(h) + callMake(&e.e) + e.hh = h + e.h = h.getBasicHandle() + + e.err = errEncoderNotInitialized + + e.fp = e.e.init(h, &e.encoderBase, e).(*fastpathEsJsonBytes) + + if e.bytes { + e.rtidFn = &e.h.rtidFnsEncBytes + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtBytes + } else { + e.rtidFn = &e.h.rtidFnsEncIO + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtIO + } + + e.reset() +} + +func (e *encoderJsonBytes) reset() { + e.e.reset() + if e.ci != nil { + e.ci = e.ci[:0] + } + e.c = 0 + e.calls = 0 + e.seq = 0 + e.err = nil +} + +func (e *encoderJsonBytes) Encode(v interface{}) (err error) { + + defer panicValToErr(e, callRecoverSentinel, &e.err, &err, debugging) + e.mustEncode(v) + return +} + +func (e *encoderJsonBytes) MustEncode(v interface{}) { + defer panicValToErr(e, callRecoverSentinel, &e.err, nil, true) + e.mustEncode(v) + return +} + +func (e *encoderJsonBytes) mustEncode(v interface{}) { + halt.onerror(e.err) + if e.hh == nil { + halt.onerror(errNoFormatHandle) + } + + e.calls++ + if !e.encodeBuiltin(v) { + e.encodeR(reflect.ValueOf(v)) + } + + e.calls-- + if e.calls == 0 { + e.e.atEndOfEncode() + e.e.writerEnd() + } +} + +func (e *encoderJsonBytes) encodeI(iv interface{}) { + if !e.encodeBuiltin(iv) { + e.encodeR(reflect.ValueOf(iv)) + } +} + +func (e *encoderJsonBytes) encodeIB(iv interface{}) { + if !e.encodeBuiltin(iv) { + + halt.errorStr("[should not happen] invalid type passed to encodeBuiltin") + } +} + +func (e *encoderJsonBytes) encodeR(base reflect.Value) { + e.encodeValue(base, nil) +} + +func (e *encoderJsonBytes) encodeBuiltin(iv interface{}) (ok bool) { + ok = true + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + + case Raw: + e.rawBytes(v) + case string: + e.e.EncodeString(v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + 
e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case complex64: + e.encodeComplex64(v) + case complex128: + e.encodeComplex128(v) + case time.Time: + e.e.EncodeTime(v) + case []byte: + e.e.EncodeBytes(v) + default: + + ok = !skipFastpathTypeSwitchInDirectCall && e.dh.fastpathEncodeTypeSwitch(iv, e) + } + return +} + +func (e *encoderJsonBytes) encodeValue(rv reflect.Value, fn *encFnJsonBytes) { + + var ciPushes int + + var rvp reflect.Value + var rvpValid bool + +RV: + switch rv.Kind() { + case reflect.Ptr: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = true + rvp = rv + rv = rv.Elem() + + if e.h.CheckCircularRef && e.ci.canPushElemKind(rv.Kind()) { + e.ci.push(rv2i(rvp)) + ciPushes++ + } + goto RV + case reflect.Interface: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = false + rvp = reflect.Value{} + rv = rv.Elem() + fn = nil + goto RV + case reflect.Map: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Slice, reflect.Chan: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + goto END + } + + if fn == nil { + fn = e.fn(rv.Type()) + } + + if !fn.i.addrE { + + } else if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + fn.fe(e, &fn.i, rv) + +END: + if ciPushes > 0 { + e.ci.pop(ciPushes) + } +} + +func (e *encoderJsonBytes) encodeValueNonNil(rv reflect.Value, fn *encFnJsonBytes) { + + if fn.i.addrE { + if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + } + fn.fe(e, &fn.i, rv) +} + +func (e *encoderJsonBytes) encodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + e.encodeValue(baseRV(v), e.fn(t)) + } else { + e.encodeValue(baseRV(v), e.fnNoExt(t)) + } +} + +func (e *encoderJsonBytes) marshalUtf8(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeString(stringView(bs)) + } +} + +func (e *encoderJsonBytes) marshalAsis(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.writeBytesAsis(bs) + } +} + +func (e *encoderJsonBytes) marshalRaw(bs []byte, fnerr error) { + halt.onerror(fnerr) + e.e.EncodeBytes(bs) +} + +func (e *encoderJsonBytes) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + halt.errorBytes("Raw values cannot be encoded: ", v) + } + e.e.writeBytesAsis(v) +} + +func (e *encoderJsonBytes) fn(t reflect.Type) *encFnJsonBytes { + return e.dh.encFnViaBH(t, e.rtidFn, e.h, e.fp, false) +} + +func (e *encoderJsonBytes) fnNoExt(t reflect.Type) *encFnJsonBytes { + return e.dh.encFnViaBH(t, e.rtidFnNoExt, e.h, e.fp, true) +} + +func (e *encoderJsonBytes) mapStart(length int) { + e.e.WriteMapStart(length) + e.c = containerMapStart +} + +func (e *encoderJsonBytes) mapElemValue() { + e.e.WriteMapElemValue() + e.c = containerMapValue +} + +func (e *encoderJsonBytes) arrayStart(length int) { + e.e.WriteArrayStart(length) + e.c = containerArrayStart 
+} + +func (e *encoderJsonBytes) writerEnd() { + e.e.writerEnd() +} + +func (e *encoderJsonBytes) atEndOfEncode() { + e.e.atEndOfEncode() +} + +func (e *encoderJsonBytes) Reset(w io.Writer) { + if e.bytes { + halt.onerror(errEncNoResetBytesWithWriter) + } + e.reset() + if w == nil { + w = io.Discard + } + e.e.resetOutIO(w) +} + +func (e *encoderJsonBytes) ResetBytes(out *[]byte) { + if !e.bytes { + halt.onerror(errEncNoResetWriterWithBytes) + } + e.resetBytes(out) +} + +func (e *encoderJsonBytes) resetBytes(out *[]byte) { + e.reset() + if out == nil { + out = &bytesEncAppenderDefOut + } + e.e.resetOutBytes(out) +} + +func (helperEncDriverJsonBytes) newEncoderBytes(out *[]byte, h Handle) *encoderJsonBytes { + var c1 encoderJsonBytes + c1.bytes = true + c1.init(h) + c1.ResetBytes(out) + return &c1 +} + +func (helperEncDriverJsonBytes) newEncoderIO(out io.Writer, h Handle) *encoderJsonBytes { + var c1 encoderJsonBytes + c1.bytes = false + c1.init(h) + c1.Reset(out) + return &c1 +} + +func (helperEncDriverJsonBytes) encFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathEsJsonBytes) (f *fastpathEJsonBytes, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperEncDriverJsonBytes) encFindRtidFn(s []encRtidFnJsonBytes, rtid uintptr) (i uint, fn *encFnJsonBytes) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperEncDriverJsonBytes) encFromRtidFnSlice(fns *atomicRtidFnSlice) (s []encRtidFnJsonBytes) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]encRtidFnJsonBytes](v)) + } + return +} + +func (dh helperEncDriverJsonBytes) encFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, + x *BasicHandle, fp *fastpathEsJsonBytes, checkExt bool) (fn *encFnJsonBytes) { + return dh.encFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperEncDriverJsonBytes) encFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsJsonBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnJsonBytes) { + rtid := rt2id(rt) + var sp []encRtidFnJsonBytes = dh.encFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.encFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.encFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperEncDriverJsonBytes) encFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsJsonBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnJsonBytes) { + + fn = dh.encFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []encRtidFnJsonBytes + mu.Lock() + sp = dh.encFromRtidFnSlice(fns) + + if sp == nil { + sp = []encRtidFnJsonBytes{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.encFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]encRtidFnJsonBytes, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = 
encRtidFnJsonBytes{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperEncDriverJsonBytes) encFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathEsJsonBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnJsonBytes) { + fn = new(encFnJsonBytes) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + if rtid == timeTypId && timeBuiltin { + fn.fe = (*encoderJsonBytes).kTime + } else if rtid == rawTypId { + fn.fe = (*encoderJsonBytes).raw + } else if rtid == rawExtTypId { + fn.fe = (*encoderJsonBytes).rawExt + fi.addrE = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*encoderJsonBytes).ext + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fe = (*encoderJsonBytes).selferMarshal + fi.addrE = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fe = (*encoderJsonBytes).binaryMarshal + fi.addrE = ti.flagBinaryMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fe = (*encoderJsonBytes).jsonMarshal + fi.addrE = ti.flagJsonMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fe = (*encoderJsonBytes).textMarshal + fi.addrE = ti.flagTextMarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fe = fp[idx].encfn + } + } else { + + xfe, xrt := dh.encFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf := xfe.encfn + fn.fe = func(e *encoderJsonBytes, xf *encFnInfo, xrv reflect.Value) { + xfnf(e, xf, rvConvert(xrv, xrt)) + } + } + } + } + if fn.fe == nil { + switch rk { + case reflect.Bool: + fn.fe = (*encoderJsonBytes).kBool + case reflect.String: + + fn.fe = (*encoderJsonBytes).kString + case reflect.Int: + fn.fe = (*encoderJsonBytes).kInt + case reflect.Int8: + fn.fe = (*encoderJsonBytes).kInt8 + case reflect.Int16: + fn.fe = (*encoderJsonBytes).kInt16 + case reflect.Int32: + fn.fe = (*encoderJsonBytes).kInt32 + case reflect.Int64: + fn.fe = (*encoderJsonBytes).kInt64 + case reflect.Uint: + fn.fe = (*encoderJsonBytes).kUint + case reflect.Uint8: + fn.fe = (*encoderJsonBytes).kUint8 + case reflect.Uint16: + fn.fe = (*encoderJsonBytes).kUint16 + case reflect.Uint32: + fn.fe = (*encoderJsonBytes).kUint32 + case reflect.Uint64: + fn.fe = (*encoderJsonBytes).kUint64 + case reflect.Uintptr: + fn.fe = (*encoderJsonBytes).kUintptr + case reflect.Float32: + fn.fe = (*encoderJsonBytes).kFloat32 + case reflect.Float64: + fn.fe = (*encoderJsonBytes).kFloat64 + case reflect.Complex64: + fn.fe = (*encoderJsonBytes).kComplex64 + case reflect.Complex128: + fn.fe = (*encoderJsonBytes).kComplex128 + case reflect.Chan: + fn.fe = (*encoderJsonBytes).kChan + case reflect.Slice: + fn.fe = (*encoderJsonBytes).kSlice + case reflect.Array: + fn.fe = (*encoderJsonBytes).kArray + case 
reflect.Struct: + if ti.simple { + fn.fe = (*encoderJsonBytes).kStructSimple + } else { + fn.fe = (*encoderJsonBytes).kStruct + } + case reflect.Map: + fn.fe = (*encoderJsonBytes).kMap + case reflect.Interface: + + fn.fe = (*encoderJsonBytes).kErr + default: + + fn.fe = (*encoderJsonBytes).kErr + } + } + } + return +} +func (d *decoderJsonBytes) rawExt(f *decFnInfo, rv reflect.Value) { + d.d.DecodeRawExt(rv2i(rv).(*RawExt)) +} + +func (d *decoderJsonBytes) ext(f *decFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (d *decoderJsonBytes) selferUnmarshal(_ *decFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(&Decoder{d}) +} + +func (d *decoderJsonBytes) binaryUnmarshal(_ *decFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs, _ := d.d.DecodeBytes() + fnerr := bm.UnmarshalBinary(xbs) + halt.onerror(fnerr) +} + +func (d *decoderJsonBytes) textUnmarshal(_ *decFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(bytesOKs(d.d.DecodeStringAsBytes())) + halt.onerror(fnerr) +} + +func (d *decoderJsonBytes) jsonUnmarshal(_ *decFnInfo, rv reflect.Value) { + d.jsonUnmarshalV(rv2i(rv).(jsonUnmarshaler)) +} + +func (d *decoderJsonBytes) jsonUnmarshalV(tm jsonUnmarshaler) { + + halt.onerror(tm.UnmarshalJSON(d.d.nextValueBytes())) +} + +func (d *decoderJsonBytes) kErr(_ *decFnInfo, rv reflect.Value) { + halt.errorf("unsupported decoding kind: %s, for %#v", rv.Kind(), rv) + +} + +func (d *decoderJsonBytes) raw(_ *decFnInfo, rv reflect.Value) { + rvSetBytes(rv, d.rawBytes()) +} + +func (d *decoderJsonBytes) kString(_ *decFnInfo, rv reflect.Value) { + rvSetString(rv, d.detach2Str(d.d.DecodeStringAsBytes())) +} + +func (d *decoderJsonBytes) kBool(_ *decFnInfo, rv reflect.Value) { + rvSetBool(rv, d.d.DecodeBool()) +} + +func (d *decoderJsonBytes) kTime(_ *decFnInfo, rv reflect.Value) { + rvSetTime(rv, d.d.DecodeTime()) +} + +func (d *decoderJsonBytes) kFloat32(_ *decFnInfo, rv reflect.Value) { + rvSetFloat32(rv, d.d.DecodeFloat32()) +} + +func (d *decoderJsonBytes) kFloat64(_ *decFnInfo, rv reflect.Value) { + rvSetFloat64(rv, d.d.DecodeFloat64()) +} + +func (d *decoderJsonBytes) kComplex64(_ *decFnInfo, rv reflect.Value) { + rvSetComplex64(rv, complex(d.d.DecodeFloat32(), 0)) +} + +func (d *decoderJsonBytes) kComplex128(_ *decFnInfo, rv reflect.Value) { + rvSetComplex128(rv, complex(d.d.DecodeFloat64(), 0)) +} + +func (d *decoderJsonBytes) kInt(_ *decFnInfo, rv reflect.Value) { + rvSetInt(rv, int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))) +} + +func (d *decoderJsonBytes) kInt8(_ *decFnInfo, rv reflect.Value) { + rvSetInt8(rv, int8(chkOvf.IntV(d.d.DecodeInt64(), 8))) +} + +func (d *decoderJsonBytes) kInt16(_ *decFnInfo, rv reflect.Value) { + rvSetInt16(rv, int16(chkOvf.IntV(d.d.DecodeInt64(), 16))) +} + +func (d *decoderJsonBytes) kInt32(_ *decFnInfo, rv reflect.Value) { + rvSetInt32(rv, int32(chkOvf.IntV(d.d.DecodeInt64(), 32))) +} + +func (d *decoderJsonBytes) kInt64(_ *decFnInfo, rv reflect.Value) { + rvSetInt64(rv, d.d.DecodeInt64()) +} + +func (d *decoderJsonBytes) kUint(_ *decFnInfo, rv reflect.Value) { + rvSetUint(rv, uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderJsonBytes) kUintptr(_ *decFnInfo, rv reflect.Value) { + rvSetUintptr(rv, uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderJsonBytes) kUint8(_ *decFnInfo, rv reflect.Value) { + rvSetUint8(rv, uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))) +} + +func (d 
*decoderJsonBytes) kUint16(_ *decFnInfo, rv reflect.Value) { + rvSetUint16(rv, uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))) +} + +func (d *decoderJsonBytes) kUint32(_ *decFnInfo, rv reflect.Value) { + rvSetUint32(rv, uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))) +} + +func (d *decoderJsonBytes) kUint64(_ *decFnInfo, rv reflect.Value) { + rvSetUint64(rv, d.d.DecodeUint64()) +} + +func (d *decoderJsonBytes) kInterfaceNaked(f *decFnInfo) (rvn reflect.Value) { + + n := d.naked() + d.d.DecodeNaked() + + if decFailNonEmptyIntf && f.ti.numMeth > 0 { + halt.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + } + + switch n.v { + case valueTypeMap: + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapStrIntfTypId { + var v2 map[string]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + + rvn = rvZeroAddrK(d.h.MapType, reflect.Map) + d.decodeValue(rvn, nil) + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvZeroAddrK(d.h.SliceType, reflect.Slice) + d.decodeValue(rvn, nil) + } + if d.h.PreferArrayOverSlice { + rvn = rvGetArray4Slice(rvn) + } + case valueTypeExt: + tag, bytes := n.u, n.l + bfn := d.h.getExtForTag(tag) + var re = RawExt{Tag: tag} + if bytes == nil { + + if bfn == nil { + d.decode(&re.Value) + rvn = rv4iptr(&re).Elem() + } else if bfn.ext == SelfExt { + rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind()) + d.decodeValue(rvn, d.fnNoExt(bfn.rt)) + } else { + rvn = reflect.New(bfn.rt) + d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext) + rvn = rvn.Elem() + } + } else { + + if bfn == nil { + re.setData(bytes, false) + rvn = rv4iptr(&re).Elem() + } else { + rvn = reflect.New(bfn.rt) + if bfn.ext == SelfExt { + sideDecode(d.hh, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv2i(rvn), bytes, bfn.rt, true) }) + } else { + bfn.ext.ReadExt(rv2i(rvn), bytes) + } + rvn = rvn.Elem() + } + } + + if d.h.PreferPointerForStructOrArray && rvn.CanAddr() { + if rk := rvn.Kind(); rk == reflect.Array || rk == reflect.Struct { + rvn = rvn.Addr() + } + } + case valueTypeNil: + + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + halt.errorStr2("kInterfaceNaked: unexpected valueType: ", n.v.String()) + } + return +} + +func (d *decoderJsonBytes) kInterface(f *decFnInfo, rv reflect.Value) { + + isnilrv := rvIsNil(rv) + + var rvn reflect.Value + + if d.h.InterfaceReset { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } else if !isnilrv { + decSetNonNilRV2Zero4Intf(rv) + } + return + } + } else if isnilrv { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } + return + } + } else { + + rvn = rv.Elem() + } + + canDecode, _ := isDecodeable(rvn) + + if !canDecode { + rvn2 
:= d.oneShotAddrRV(rvn.Type(), rvn.Kind()) + rvSetDirect(rvn2, rvn) + rvn = rvn2 + } + + d.decodeValue(rvn, nil) + rvSetIntf(rv, rvn) +} + +func (d *decoderJsonBytes) kStructField(si *structFieldInfo, rv reflect.Value) { + if d.d.TryNil() { + rv = si.fieldNoAlloc(rv, true) + if rv.IsValid() { + decSetNonNilRV2Zero(rv) + } + } else if si.decBuiltin { + rv = rvAddr(si.fieldAlloc(rv), si.ptrTyp) + d.decode(rv2i(rv)) + } else { + fn := d.fn(si.baseTyp) + rv = si.fieldAlloc(rv) + if fn.i.addrD { + rv = rvAddr(rv, si.ptrTyp) + } + fn.fd(d, &fn.i, rv) + } +} + +func (d *decoderJsonBytes) kStructSimple(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var rvkencname []byte + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderJsonBytes) kStruct(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + var mf MissingFielder + if ti.flagMissingFielder { + mf = rv2i(rv).(MissingFielder) + } else if ti.flagMissingFielderPtr { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var name2 []byte + var rvkencname []byte + tkt := ti.keyType + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + + if tkt == valueTypeString { + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + } else if tkt == valueTypeInt { + rvkencname = strconv.AppendInt(d.b[:0], d.d.DecodeInt64(), 10) + } else if tkt == valueTypeUint { + rvkencname = strconv.AppendUint(d.b[:0], d.d.DecodeUint64(), 10) + } else if tkt == valueTypeFloat { + rvkencname = strconv.AppendFloat(d.b[:0], d.d.DecodeFloat64(), 'f', -1, 64) + } else { + halt.errorStr2("invalid struct key type: ", ti.keyType.String()) + } + + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else if mf != nil { + + name2 = append(name2[:0], rvkencname...) 
+ var f interface{} + d.decode(&f) + if !mf.CodecMissingField(name2, f) && d.h.ErrorIfNoField { + halt.errorStr2("no matching struct field when decoding stream map with key: ", stringView(name2)) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderJsonBytes) kSlice(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + rvCanset := rv.CanSet() + + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + rvbs := rvGetBytes(rv) + if rvCanset { + bs2, bst := d.decodeBytesInto(rvbs, false) + if bst != dBytesIntoParamOut { + rvSetBytes(rv, bs2) + } + } else { + + d.decodeBytesInto(rvbs[:len(rvbs):len(rvbs)], true) + } + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset { + if rvIsNil(rv) { + rvSetDirect(rv, rvSliceZeroCap(ti.rt)) + } else { + rvSetSliceLen(rv, 0) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem0Mut := !scalarBitset.isset(ti.elemkind) + rtelem := ti.elem + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnJsonBytes + + var rvChanged bool + + var rv0 = rv + var rv9 reflect.Value + + rvlen := rvLenSlice(rv) + rvcap := rvCapSlice(rv) + maxInitLen := d.maxInitLen() + hasLen := containerLenS >= 0 + if hasLen { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen1 := int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + if rvlen1 == rvlen { + } else if rvlen1 <= rvcap { + if rvCanset { + rvlen = rvlen1 + rvSetSliceLen(rv, rvlen) + } + } else if rvCanset { + rvlen = rvlen1 + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && rtelem0Mut { + rvCopySlice(rv, rv0, rtelem) + } + } else if containerLenS != rvlen { + if rvCanset { + rvlen = containerLenS + rvSetSliceLen(rv, rvlen) + } + } + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } + + var j int + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if rvCanset { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into 
non-settable slice") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + + if rvlen < rvcap { + rvlen = rvcap + if rvCanset { + rvSetSliceLen(rv, rvlen) + } else if rvChanged { + rv = rvSlice(rv, rvlen) + } else { + halt.onerror(errExpandSliceCannotChange) + } + } else { + if !(rvCanset || rvChanged) { + halt.onerror(errExpandSliceCannotChange) + } + rv, rvcap, rvCanset = rvGrowSlice(rv, f.ti, rvcap, 1) + + rvlen = rvcap + rvChanged = !rvCanset + } + } + + rv9 = rvArrayIndex(rv, j, f.ti, true) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if j < rvlen { + if rvCanset { + rvSetSliceLen(rv, j) + } else if rvChanged { + rv = rvSlice(rv, j) + } + + } else if j == 0 && rvIsNil(rv) { + if rvCanset { + rv = rvSliceZeroCap(ti.rt) + rvCanset = false + rvChanged = true + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } +} + +func (d *decoderJsonBytes) kArray(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + ctyp := d.d.ContainerType() + if handleBytesWithinKArray && (ctyp == valueTypeBytes || ctyp == valueTypeString) { + + if ti.elemkind != uint8(reflect.Uint8) { + halt.errorf("bytes/string in stream can decode into array of bytes, but not %v", ti.rt) + } + rvbs := rvGetArrayBytes(rv, nil) + d.decodeBytesInto(rvbs, true) + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var rv9 reflect.Value + + rvlen := rv.Len() + hasLen := containerLenS >= 0 + if hasLen && containerLenS > rvlen { + halt.errorf("cannot decode into array with length: %v, less than container length: %v", any(rvlen), any(containerLenS)) + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + var fn *decFnJsonBytes + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } else { + fn = d.fn(rtelem) + } + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + d.arrayCannotExpand(rvlen, j+1) + d.swallow() + continue + } + + rv9 = rvArrayIndex(rv, j, f.ti, false) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + 
+func (d *decoderJsonBytes) kChan(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + if ti.chandir&uint8(reflect.SendDir) == 0 { + halt.errorStr("receive-only channel cannot be decoded") + } + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + bs2, _ := d.d.DecodeBytes() + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + return + } + + var rvCanset = rv.CanSet() + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset && rvIsNil(rv) { + rvSetDirect(rv, reflect.MakeChan(ti.rt, 0)) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + useTransient := decUseTransient && ti.elemkind != byte(reflect.Ptr) && ti.tielem.flagCanTransient + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnJsonBytes + + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + var rvlen int + hasLen := containerLenS >= 0 + maxInitLen := d.maxInitLen() + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if hasLen { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + } else { + rvlen = decDefChanCap + } + if rvCanset { + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } else { + halt.errorStr("cannot decode into non-settable chan") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if rv9.IsValid() { + rvSetZero(rv9) + } else if useTransient { + rv9 = d.perType.TransientAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } else { + rv9 = rvZeroAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } + if !d.d.TryNil() { + d.decodeValueNoCheckNil(rv9, fn) + } + rv.Send(rv9) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } + +} + +func (d *decoderJsonBytes) kMap(f *decFnInfo, rv reflect.Value) { + _ = d.d + containerLen := d.mapStart(d.d.ReadMapStart()) + ti := f.ti + if rvIsNil(rv) { + rvlen := int(decInferLen(containerLen, d.maxInitLen(), uint(ti.keysize+ti.elemsize))) + rvSetDirect(rv, makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + d.mapEnd() + return + } + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := reflect.Kind(ti.elemkind) + ktypeKind := reflect.Kind(ti.keykind) + mparams := getMapReqParams(ti) + + vtypePtr := vtypeKind == reflect.Ptr + ktypePtr := ktypeKind == reflect.Ptr + + vTransient := decUseTransient && !vtypePtr && ti.tielem.flagCanTransient + + kTransient := vTransient && !ktypePtr && ti.tikey.flagCanTransient + + var vtypeElem reflect.Type + + var keyFn, valFn *decFnJsonBytes + var ktypeLo, vtypeLo = ktype, vtype + + if ktypeKind == reflect.Ptr { + for ktypeLo = ktype.Elem(); ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + } + + if vtypePtr { + vtypeElem = vtype.Elem() + for vtypeLo 
= vtypeElem; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + } + + rvkMut := !scalarBitset.isset(ti.keykind) + rvvMut := !scalarBitset.isset(ti.elemkind) + rvvCanNil := isnilBitset.isset(ti.elemkind) + + var rvk, rvkn, rvv, rvvn, rvva, rvvz reflect.Value + + var doMapGet, doMapSet bool + + if !d.h.MapValueReset { + if rvvMut && (vtypeKind != reflect.Interface || !d.h.InterfaceReset) { + doMapGet = true + rvva = mapAddrLoopvarRV(vtype, vtypeKind) + } + } + + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen >= 0 + + var kstr2bs []byte + var kstr string + + var mapKeyStringSharesBytesBuf bool + var att dBytesAttachState + + var vElem, kElem reflect.Type + kbuiltin := ti.tikey.flagDecBuiltin && ti.keykind != uint8(reflect.Slice) + vbuiltin := ti.tielem.flagDecBuiltin + if kbuiltin && ktypePtr { + kElem = ti.key.Elem() + } + if vbuiltin && vtypePtr { + vElem = ti.elem.Elem() + } + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + mapKeyStringSharesBytesBuf = false + kstr = "" + if j == 0 { + + if kTransient { + rvk = d.perType.TransientAddr2K(ktype, ktypeKind) + } else { + rvk = rvZeroAddrK(ktype, ktypeKind) + } + if !rvkMut { + rvkn = rvk + } + if !rvvMut { + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + } + if !ktypeIsString && keyFn == nil { + keyFn = d.fn(ktypeLo) + } + if valFn == nil { + valFn = d.fn(vtypeLo) + } + } else if rvkMut { + rvSetZero(rvk) + } else { + rvk = rvkn + } + + d.mapElemKey(j == 0) + + if d.d.TryNil() { + rvSetZero(rvk) + } else if ktypeIsString { + kstr2bs, att = d.d.DecodeStringAsBytes() + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, att) + rvSetString(rvk, kstr) + } else { + if kbuiltin { + if ktypePtr { + if rvIsNil(rvk) { + rvSetDirect(rvk, reflect.New(kElem)) + } + d.decode(rv2i(rvk)) + } else { + d.decode(rv2i(rvAddr(rvk, ti.tikey.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvk, keyFn) + } + + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() && rvk2.Type() == uint8SliceTyp { + kstr2bs = rvGetBytes(rvk2) + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, dBytesAttachView) + rvSetIntf(rvk, rv4istr(kstr)) + } + + } + } + + if mapKeyStringSharesBytesBuf && d.bufio { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + mapKeyStringSharesBytesBuf = false + } + + d.mapElemValue() + + if d.d.TryNil() { + if mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + + if !rvvz.IsValid() { + rvvz = rvZeroK(vtype, vtypeKind) + } + mapSet(rv, rvk, rvvz, mparams) + continue + } + + doMapSet = true + + if !rvvMut { + rvv = rvvn + } else if !doMapGet { + goto NEW_RVV + } else { + rvv = mapGet(rv, rvk, rvva, mparams) + if !rvv.IsValid() || (rvvCanNil && rvIsNil(rvv)) { + goto NEW_RVV + } + switch vtypeKind { + case reflect.Ptr, reflect.Map: + doMapSet = false + case reflect.Interface: + + rvvn = rvv.Elem() + if k := rvvn.Kind(); (k == reflect.Ptr || k == reflect.Map) && !rvIsNil(rvvn) { + d.decodeValueNoCheckNil(rvvn, nil) + continue + } + + rvvn = rvZeroAddrK(vtype, vtypeKind) + rvSetIntf(rvvn, rvv) + rvv = rvvn + default: + + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + rvSetDirect(rvvn, rvv) + rvv = rvvn + } + } 
+ goto DECODE_VALUE_NO_CHECK_NIL + + NEW_RVV: + if vtypePtr { + rvv = reflect.New(vtypeElem) + } else if vTransient { + rvv = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvv = rvZeroAddrK(vtype, vtypeKind) + } + + DECODE_VALUE_NO_CHECK_NIL: + if doMapSet && mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + if vbuiltin { + if vtypePtr { + if rvIsNil(rvv) { + rvSetDirect(rvv, reflect.New(vElem)) + } + d.decode(rv2i(rvv)) + } else { + d.decode(rv2i(rvAddr(rvv, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvv, valFn) + } + if doMapSet { + mapSet(rv, rvk, rvv, mparams) + } + } + + d.mapEnd() +} + +func (d *decoderJsonBytes) init(h Handle) { + initHandle(h) + callMake(&d.d) + d.hh = h + d.h = h.getBasicHandle() + + d.err = errDecoderNotInitialized + + if d.h.InternString && d.is == nil { + d.is.init() + } + + d.fp = d.d.init(h, &d.decoderBase, d).(*fastpathDsJsonBytes) + + if d.bytes { + d.rtidFn = &d.h.rtidFnsDecBytes + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtBytes + } else { + d.bufio = d.h.ReaderBufferSize > 0 + d.rtidFn = &d.h.rtidFnsDecIO + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtIO + } + + d.reset() + +} + +func (d *decoderJsonBytes) reset() { + d.d.reset() + d.err = nil + d.c = 0 + d.depth = 0 + d.calls = 0 + + d.maxdepth = decDefMaxDepth + if d.h.MaxDepth > 0 { + d.maxdepth = d.h.MaxDepth + } + d.mtid = 0 + d.stid = 0 + d.mtr = false + d.str = false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + _, d.mtr = fastpathAvIndex(d.mtid) + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + _, d.str = fastpathAvIndex(d.stid) + } +} + +func (d *decoderJsonBytes) Reset(r io.Reader) { + if d.bytes { + halt.onerror(errDecNoResetBytesWithReader) + } + d.reset() + if r == nil { + r = &eofReader + } + d.d.resetInIO(r) +} + +func (d *decoderJsonBytes) ResetBytes(in []byte) { + if !d.bytes { + halt.onerror(errDecNoResetReaderWithBytes) + } + d.resetBytes(in) +} + +func (d *decoderJsonBytes) resetBytes(in []byte) { + d.reset() + if in == nil { + in = zeroByteSlice + } + d.d.resetInBytes(in) +} + +func (d *decoderJsonBytes) ResetString(s string) { + d.ResetBytes(bytesView(s)) +} + +func (d *decoderJsonBytes) Decode(v interface{}) (err error) { + + defer panicValToErr(d, callRecoverSentinel, &d.err, &err, debugging) + d.mustDecode(v) + return +} + +func (d *decoderJsonBytes) MustDecode(v interface{}) { + defer panicValToErr(d, callRecoverSentinel, &d.err, nil, true) + d.mustDecode(v) + return +} + +func (d *decoderJsonBytes) mustDecode(v interface{}) { + halt.onerror(d.err) + if d.hh == nil { + halt.onerror(errNoFormatHandle) + } + + d.calls++ + d.decode(v) + d.calls-- +} + +func (d *decoderJsonBytes) Release() {} + +func (d *decoderJsonBytes) swallow() { + d.d.nextValueBytes() +} + +func (d *decoderJsonBytes) nextValueBytes() []byte { + return d.d.nextValueBytes() +} + +func (d *decoderJsonBytes) decode(iv interface{}) { + _ = d.d + + rv, ok := isNil(iv, true) + if ok { + halt.onerror(errCannotDecodeIntoNil) + } + + switch v := iv.(type) { + + case *string: + *v = d.detach2Str(d.d.DecodeStringAsBytes()) + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + 
*v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *uintptr: + *v = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *float32: + *v = d.d.DecodeFloat32() + case *float64: + *v = d.d.DecodeFloat64() + case *complex64: + *v = complex(d.d.DecodeFloat32(), 0) + case *complex128: + *v = complex(d.d.DecodeFloat64(), 0) + case *[]byte: + *v, _ = d.decodeBytesInto(*v, false) + case []byte: + + d.decodeBytesInto(v[:len(v):len(v)], true) + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(rv4iptr(v), nil) + + case reflect.Value: + if ok, _ = isDecodeable(v); !ok { + d.haltAsNotDecodeable(v) + } + d.decodeValue(v, nil) + + default: + + if skipFastpathTypeSwitchInDirectCall || !d.dh.fastpathDecodeTypeSwitch(iv, d) { + if !rv.IsValid() { + rv = reflect.ValueOf(iv) + } + if ok, _ = isDecodeable(rv); !ok { + d.haltAsNotDecodeable(rv) + } + d.decodeValue(rv, nil) + } + } +} + +func (d *decoderJsonBytes) decodeValue(rv reflect.Value, fn *decFnJsonBytes) { + if d.d.TryNil() { + decSetNonNilRV2Zero(rv) + } else { + d.decodeValueNoCheckNil(rv, fn) + } +} + +func (d *decoderJsonBytes) decodeValueNoCheckNil(rv reflect.Value, fn *decFnJsonBytes) { + + var rvp reflect.Value + var rvpValid bool +PTR: + if rv.Kind() == reflect.Ptr { + rvpValid = true + if rvIsNil(rv) { + rvSetDirect(rv, reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + goto PTR + } + + if fn == nil { + fn = d.fn(rv.Type()) + } + if fn.i.addrD { + if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else if fn.i.addrDf { + halt.errorStr("cannot decode into a non-pointer value") + } + } + fn.fd(d, &fn.i, rv) +} + +func (d *decoderJsonBytes) decodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + d.decodeValue(baseRV(v), d.fn(t)) + } else { + d.decodeValue(baseRV(v), d.fnNoExt(t)) + } +} + +func (d *decoderJsonBytes) structFieldNotFound(index int, rvkencname string) { + + if d.h.ErrorIfNoField { + if index >= 0 { + halt.errorInt("no matching struct field found when decoding stream array at index ", int64(index)) + } else if rvkencname != "" { + halt.errorStr2("no matching struct field found when decoding stream map with key ", rvkencname) + } + } + d.swallow() +} + +func (d *decoderJsonBytes) decodeBytesInto(out []byte, mustFit bool) (v []byte, state dBytesIntoState) { + v, att := d.d.DecodeBytes() + if cap(v) == 0 || (att >= dBytesAttachViewZerocopy && !mustFit) { + + return + } + if len(v) == 0 { + v = zeroByteSlice + return + } + if len(out) == len(v) { + state = dBytesIntoParamOut + } else if cap(out) >= len(v) { + out = out[:len(v)] + state = dBytesIntoParamOutSlice + } else if mustFit { + halt.errorf("bytes capacity insufficient for decoded bytes: got/expected: %d/%d", len(v), len(out)) + } else { + out = make([]byte, len(v)) + state = dBytesIntoNew + } + copy(out, v) + v = out + return +} + +func (d *decoderJsonBytes) rawBytes() (v []byte) { + + v = d.d.nextValueBytes() + if d.bytes && !d.h.ZeroCopy { + vv := make([]byte, len(v)) + copy(vv, v) + v = vv + } + return +} + +func (d *decoderJsonBytes) wrapErr(v error, err *error) { + *err = wrapCodecErr(v, d.hh.Name(), d.d.NumBytesRead(), false) +} + +func (d *decoderJsonBytes) NumBytesRead() int { + return 
d.d.NumBytesRead() +} + +func (d *decoderJsonBytes) containerNext(j, containerLen int, hasLen bool) bool { + + if hasLen { + return j < containerLen + } + return !d.d.CheckBreak() +} + +func (d *decoderJsonBytes) mapElemKey(firstTime bool) { + d.d.ReadMapElemKey(firstTime) + d.c = containerMapKey +} + +func (d *decoderJsonBytes) mapElemValue() { + d.d.ReadMapElemValue() + d.c = containerMapValue +} + +func (d *decoderJsonBytes) mapEnd() { + d.d.ReadMapEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderJsonBytes) arrayElem(firstTime bool) { + d.d.ReadArrayElem(firstTime) + d.c = containerArrayElem +} + +func (d *decoderJsonBytes) arrayEnd() { + d.d.ReadArrayEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderJsonBytes) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) { + + var vv interface{} + d.decode(&vv) + ext.UpdateExt(v, vv) + +} + +func (d *decoderJsonBytes) fn(t reflect.Type) *decFnJsonBytes { + return d.dh.decFnViaBH(t, d.rtidFn, d.h, d.fp, false) +} + +func (d *decoderJsonBytes) fnNoExt(t reflect.Type) *decFnJsonBytes { + return d.dh.decFnViaBH(t, d.rtidFnNoExt, d.h, d.fp, true) +} + +func (helperDecDriverJsonBytes) newDecoderBytes(in []byte, h Handle) *decoderJsonBytes { + var c1 decoderJsonBytes + c1.bytes = true + c1.init(h) + c1.ResetBytes(in) + return &c1 +} + +func (helperDecDriverJsonBytes) newDecoderIO(in io.Reader, h Handle) *decoderJsonBytes { + var c1 decoderJsonBytes + c1.init(h) + c1.Reset(in) + return &c1 +} + +func (helperDecDriverJsonBytes) decFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathDsJsonBytes) (f *fastpathDJsonBytes, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperDecDriverJsonBytes) decFindRtidFn(s []decRtidFnJsonBytes, rtid uintptr) (i uint, fn *decFnJsonBytes) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperDecDriverJsonBytes) decFromRtidFnSlice(fns *atomicRtidFnSlice) (s []decRtidFnJsonBytes) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]decRtidFnJsonBytes](v)) + } + return +} + +func (dh helperDecDriverJsonBytes) decFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, x *BasicHandle, fp *fastpathDsJsonBytes, + checkExt bool) (fn *decFnJsonBytes) { + return dh.decFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperDecDriverJsonBytes) decFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsJsonBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnJsonBytes) { + rtid := rt2id(rt) + var sp []decRtidFnJsonBytes = dh.decFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.decFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.decFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperDecDriverJsonBytes) decFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsJsonBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnJsonBytes) { + + 
fn = dh.decFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []decRtidFnJsonBytes + mu.Lock() + sp = dh.decFromRtidFnSlice(fns) + + if sp == nil { + sp = []decRtidFnJsonBytes{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.decFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]decRtidFnJsonBytes, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = decRtidFnJsonBytes{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperDecDriverJsonBytes) decFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathDsJsonBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnJsonBytes) { + fn = new(decFnJsonBytes) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + fi.addrDf = true + + if rtid == timeTypId && timeBuiltin { + fn.fd = (*decoderJsonBytes).kTime + } else if rtid == rawTypId { + fn.fd = (*decoderJsonBytes).raw + } else if rtid == rawExtTypId { + fn.fd = (*decoderJsonBytes).rawExt + fi.addrD = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fd = (*decoderJsonBytes).ext + fi.addrD = true + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fd = (*decoderJsonBytes).selferUnmarshal + fi.addrD = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fd = (*decoderJsonBytes).binaryUnmarshal + fi.addrD = ti.flagBinaryUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fd = (*decoderJsonBytes).jsonUnmarshal + fi.addrD = ti.flagJsonUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fd = (*decoderJsonBytes).textUnmarshal + fi.addrD = ti.flagTextUnmarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fd = fp[idx].decfn + fi.addrD = true + fi.addrDf = false + if rk == reflect.Array { + fi.addrD = false + } + } + } else { + + xfe, xrt := dh.decFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf2 := xfe.decfn + if rk == reflect.Array { + fi.addrD = false + fn.fd = func(d *decoderJsonBytes, xf *decFnInfo, xrv reflect.Value) { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } else { + fi.addrD = true + fi.addrDf = false + xptr2rt := reflect.PointerTo(xrt) + fn.fd = func(d *decoderJsonBytes, xf *decFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, rvConvert(xrv, xptr2rt)) + } else { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } + } + } + } + } + if fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fd = (*decoderJsonBytes).kBool + case reflect.String: + fn.fd = (*decoderJsonBytes).kString + case reflect.Int: + fn.fd = (*decoderJsonBytes).kInt + case reflect.Int8: + fn.fd = (*decoderJsonBytes).kInt8 + case reflect.Int16: + fn.fd = (*decoderJsonBytes).kInt16 + case reflect.Int32: + fn.fd = (*decoderJsonBytes).kInt32 + case 
reflect.Int64: + fn.fd = (*decoderJsonBytes).kInt64 + case reflect.Uint: + fn.fd = (*decoderJsonBytes).kUint + case reflect.Uint8: + fn.fd = (*decoderJsonBytes).kUint8 + case reflect.Uint16: + fn.fd = (*decoderJsonBytes).kUint16 + case reflect.Uint32: + fn.fd = (*decoderJsonBytes).kUint32 + case reflect.Uint64: + fn.fd = (*decoderJsonBytes).kUint64 + case reflect.Uintptr: + fn.fd = (*decoderJsonBytes).kUintptr + case reflect.Float32: + fn.fd = (*decoderJsonBytes).kFloat32 + case reflect.Float64: + fn.fd = (*decoderJsonBytes).kFloat64 + case reflect.Complex64: + fn.fd = (*decoderJsonBytes).kComplex64 + case reflect.Complex128: + fn.fd = (*decoderJsonBytes).kComplex128 + case reflect.Chan: + fn.fd = (*decoderJsonBytes).kChan + case reflect.Slice: + fn.fd = (*decoderJsonBytes).kSlice + case reflect.Array: + fi.addrD = false + fn.fd = (*decoderJsonBytes).kArray + case reflect.Struct: + if ti.simple { + fn.fd = (*decoderJsonBytes).kStructSimple + } else { + fn.fd = (*decoderJsonBytes).kStruct + } + case reflect.Map: + fn.fd = (*decoderJsonBytes).kMap + case reflect.Interface: + + fn.fd = (*decoderJsonBytes).kInterface + default: + + fn.fd = (*decoderJsonBytes).kErr + } + } + } + return +} +func (e *jsonEncDriverBytes) writeIndent() { + e.w.writen1('\n') + x := int(e.di) * int(e.dl) + if e.di < 0 { + x = -x + for x > len(jsonTabs) { + e.w.writeb(jsonTabs[:]) + x -= len(jsonTabs) + } + e.w.writeb(jsonTabs[:x]) + } else { + for x > len(jsonSpaces) { + e.w.writeb(jsonSpaces[:]) + x -= len(jsonSpaces) + } + e.w.writeb(jsonSpaces[:x]) + } +} + +func (e *jsonEncDriverBytes) WriteArrayElem(firstTime bool) { + if !firstTime { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } +} + +func (e *jsonEncDriverBytes) WriteMapElemKey(firstTime bool) { + if !firstTime { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } +} + +func (e *jsonEncDriverBytes) WriteMapElemValue() { + if e.d { + e.w.writen2(':', ' ') + } else { + e.w.writen1(':') + } +} + +func (e *jsonEncDriverBytes) EncodeNil() { + + e.w.writeb(jsonNull) +} + +func (e *jsonEncDriverBytes) encodeIntAsUint(v int64, quotes bool) { + neg := v < 0 + if neg { + v = -v + } + e.encodeUint(neg, quotes, uint64(v)) +} + +func (e *jsonEncDriverBytes) EncodeTime(t time.Time) { + + if t.IsZero() { + e.EncodeNil() + return + } + switch e.timeFmt { + case jsonTimeFmtStringLayout: + e.b[0] = '"' + b := t.AppendFormat(e.b[1:1], e.timeFmtLayout) + e.b[len(b)+1] = '"' + e.w.writeb(e.b[:len(b)+2]) + case jsonTimeFmtUnix: + e.encodeIntAsUint(t.Unix(), false) + case jsonTimeFmtUnixMilli: + e.encodeIntAsUint(t.UnixMilli(), false) + case jsonTimeFmtUnixMicro: + e.encodeIntAsUint(t.UnixMicro(), false) + case jsonTimeFmtUnixNano: + e.encodeIntAsUint(t.UnixNano(), false) + } +} + +func (e *jsonEncDriverBytes) EncodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + if ext == SelfExt { + e.enc.encodeAs(rv, basetype, false) + } else if v := ext.ConvertExt(rv); v == nil { + e.writeNilBytes() + } else { + e.enc.encodeI(v) + } +} + +func (e *jsonEncDriverBytes) EncodeRawExt(re *RawExt) { + if re.Data != nil { + e.w.writeb(re.Data) + } else if re.Value != nil { + e.enc.encodeI(re.Value) + } else { + e.EncodeNil() + } +} + +func (e *jsonEncDriverBytes) EncodeBool(b bool) { + e.w.writestr(jsonEncBoolStrs[bool2int(e.ks && e.e.c == containerMapKey)%2][bool2int(b)%2]) +} + +func (e *jsonEncDriverBytes) encodeFloat(f float64, bitsize, fmt byte, prec int8) { + var blen uint + if e.ks && e.e.c == containerMapKey { + blen = 2 + 
uint(len(strconv.AppendFloat(e.b[1:1], f, fmt, int(prec), int(bitsize)))) + + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + } else { + e.w.writeb(strconv.AppendFloat(e.b[:0], f, fmt, int(prec), int(bitsize))) + } +} + +func (e *jsonEncDriverBytes) EncodeFloat64(f float64) { + if math.IsNaN(f) || math.IsInf(f, 0) { + e.EncodeNil() + return + } + fmt, prec := jsonFloatStrconvFmtPrec64(f) + e.encodeFloat(f, 64, fmt, prec) +} + +func (e *jsonEncDriverBytes) EncodeFloat32(f float32) { + if math.IsNaN(float64(f)) || math.IsInf(float64(f), 0) { + e.EncodeNil() + return + } + fmt, prec := jsonFloatStrconvFmtPrec32(f) + e.encodeFloat(float64(f), 32, fmt, prec) +} + +func (e *jsonEncDriverBytes) encodeUint(neg bool, quotes bool, u uint64) { + e.w.writeb(jsonEncodeUint(neg, quotes, u, &e.b)) +} + +func (e *jsonEncDriverBytes) EncodeInt(v int64) { + quotes := e.is == 'A' || e.is == 'L' && (v > 1<<53 || v < -(1<<53)) || + (e.ks && e.e.c == containerMapKey) + + if cpu32Bit { + if quotes { + blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + } else { + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) + } + return + } + + if v < 0 { + e.encodeUint(true, quotes, uint64(-v)) + } else { + e.encodeUint(false, quotes, uint64(v)) + } +} + +func (e *jsonEncDriverBytes) EncodeUint(v uint64) { + quotes := e.is == 'A' || e.is == 'L' && v > 1<<53 || + (e.ks && e.e.c == containerMapKey) + + if cpu32Bit { + + if quotes { + blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + } else { + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) + } + return + } + + e.encodeUint(false, quotes, v) +} + +func (e *jsonEncDriverBytes) EncodeString(v string) { + if e.h.StringToRaw { + e.EncodeStringBytesRaw(bytesView(v)) + return + } + e.quoteStr(v) +} + +func (e *jsonEncDriverBytes) EncodeStringNoEscape4Json(v string) { e.w.writeqstr(v) } + +func (e *jsonEncDriverBytes) EncodeStringBytesRaw(v []byte) { + if e.rawext { + + iv := e.h.RawBytesExt.ConvertExt(any(v)) + if iv == nil { + e.EncodeNil() + } else { + e.enc.encodeI(iv) + } + return + } + + if e.bytesFmt == jsonBytesFmtArray { + e.WriteArrayStart(len(v)) + for j := range v { + e.WriteArrayElem(j == 0) + e.encodeUint(false, false, uint64(v[j])) + } + e.WriteArrayEnd() + return + } + + var slen int + if e.bytesFmt == jsonBytesFmtBase64 { + slen = base64.StdEncoding.EncodedLen(len(v)) + } else { + slen = e.byteFmter.EncodedLen(len(v)) + } + slen += 2 + + bs := e.e.blist.peek(slen, false)[:slen] + + if e.bytesFmt == jsonBytesFmtBase64 { + base64.StdEncoding.Encode(bs[1:], v) + } else { + e.byteFmter.Encode(bs[1:], v) + } + + bs[len(bs)-1] = '"' + bs[0] = '"' + e.w.writeb(bs) +} + +func (e *jsonEncDriverBytes) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *jsonEncDriverBytes) writeNilOr(v []byte) { + if !e.h.NilCollectionToZeroLength { + v = jsonNull + } + e.w.writeb(v) +} + +func (e *jsonEncDriverBytes) writeNilBytes() { + e.writeNilOr(jsonArrayEmpty) +} + +func (e *jsonEncDriverBytes) writeNilArray() { + e.writeNilOr(jsonArrayEmpty) +} + +func (e *jsonEncDriverBytes) writeNilMap() { + e.writeNilOr(jsonMapEmpty) +} + +func (e *jsonEncDriverBytes) WriteArrayEmpty() { + e.w.writen2('[', ']') +} + +func (e *jsonEncDriverBytes) WriteMapEmpty() { + e.w.writen2('{', '}') +} + +func (e *jsonEncDriverBytes) WriteArrayStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('[') +} + +func 
(e *jsonEncDriverBytes) WriteArrayEnd() { + if e.d { + e.dl-- + + e.writeIndent() + } + e.w.writen1(']') +} + +func (e *jsonEncDriverBytes) WriteMapStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('{') +} + +func (e *jsonEncDriverBytes) WriteMapEnd() { + if e.d { + e.dl-- + + e.writeIndent() + } + e.w.writen1('}') +} + +func (e *jsonEncDriverBytes) quoteStr(s string) { + + const hex = "0123456789abcdef" + e.w.writen1('"') + var i, start uint + for i < uint(len(s)) { + + b := s[i] + if e.s.isset(b) { + i++ + continue + } + if b < utf8.RuneSelf { + if start < i { + e.w.writestr(s[start:i]) + } + switch b { + case '\\': + e.w.writen2('\\', '\\') + case '"': + e.w.writen2('\\', '"') + case '\n': + e.w.writen2('\\', 'n') + case '\t': + e.w.writen2('\\', 't') + case '\r': + e.w.writen2('\\', 'r') + case '\b': + e.w.writen2('\\', 'b') + case '\f': + e.w.writen2('\\', 'f') + default: + e.w.writestr(`\u00`) + e.w.writen2(hex[b>>4], hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + e.w.writestr(s[start:i]) + } + e.w.writestr(`\uFFFD`) + i++ + start = i + continue + } + + if jsonEscapeMultiByteUnicodeSep && (c == '\u2028' || c == '\u2029') { + if start < i { + e.w.writestr(s[start:i]) + } + e.w.writestr(`\u202`) + e.w.writen1(hex[c&0xF]) + i += uint(size) + start = i + continue + } + i += uint(size) + } + if start < uint(len(s)) { + e.w.writestr(s[start:]) + } + e.w.writen1('"') +} + +func (e *jsonEncDriverBytes) atEndOfEncode() { + if e.h.TermWhitespace { + var c byte = ' ' + if e.e.c != 0 { + c = '\n' + } + e.w.writen1(c) + } +} + +func (d *jsonDecDriverBytes) ReadMapStart() int { + d.advance() + if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return containerLenNil + } + if d.tok != '{' { + halt.errorByte("read map - expect char '{' but got char: ", d.tok) + } + d.tok = 0 + return containerLenUnknown +} + +func (d *jsonDecDriverBytes) ReadArrayStart() int { + d.advance() + if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return containerLenNil + } + if d.tok != '[' { + halt.errorByte("read array - expect char '[' but got char ", d.tok) + } + d.tok = 0 + return containerLenUnknown +} + +func (d *jsonDecDriverBytes) CheckBreak() bool { + d.advance() + return d.tok == '}' || d.tok == ']' +} + +func (d *jsonDecDriverBytes) checkSep(xc byte) { + d.advance() + if d.tok != xc { + d.readDelimError(xc) + } + d.tok = 0 +} + +func (d *jsonDecDriverBytes) ReadArrayElem(firstTime bool) { + if !firstTime { + d.checkSep(',') + } +} + +func (d *jsonDecDriverBytes) ReadArrayEnd() { + d.checkSep(']') +} + +func (d *jsonDecDriverBytes) ReadMapElemKey(firstTime bool) { + d.ReadArrayElem(firstTime) +} + +func (d *jsonDecDriverBytes) ReadMapElemValue() { + d.checkSep(':') +} + +func (d *jsonDecDriverBytes) ReadMapEnd() { + d.checkSep('}') +} + +func (d *jsonDecDriverBytes) readDelimError(xc uint8) { + halt.errorf("read json delimiter - expect char '%c' but got char '%c'", xc, d.tok) +} + +func (d *jsonDecDriverBytes) checkLit3(got, expect [3]byte) { + if jsonValidateSymbols && got != expect { + jsonCheckLitErr3(got, expect) + } + d.tok = 0 +} + +func (d *jsonDecDriverBytes) checkLit4(got, expect [4]byte) { + if jsonValidateSymbols && got != expect { + jsonCheckLitErr4(got, expect) + } + d.tok = 0 +} + +func (d *jsonDecDriverBytes) skipWhitespace() { + d.tok = d.r.skipWhitespace() +} + +func (d *jsonDecDriverBytes) advance() { + + if d.tok < 33 { + 
d.skipWhitespace() + } +} + +func (d *jsonDecDriverBytes) nextValueBytes() []byte { + consumeString := func() { + TOP: + _, c := d.r.jsonReadAsisChars() + if c == '\\' { + d.r.readn1() + goto TOP + } + } + + d.advance() + d.r.startRecording() + + switch d.tok { + default: + _, d.tok = d.r.jsonReadNum() + case 'n': + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + case 'f': + d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.r.readn4()) + case 't': + d.checkLit3([3]byte{'r', 'u', 'e'}, d.r.readn3()) + case '"': + consumeString() + d.tok = 0 + case '{', '[': + var elem struct{} + var stack []struct{} + + stack = append(stack, elem) + + for len(stack) != 0 { + c := d.r.readn1() + switch c { + case '"': + consumeString() + case '{', '[': + stack = append(stack, elem) + case '}', ']': + stack = stack[:len(stack)-1] + } + } + d.tok = 0 + } + return d.r.stopRecording() +} + +func (d *jsonDecDriverBytes) TryNil() bool { + d.advance() + + if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return true + } + return false +} + +func (d *jsonDecDriverBytes) DecodeBool() (v bool) { + d.advance() + + fquot := d.d.c == containerMapKey && d.tok == '"' + if fquot { + d.tok = d.r.readn1() + } + switch d.tok { + case 'f': + d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.r.readn4()) + + case 't': + d.checkLit3([3]byte{'r', 'u', 'e'}, d.r.readn3()) + v = true + case 'n': + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + + default: + halt.errorByte("decode bool: got first char: ", d.tok) + + } + if fquot { + d.r.readn1() + } + return +} + +func (d *jsonDecDriverBytes) DecodeTime() (t time.Time) { + + d.advance() + if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return + } + var bs []byte + + if d.tok != '"' { + bs, d.tok = d.r.jsonReadNum() + i := d.parseInt64(bs) + switch d.timeFmtNum { + case jsonTimeFmtUnix: + t = time.Unix(i, 0) + case jsonTimeFmtUnixMilli: + t = time.UnixMilli(i) + case jsonTimeFmtUnixMicro: + t = time.UnixMicro(i) + case jsonTimeFmtUnixNano: + t = time.Unix(0, i) + default: + halt.errorStr("invalid timeFmtNum") + } + return + } + + bs = d.readUnescapedString() + var err error + for _, v := range d.timeFmtLayouts { + t, err = time.Parse(v, stringView(bs)) + if err == nil { + return + } + } + halt.errorStr("error decoding time") + return +} + +func (d *jsonDecDriverBytes) ContainerType() (vt valueType) { + + d.advance() + + if d.tok == '{' { + return valueTypeMap + } else if d.tok == '[' { + return valueTypeArray + } else if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return valueTypeNil + } else if d.tok == '"' { + return valueTypeString + } + return valueTypeUnset +} + +func (d *jsonDecDriverBytes) decNumBytes() (bs []byte) { + d.advance() + if d.tok == '"' { + bs = d.r.jsonReadUntilDblQuote() + d.tok = 0 + } else if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + } else { + bs, d.tok = d.r.jsonReadNum() + } + return +} + +func (d *jsonDecDriverBytes) DecodeUint64() (u uint64) { + b := d.decNumBytes() + u, neg, ok := parseInteger_bytes(b) + if neg { + halt.errorf("negative number cannot be decoded as uint64: %s", any(b)) + } + if !ok { + halt.onerror(strconvParseErr(b, "ParseUint")) + } + return +} + +func (d *jsonDecDriverBytes) DecodeInt64() (v int64) { + return d.parseInt64(d.decNumBytes()) +} + +func (d *jsonDecDriverBytes) parseInt64(b []byte) (v int64) { + u, neg, ok := parseInteger_bytes(b) + if !ok { + halt.onerror(strconvParseErr(b, "ParseInt")) + } + if chkOvf.Uint2Int(u, neg) { + 
halt.errorBytes("overflow decoding number from ", b) + } + if neg { + v = -int64(u) + } else { + v = int64(u) + } + return +} + +func (d *jsonDecDriverBytes) DecodeFloat64() (f float64) { + var err error + bs := d.decNumBytes() + if len(bs) == 0 { + return + } + f, err = parseFloat64(bs) + halt.onerror(err) + return +} + +func (d *jsonDecDriverBytes) DecodeFloat32() (f float32) { + var err error + bs := d.decNumBytes() + if len(bs) == 0 { + return + } + f, err = parseFloat32(bs) + halt.onerror(err) + return +} + +func (d *jsonDecDriverBytes) advanceNil() (ok bool) { + d.advance() + if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return true + } + return false +} + +func (d *jsonDecDriverBytes) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + if d.advanceNil() { + return + } + if ext == SelfExt { + d.dec.decodeAs(rv, basetype, false) + } else { + d.dec.interfaceExtConvertAndDecode(rv, ext) + } +} + +func (d *jsonDecDriverBytes) DecodeRawExt(re *RawExt) { + if d.advanceNil() { + return + } + d.dec.decode(&re.Value) +} + +func (d *jsonDecDriverBytes) decBytesFromArray(bs []byte) []byte { + d.advance() + if d.tok != ']' { + bs = append(bs, uint8(d.DecodeUint64())) + d.advance() + } + for d.tok != ']' { + if d.tok != ',' { + halt.errorByte("read array element - expect char ',' but got char: ", d.tok) + } + d.tok = 0 + bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) + d.advance() + } + d.tok = 0 + return bs +} + +func (d *jsonDecDriverBytes) DecodeBytes() (bs []byte, state dBytesAttachState) { + d.advance() + state = dBytesDetach + if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return + } + state = dBytesAttachBuffer + + if d.rawext { + d.buf = d.buf[:0] + d.dec.interfaceExtConvertAndDecode(&d.buf, d.h.RawBytesExt) + bs = d.buf + return + } + + if d.tok == '[' { + d.tok = 0 + + bs = d.decBytesFromArray(d.buf[:0]) + d.buf = bs + return + } + + d.ensureReadingString() + bs1 := d.readUnescapedString() + + slen := base64.StdEncoding.DecodedLen(len(bs1)) + if slen == 0 { + bs = zeroByteSlice + state = dBytesDetach + } else if slen <= cap(d.buf) { + bs = d.buf[:slen] + } else { + d.buf = d.d.blist.putGet(d.buf, slen)[:slen] + bs = d.buf + } + var err error + for _, v := range d.byteFmters { + + slen, err = v.Decode(bs, bs1) + if err == nil { + bs = bs[:slen] + return + } + } + halt.errorf("error decoding byte string '%s': %v", any(bs1), err) + return +} + +func (d *jsonDecDriverBytes) DecodeStringAsBytes() (bs []byte, state dBytesAttachState) { + d.advance() + + var cond bool + + if d.tok == '"' { + d.tok = 0 + bs, cond = d.dblQuoteStringAsBytes() + state = d.d.attachState(cond) + return + } + + state = dBytesDetach + + switch d.tok { + case 'n': + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + + case 'f': + d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.r.readn4()) + bs = jsonLitb[jsonLitF : jsonLitF+5] + case 't': + d.checkLit3([3]byte{'r', 'u', 'e'}, d.r.readn3()) + bs = jsonLitb[jsonLitT : jsonLitT+4] + default: + + bs, d.tok = d.r.jsonReadNum() + state = d.d.attachState(!d.d.bytes) + } + return +} + +func (d *jsonDecDriverBytes) ensureReadingString() { + if d.tok != '"' { + halt.errorByte("expecting string starting with '\"'; got ", d.tok) + } +} + +func (d *jsonDecDriverBytes) readUnescapedString() (bs []byte) { + + bs = d.r.jsonReadUntilDblQuote() + d.tok = 0 + return +} + +func (d *jsonDecDriverBytes) dblQuoteStringAsBytes() (buf []byte, usingBuf bool) { + bs, c := d.r.jsonReadAsisChars() + if c == '"' { + 
return bs, !d.d.bytes + } + buf = append(d.buf[:0], bs...) + + checkUtf8 := d.h.ValidateUnicode + usingBuf = true + + for { + + c = d.r.readn1() + + switch c { + case '"', '\\', '/', '\'': + buf = append(buf, c) + case 'b': + buf = append(buf, '\b') + case 'f': + buf = append(buf, '\f') + case 'n': + buf = append(buf, '\n') + case 'r': + buf = append(buf, '\r') + case 't': + buf = append(buf, '\t') + case 'u': + rr := d.appendStringAsBytesSlashU() + if checkUtf8 && rr == unicode.ReplacementChar { + d.buf = buf + halt.errorBytes("invalid UTF-8 character found after: ", buf) + } + buf = append(buf, d.bstr[:utf8.EncodeRune(d.bstr[:], rr)]...) + default: + d.buf = buf + halt.errorByte("unsupported escaped value: ", c) + } + + bs, c = d.r.jsonReadAsisChars() + buf = append(buf, bs...) + if c == '"' { + break + } + } + d.buf = buf + return +} + +func (d *jsonDecDriverBytes) appendStringAsBytesSlashU() (r rune) { + var rr uint32 + cs := d.r.readn4() + if rr = jsonSlashURune(cs); rr == unicode.ReplacementChar { + return unicode.ReplacementChar + } + r = rune(rr) + if utf16.IsSurrogate(r) { + csu := d.r.readn2() + cs = d.r.readn4() + if csu[0] == '\\' && csu[1] == 'u' { + if rr = jsonSlashURune(cs); rr == unicode.ReplacementChar { + return unicode.ReplacementChar + } + return utf16.DecodeRune(r, rune(rr)) + } + return unicode.ReplacementChar + } + return +} + +func (d *jsonDecDriverBytes) DecodeNaked() { + z := d.d.naked() + + d.advance() + var bs []byte + var err error + switch d.tok { + case 'n': + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + z.v = valueTypeNil + case 'f': + d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.r.readn4()) + z.v = valueTypeBool + z.b = false + case 't': + d.checkLit3([3]byte{'r', 'u', 'e'}, d.r.readn3()) + z.v = valueTypeBool + z.b = true + case '{': + z.v = valueTypeMap + case '[': + z.v = valueTypeArray + case '"': + + d.tok = 0 + bs, z.b = d.dblQuoteStringAsBytes() + att := d.d.attachState(z.b) + if jsonNakedBoolNumInQuotedStr && + d.h.MapKeyAsString && len(bs) > 0 && d.d.c == containerMapKey { + switch string(bs) { + + case "true": + z.v = valueTypeBool + z.b = true + case "false": + z.v = valueTypeBool + z.b = false + default: + if err = jsonNakedNum(z, bs, d.h.PreferFloat, d.h.SignedInteger); err != nil { + z.v = valueTypeString + z.s = d.d.detach2Str(bs, att) + } + } + } else { + z.v = valueTypeString + z.s = d.d.detach2Str(bs, att) + } + default: + bs, d.tok = d.r.jsonReadNum() + if len(bs) == 0 { + halt.errorStr("decode number from empty string") + } + if err = jsonNakedNum(z, bs, d.h.PreferFloat, d.h.SignedInteger); err != nil { + halt.errorf("decode number from %s: %v", any(bs), err) + } + } +} + +func (e *jsonEncDriverBytes) reset() { + e.dl = 0 + + e.typical = e.h.typical() + if e.h.HTMLCharsAsIs { + e.s = &jsonCharSafeBitset + } else { + e.s = &jsonCharHtmlSafeBitset + } + e.di = int8(e.h.Indent) + e.d = e.h.Indent != 0 + e.ks = e.h.MapKeyAsString + e.is = e.h.IntegerAsString + + var ho jsonHandleOpts + ho.reset(e.h) + e.timeFmt = ho.timeFmt + e.bytesFmt = ho.bytesFmt + e.timeFmtLayout = "" + e.byteFmter = nil + if len(ho.timeFmtLayouts) > 0 { + e.timeFmtLayout = ho.timeFmtLayouts[0] + } + if len(ho.byteFmters) > 0 { + e.byteFmter = ho.byteFmters[0] + } + e.rawext = ho.rawext +} + +func (d *jsonDecDriverBytes) reset() { + d.buf = d.d.blist.check(d.buf, 256) + d.tok = 0 + + d.jsonHandleOpts.reset(d.h) +} + +func (d *jsonEncDriverBytes) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*JsonHandle) + d.e = 
shared + if shared.bytes { + fp = jsonFpEncBytes + } else { + fp = jsonFpEncIO + } + + d.init2(enc) + return +} + +func (e *jsonEncDriverBytes) writeBytesAsis(b []byte) { e.w.writeb(b) } + +func (e *jsonEncDriverBytes) writerEnd() { e.w.end() } + +func (e *jsonEncDriverBytes) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *jsonEncDriverBytes) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +func (d *jsonDecDriverBytes) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*JsonHandle) + d.d = shared + if shared.bytes { + fp = jsonFpDecBytes + } else { + fp = jsonFpDecIO + } + + d.init2(dec) + return +} + +func (d *jsonDecDriverBytes) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *jsonDecDriverBytes) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *jsonDecDriverBytes) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +func (d *jsonDecDriverBytes) descBd() (s string) { + halt.onerror(errJsonNoBd) + return +} + +func (d *jsonEncDriverBytes) init2(enc encoderI) { + d.enc = enc + +} + +func (d *jsonDecDriverBytes) init2(dec decoderI) { + d.dec = dec + + d.buf = d.buf[:0] + + d.d.jsms = d.h.MapKeyAsString +} + +type helperEncDriverJsonIO struct{} +type encFnJsonIO struct { + i encFnInfo + fe func(*encoderJsonIO, *encFnInfo, reflect.Value) +} +type encRtidFnJsonIO struct { + rtid uintptr + fn *encFnJsonIO +} +type encoderJsonIO struct { + dh helperEncDriverJsonIO + fp *fastpathEsJsonIO + e jsonEncDriverIO + encoderBase +} +type helperDecDriverJsonIO struct{} +type decFnJsonIO struct { + i decFnInfo + fd func(*decoderJsonIO, *decFnInfo, reflect.Value) +} +type decRtidFnJsonIO struct { + rtid uintptr + fn *decFnJsonIO +} +type decoderJsonIO struct { + dh helperDecDriverJsonIO + fp *fastpathDsJsonIO + d jsonDecDriverIO + decoderBase +} +type jsonEncDriverIO struct { + noBuiltInTypes + h *JsonHandle + e *encoderBase + s *bitset256 + + w bufioEncWriter + + enc encoderI + + timeFmtLayout string + byteFmter jsonBytesFmter + + timeFmt jsonTimeFmt + bytesFmt jsonBytesFmt + + di int8 + d bool + dl uint16 + + ks bool + is byte + + typical bool + + rawext bool + + b [48]byte +} +type jsonDecDriverIO struct { + noBuiltInTypes + decDriverNoopNumberHelper + h *JsonHandle + d *decoderBase + + r ioDecReader + + buf []byte + + tok uint8 + _ bool + _ byte + bstr [4]byte + + jsonHandleOpts + + dec decoderI +} + +func (e *encoderJsonIO) rawExt(_ *encFnInfo, rv reflect.Value) { + if re := rv2i(rv).(*RawExt); re == nil { + e.e.EncodeNil() + } else { + e.e.EncodeRawExt(re) + } +} + +func (e *encoderJsonIO) ext(f *encFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (e *encoderJsonIO) selferMarshal(_ *encFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(&Encoder{e}) +} + +func (e *encoderJsonIO) binaryMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) +} + +func (e *encoderJsonIO) textMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) +} + +func (e *encoderJsonIO) jsonMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) +} + +func (e *encoderJsonIO) raw(_ *encFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *encoderJsonIO) 
encodeComplex64(v complex64) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat32(real(v)) +} + +func (e *encoderJsonIO) encodeComplex128(v complex128) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat64(real(v)) +} + +func (e *encoderJsonIO) kBool(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeBool(rvGetBool(rv)) +} + +func (e *encoderJsonIO) kTime(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeTime(rvGetTime(rv)) +} + +func (e *encoderJsonIO) kString(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeString(rvGetString(rv)) +} + +func (e *encoderJsonIO) kFloat32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(rvGetFloat32(rv)) +} + +func (e *encoderJsonIO) kFloat64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rvGetFloat64(rv)) +} + +func (e *encoderJsonIO) kComplex64(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex64(rvGetComplex64(rv)) +} + +func (e *encoderJsonIO) kComplex128(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex128(rvGetComplex128(rv)) +} + +func (e *encoderJsonIO) kInt(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt(rv))) +} + +func (e *encoderJsonIO) kInt8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt8(rv))) +} + +func (e *encoderJsonIO) kInt16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt16(rv))) +} + +func (e *encoderJsonIO) kInt32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt32(rv))) +} + +func (e *encoderJsonIO) kInt64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt64(rv))) +} + +func (e *encoderJsonIO) kUint(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint(rv))) +} + +func (e *encoderJsonIO) kUint8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint8(rv))) +} + +func (e *encoderJsonIO) kUint16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint16(rv))) +} + +func (e *encoderJsonIO) kUint32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint32(rv))) +} + +func (e *encoderJsonIO) kUint64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint64(rv))) +} + +func (e *encoderJsonIO) kUintptr(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUintptr(rv))) +} + +func (e *encoderJsonIO) kSeqFn(rt reflect.Type) (fn *encFnJsonIO) { + + if rt = baseRT(rt); rt.Kind() != reflect.Interface { + fn = e.fn(rt) + } + return +} + +func (e *encoderJsonIO) kArrayWMbs(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(l) + e.mapStart(l >> 1) + + var fn *encFnJsonIO + builtin := ti.tielem.flagEncBuiltin + if !builtin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerMapKey + e.e.WriteMapElemKey(true) + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(false) + } else { + e.mapElemValue() + } + } + e.c = 0 + e.e.WriteMapEnd() + +} + +func (e *encoderJsonIO) kArrayW(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l <= 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(l) + + var fn *encFnJsonIO + if 
!ti.tielem.flagEncBuiltin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerArrayElem + e.e.WriteArrayElem(true) + builtin := ti.tielem.flagEncBuiltin + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + e.c = containerArrayElem + e.e.WriteArrayElem(false) + } + + e.c = 0 + e.e.WriteArrayEnd() +} + +func (e *encoderJsonIO) kChan(f *encFnInfo, rv reflect.Value) { + if f.ti.chandir&uint8(reflect.RecvDir) == 0 { + halt.errorStr("send-only channel cannot be encoded") + } + if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) { + e.kSliceBytesChan(rv) + return + } + rtslice := reflect.SliceOf(f.ti.elem) + rv = chanToSlice(rv, rtslice, e.h.ChanRecvTimeout) + ti := e.h.getTypeInfo(rt2id(rtslice), rtslice) + if f.ti.mbs { + e.kArrayWMbs(rv, ti, true) + } else { + e.kArrayW(rv, ti, true) + } +} + +func (e *encoderJsonIO) kSlice(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, true) + } else if f.ti.rtid == uint8SliceTypId || uint8TypId == rt2id(f.ti.elem) { + + e.e.EncodeBytes(rvGetBytes(rv)) + } else { + e.kArrayW(rv, f.ti, true) + } +} + +func (e *encoderJsonIO) kArray(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, false) + } else if handleBytesWithinKArray && uint8TypId == rt2id(f.ti.elem) { + e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, nil)) + } else { + e.kArrayW(rv, f.ti, false) + } +} + +func (e *encoderJsonIO) kSliceBytesChan(rv reflect.Value) { + + bs0 := e.blist.peek(32, true) + bs := bs0 + + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + +L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + + break L1 + } + } + default: + for b := range ch { + bs = append(bs, b) + } + } + + e.e.EncodeBytes(bs) + e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.blist.put(bs0) + } +} + +func (e *encoderJsonIO) kStructFieldKey(keyType valueType, encName string) { + + if keyType == valueTypeString { + e.e.EncodeString(encName) + } else if keyType == valueTypeInt { + e.e.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + e.e.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + e.e.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64))) + } else { + halt.errorStr2("invalid struct key type: ", keyType.String()) + } + +} + +func (e *encoderJsonIO) kStructSimple(f *encFnInfo, rv reflect.Value) { + _ = e.e + tisfi := f.ti.sfi.source() + + chkCirRef := e.h.CheckCircularRef + var si *structFieldInfo + var j int + + if f.ti.toArray || e.h.StructToArray { + if len(tisfi) == 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } else { + if len(tisfi) == 0 { + e.e.WriteMapEmpty() + return + } + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + e.mapStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.EncodeStringNoEscape4Json(si.encName) + e.mapElemValue() + 
if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteMapEnd() + } +} + +func (e *encoderJsonIO) kStruct(f *encFnInfo, rv reflect.Value) { + _ = e.e + ti := f.ti + toMap := !(ti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if ti.flagMissingFielder { + toMap = true + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + } else if ti.flagMissingFielderPtr { + toMap = true + if rv.CanAddr() { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder).CodecMissingFields() + } else { + mf = rv2i(e.addrRV(rv, ti.rt, ti.ptr)).(MissingFielder).CodecMissingFields() + } + } + newlen := len(mf) + tisfi := ti.sfi.source() + newlen += len(tisfi) + + var fkvs = e.slist.get(newlen)[:newlen] + + recur := e.h.RecursiveEmptyCheck + chkCirRef := e.h.CheckCircularRef + + var xlen int + + var kv sfiRv + var j int + var sf encStructFieldObj + if toMap { + newlen = 0 + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + for _, si := range tisfi { + + if si.omitEmpty { + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyValue(kv.r, e.h.TypeInfos, recur) { + continue + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[newlen] = kv + newlen++ + } + + var mf2s []stringIntf + if len(mf) != 0 { + mf2s = make([]stringIntf, 0, len(mf)) + for k, v := range mf { + if k == "" { + continue + } + if ti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur) { + continue + } + mf2s = append(mf2s, stringIntf{k, v}) + } + } + + xlen = newlen + len(mf2s) + if xlen == 0 { + e.e.WriteMapEmpty() + goto END + } + + e.mapStart(xlen) + + if len(mf2s) != 0 && e.h.Canonical { + mf2w := make([]encStructFieldObj, newlen+len(mf2s)) + for j = 0; j < newlen; j++ { + kv = fkvs[j] + mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, true, + !kv.v.encNameEscape4Json, kv.v.encBuiltin} + } + for _, v := range mf2s { + mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false, false} + j++ + } + sort.Sort((encStructFieldObjSlice)(mf2w)) + for j, sf = range mf2w { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && sf.noEsc4json { + e.e.EncodeStringNoEscape4Json(sf.key) + } else { + e.kStructFieldKey(ti.keyType, sf.key) + } + e.mapElemValue() + if sf.isRv { + if sf.builtin { + e.encodeIB(rv2i(baseRVRV(sf.rv))) + } else { + e.encodeValue(sf.rv, nil) + } + } else { + if !e.encodeBuiltin(sf.intf) { + e.encodeR(reflect.ValueOf(sf.intf)) + } + + } + } + } else { + keytyp := ti.keyType + for j = 0; j < newlen; j++ { + kv = fkvs[j] + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && !kv.v.encNameEscape4Json { + e.e.EncodeStringNoEscape4Json(kv.v.encName) + } else { + e.kStructFieldKey(keytyp, kv.v.encName) + } + e.mapElemValue() + if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + for _, v := range mf2s { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.kStructFieldKey(keytyp, v.v) + e.mapElemValue() + if !e.encodeBuiltin(v.i) { + e.encodeR(reflect.ValueOf(v.i)) + } + + j++ + } + } + + e.c = 0 + e.e.WriteMapEnd() + } else { + newlen = len(tisfi) + for i, si := range tisfi { + + if si.omitEmpty { + + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyContainerValue(kv.r, e.h.TypeInfos, recur) { + kv.r = reflect.Value{} + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[i] = kv + } + + if 
newlen == 0 { + e.e.WriteArrayEmpty() + goto END + } + + e.arrayStart(newlen) + for j = 0; j < newlen; j++ { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + kv = fkvs[j] + if !kv.r.IsValid() { + e.e.EncodeNil() + } else if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } + +END: + + e.slist.put(fkvs) +} + +func (e *encoderJsonIO) kMap(f *encFnInfo, rv reflect.Value) { + _ = e.e + l := rvLenMap(rv) + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.mapStart(l) + + var keyFn, valFn *encFnJsonIO + + ktypeKind := reflect.Kind(f.ti.keykind) + vtypeKind := reflect.Kind(f.ti.elemkind) + + rtval := f.ti.elem + rtvalkind := vtypeKind + for rtvalkind == reflect.Ptr { + rtval = rtval.Elem() + rtvalkind = rtval.Kind() + } + if rtvalkind != reflect.Interface { + valFn = e.fn(rtval) + } + + var rvv = mapAddrLoopvarRV(f.ti.elem, vtypeKind) + + rtkey := f.ti.key + var keyTypeIsString = stringTypId == rt2id(rtkey) + if keyTypeIsString { + keyFn = e.fn(rtkey) + } else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + keyFn = e.fn(rtkey) + } + } + + if e.h.Canonical { + e.kMapCanonical(f.ti, rv, rvv, keyFn, valFn) + e.c = 0 + e.e.WriteMapEnd() + return + } + + var rvk = mapAddrLoopvarRV(f.ti.key, ktypeKind) + + var it mapIter + mapRange(&it, rv, rvk, rvv, true) + + kbuiltin := f.ti.tikey.flagEncBuiltin + vbuiltin := f.ti.tielem.flagEncBuiltin + for j := 0; it.Next(); j++ { + rv = it.Key() + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if keyTypeIsString { + e.e.EncodeString(rvGetString(rv)) + } else if kbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(rv, keyFn) + } + e.mapElemValue() + rv = it.Value() + if vbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(it.Value(), valFn) + } + } + it.Done() + + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderJsonIO) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valFn *encFnJsonIO) { + _ = e.e + + rtkey := ti.key + rtkeydecl := rtkey.PkgPath() == "" && rtkey.Name() != "" + + mks := rv.MapKeys() + rtkeyKind := rtkey.Kind() + mparams := getMapReqParams(ti) + + switch rtkeyKind { + case reflect.Bool: + + if len(mks) == 2 && mks[0].Bool() { + mks[0], mks[1] = mks[1], mks[0] + } + for i := range mks { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeBool(mks[i].Bool()) + } else { + e.encodeValueNonNil(mks[i], keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mks[i], rvv, mparams), valFn) + } + case reflect.String: + mksv := make([]orderedRv[string], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rvGetString(k) + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeString(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]orderedRv[uint64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeUint(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, 
mparams), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]orderedRv[int64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeInt(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float32: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat32(float32(mksv[i].v)) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float64: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat64(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + default: + if rtkey == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + slices.SortFunc(mksv, cmpTimeRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeTime(mksv[i].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + break + } + + bs0 := e.blist.get(len(mks) * 16) + mksv := bs0 + mksbv := make([]bytesRv, len(mks)) + + sideEncode(e.hh, &e.h.sideEncPool, func(se encoderI) { + se.ResetBytes(&mksv) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + se.setContainerState(containerMapKey) + se.encodeR(baseRVRV(k)) + se.atEndOfEncode() + se.writerEnd() + v.r = k + v.v = mksv[l:] + } + }) + + slices.SortFunc(mksbv, cmpBytesRv) + for j := range mksbv { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.writeBytesAsis(mksbv[j].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksbv[j].r, rvv, mparams), valFn) + } + e.blist.put(mksv) + if !byteSliceSameData(bs0, mksv) { + e.blist.put(bs0) + } + } +} + +func (e *encoderJsonIO) init(h Handle) { + initHandle(h) + callMake(&e.e) + e.hh = h + e.h = h.getBasicHandle() + + e.err = errEncoderNotInitialized + + e.fp = e.e.init(h, &e.encoderBase, e).(*fastpathEsJsonIO) + + if e.bytes { + e.rtidFn = &e.h.rtidFnsEncBytes + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtBytes + } else { + e.rtidFn = &e.h.rtidFnsEncIO + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtIO + } + + e.reset() +} + +func (e *encoderJsonIO) reset() { + e.e.reset() + if e.ci != nil { + e.ci = e.ci[:0] + } + e.c = 0 + e.calls = 0 + e.seq = 0 + e.err = nil +} + +func (e *encoderJsonIO) Encode(v interface{}) (err error) { + + defer panicValToErr(e, callRecoverSentinel, &e.err, &err, debugging) + e.mustEncode(v) + return +} + +func (e *encoderJsonIO) MustEncode(v interface{}) { + defer panicValToErr(e, callRecoverSentinel, &e.err, nil, true) + e.mustEncode(v) + return +} + +func (e *encoderJsonIO) mustEncode(v interface{}) { + halt.onerror(e.err) + if e.hh == nil { + halt.onerror(errNoFormatHandle) + } + + e.calls++ + if !e.encodeBuiltin(v) { + 
e.encodeR(reflect.ValueOf(v)) + } + + e.calls-- + if e.calls == 0 { + e.e.atEndOfEncode() + e.e.writerEnd() + } +} + +func (e *encoderJsonIO) encodeI(iv interface{}) { + if !e.encodeBuiltin(iv) { + e.encodeR(reflect.ValueOf(iv)) + } +} + +func (e *encoderJsonIO) encodeIB(iv interface{}) { + if !e.encodeBuiltin(iv) { + + halt.errorStr("[should not happen] invalid type passed to encodeBuiltin") + } +} + +func (e *encoderJsonIO) encodeR(base reflect.Value) { + e.encodeValue(base, nil) +} + +func (e *encoderJsonIO) encodeBuiltin(iv interface{}) (ok bool) { + ok = true + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + + case Raw: + e.rawBytes(v) + case string: + e.e.EncodeString(v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case complex64: + e.encodeComplex64(v) + case complex128: + e.encodeComplex128(v) + case time.Time: + e.e.EncodeTime(v) + case []byte: + e.e.EncodeBytes(v) + default: + + ok = !skipFastpathTypeSwitchInDirectCall && e.dh.fastpathEncodeTypeSwitch(iv, e) + } + return +} + +func (e *encoderJsonIO) encodeValue(rv reflect.Value, fn *encFnJsonIO) { + + var ciPushes int + + var rvp reflect.Value + var rvpValid bool + +RV: + switch rv.Kind() { + case reflect.Ptr: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = true + rvp = rv + rv = rv.Elem() + + if e.h.CheckCircularRef && e.ci.canPushElemKind(rv.Kind()) { + e.ci.push(rv2i(rvp)) + ciPushes++ + } + goto RV + case reflect.Interface: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = false + rvp = reflect.Value{} + rv = rv.Elem() + fn = nil + goto RV + case reflect.Map: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Slice, reflect.Chan: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + goto END + } + + if fn == nil { + fn = e.fn(rv.Type()) + } + + if !fn.i.addrE { + + } else if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + fn.fe(e, &fn.i, rv) + +END: + if ciPushes > 0 { + e.ci.pop(ciPushes) + } +} + +func (e *encoderJsonIO) encodeValueNonNil(rv reflect.Value, fn *encFnJsonIO) { + + if fn.i.addrE { + if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + } + fn.fe(e, &fn.i, rv) +} + +func (e *encoderJsonIO) encodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + e.encodeValue(baseRV(v), e.fn(t)) + } else { + e.encodeValue(baseRV(v), e.fnNoExt(t)) + } +} + +func (e *encoderJsonIO) marshalUtf8(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeString(stringView(bs)) + } +} + +func (e *encoderJsonIO) marshalAsis(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.writeBytesAsis(bs) + } +} + +func (e *encoderJsonIO) 
marshalRaw(bs []byte, fnerr error) { + halt.onerror(fnerr) + e.e.EncodeBytes(bs) +} + +func (e *encoderJsonIO) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + halt.errorBytes("Raw values cannot be encoded: ", v) + } + e.e.writeBytesAsis(v) +} + +func (e *encoderJsonIO) fn(t reflect.Type) *encFnJsonIO { + return e.dh.encFnViaBH(t, e.rtidFn, e.h, e.fp, false) +} + +func (e *encoderJsonIO) fnNoExt(t reflect.Type) *encFnJsonIO { + return e.dh.encFnViaBH(t, e.rtidFnNoExt, e.h, e.fp, true) +} + +func (e *encoderJsonIO) mapStart(length int) { + e.e.WriteMapStart(length) + e.c = containerMapStart +} + +func (e *encoderJsonIO) mapElemValue() { + e.e.WriteMapElemValue() + e.c = containerMapValue +} + +func (e *encoderJsonIO) arrayStart(length int) { + e.e.WriteArrayStart(length) + e.c = containerArrayStart +} + +func (e *encoderJsonIO) writerEnd() { + e.e.writerEnd() +} + +func (e *encoderJsonIO) atEndOfEncode() { + e.e.atEndOfEncode() +} + +func (e *encoderJsonIO) Reset(w io.Writer) { + if e.bytes { + halt.onerror(errEncNoResetBytesWithWriter) + } + e.reset() + if w == nil { + w = io.Discard + } + e.e.resetOutIO(w) +} + +func (e *encoderJsonIO) ResetBytes(out *[]byte) { + if !e.bytes { + halt.onerror(errEncNoResetWriterWithBytes) + } + e.resetBytes(out) +} + +func (e *encoderJsonIO) resetBytes(out *[]byte) { + e.reset() + if out == nil { + out = &bytesEncAppenderDefOut + } + e.e.resetOutBytes(out) +} + +func (helperEncDriverJsonIO) newEncoderBytes(out *[]byte, h Handle) *encoderJsonIO { + var c1 encoderJsonIO + c1.bytes = true + c1.init(h) + c1.ResetBytes(out) + return &c1 +} + +func (helperEncDriverJsonIO) newEncoderIO(out io.Writer, h Handle) *encoderJsonIO { + var c1 encoderJsonIO + c1.bytes = false + c1.init(h) + c1.Reset(out) + return &c1 +} + +func (helperEncDriverJsonIO) encFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathEsJsonIO) (f *fastpathEJsonIO, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperEncDriverJsonIO) encFindRtidFn(s []encRtidFnJsonIO, rtid uintptr) (i uint, fn *encFnJsonIO) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperEncDriverJsonIO) encFromRtidFnSlice(fns *atomicRtidFnSlice) (s []encRtidFnJsonIO) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]encRtidFnJsonIO](v)) + } + return +} + +func (dh helperEncDriverJsonIO) encFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, + x *BasicHandle, fp *fastpathEsJsonIO, checkExt bool) (fn *encFnJsonIO) { + return dh.encFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperEncDriverJsonIO) encFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsJsonIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnJsonIO) { + rtid := rt2id(rt) + var sp []encRtidFnJsonIO = dh.encFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.encFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.encFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh 
helperEncDriverJsonIO) encFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsJsonIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnJsonIO) { + + fn = dh.encFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []encRtidFnJsonIO + mu.Lock() + sp = dh.encFromRtidFnSlice(fns) + + if sp == nil { + sp = []encRtidFnJsonIO{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.encFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]encRtidFnJsonIO, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = encRtidFnJsonIO{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperEncDriverJsonIO) encFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathEsJsonIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnJsonIO) { + fn = new(encFnJsonIO) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + if rtid == timeTypId && timeBuiltin { + fn.fe = (*encoderJsonIO).kTime + } else if rtid == rawTypId { + fn.fe = (*encoderJsonIO).raw + } else if rtid == rawExtTypId { + fn.fe = (*encoderJsonIO).rawExt + fi.addrE = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*encoderJsonIO).ext + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fe = (*encoderJsonIO).selferMarshal + fi.addrE = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fe = (*encoderJsonIO).binaryMarshal + fi.addrE = ti.flagBinaryMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fe = (*encoderJsonIO).jsonMarshal + fi.addrE = ti.flagJsonMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fe = (*encoderJsonIO).textMarshal + fi.addrE = ti.flagTextMarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fe = fp[idx].encfn + } + } else { + + xfe, xrt := dh.encFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf := xfe.encfn + fn.fe = func(e *encoderJsonIO, xf *encFnInfo, xrv reflect.Value) { + xfnf(e, xf, rvConvert(xrv, xrt)) + } + } + } + } + if fn.fe == nil { + switch rk { + case reflect.Bool: + fn.fe = (*encoderJsonIO).kBool + case reflect.String: + + fn.fe = (*encoderJsonIO).kString + case reflect.Int: + fn.fe = (*encoderJsonIO).kInt + case reflect.Int8: + fn.fe = (*encoderJsonIO).kInt8 + case reflect.Int16: + fn.fe = (*encoderJsonIO).kInt16 + case reflect.Int32: + fn.fe = (*encoderJsonIO).kInt32 + case reflect.Int64: + fn.fe = (*encoderJsonIO).kInt64 + case reflect.Uint: + fn.fe = (*encoderJsonIO).kUint + case reflect.Uint8: + fn.fe = (*encoderJsonIO).kUint8 + case reflect.Uint16: + fn.fe = 
(*encoderJsonIO).kUint16 + case reflect.Uint32: + fn.fe = (*encoderJsonIO).kUint32 + case reflect.Uint64: + fn.fe = (*encoderJsonIO).kUint64 + case reflect.Uintptr: + fn.fe = (*encoderJsonIO).kUintptr + case reflect.Float32: + fn.fe = (*encoderJsonIO).kFloat32 + case reflect.Float64: + fn.fe = (*encoderJsonIO).kFloat64 + case reflect.Complex64: + fn.fe = (*encoderJsonIO).kComplex64 + case reflect.Complex128: + fn.fe = (*encoderJsonIO).kComplex128 + case reflect.Chan: + fn.fe = (*encoderJsonIO).kChan + case reflect.Slice: + fn.fe = (*encoderJsonIO).kSlice + case reflect.Array: + fn.fe = (*encoderJsonIO).kArray + case reflect.Struct: + if ti.simple { + fn.fe = (*encoderJsonIO).kStructSimple + } else { + fn.fe = (*encoderJsonIO).kStruct + } + case reflect.Map: + fn.fe = (*encoderJsonIO).kMap + case reflect.Interface: + + fn.fe = (*encoderJsonIO).kErr + default: + + fn.fe = (*encoderJsonIO).kErr + } + } + } + return +} +func (d *decoderJsonIO) rawExt(f *decFnInfo, rv reflect.Value) { + d.d.DecodeRawExt(rv2i(rv).(*RawExt)) +} + +func (d *decoderJsonIO) ext(f *decFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (d *decoderJsonIO) selferUnmarshal(_ *decFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(&Decoder{d}) +} + +func (d *decoderJsonIO) binaryUnmarshal(_ *decFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs, _ := d.d.DecodeBytes() + fnerr := bm.UnmarshalBinary(xbs) + halt.onerror(fnerr) +} + +func (d *decoderJsonIO) textUnmarshal(_ *decFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(bytesOKs(d.d.DecodeStringAsBytes())) + halt.onerror(fnerr) +} + +func (d *decoderJsonIO) jsonUnmarshal(_ *decFnInfo, rv reflect.Value) { + d.jsonUnmarshalV(rv2i(rv).(jsonUnmarshaler)) +} + +func (d *decoderJsonIO) jsonUnmarshalV(tm jsonUnmarshaler) { + + halt.onerror(tm.UnmarshalJSON(d.d.nextValueBytes())) +} + +func (d *decoderJsonIO) kErr(_ *decFnInfo, rv reflect.Value) { + halt.errorf("unsupported decoding kind: %s, for %#v", rv.Kind(), rv) + +} + +func (d *decoderJsonIO) raw(_ *decFnInfo, rv reflect.Value) { + rvSetBytes(rv, d.rawBytes()) +} + +func (d *decoderJsonIO) kString(_ *decFnInfo, rv reflect.Value) { + rvSetString(rv, d.detach2Str(d.d.DecodeStringAsBytes())) +} + +func (d *decoderJsonIO) kBool(_ *decFnInfo, rv reflect.Value) { + rvSetBool(rv, d.d.DecodeBool()) +} + +func (d *decoderJsonIO) kTime(_ *decFnInfo, rv reflect.Value) { + rvSetTime(rv, d.d.DecodeTime()) +} + +func (d *decoderJsonIO) kFloat32(_ *decFnInfo, rv reflect.Value) { + rvSetFloat32(rv, d.d.DecodeFloat32()) +} + +func (d *decoderJsonIO) kFloat64(_ *decFnInfo, rv reflect.Value) { + rvSetFloat64(rv, d.d.DecodeFloat64()) +} + +func (d *decoderJsonIO) kComplex64(_ *decFnInfo, rv reflect.Value) { + rvSetComplex64(rv, complex(d.d.DecodeFloat32(), 0)) +} + +func (d *decoderJsonIO) kComplex128(_ *decFnInfo, rv reflect.Value) { + rvSetComplex128(rv, complex(d.d.DecodeFloat64(), 0)) +} + +func (d *decoderJsonIO) kInt(_ *decFnInfo, rv reflect.Value) { + rvSetInt(rv, int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))) +} + +func (d *decoderJsonIO) kInt8(_ *decFnInfo, rv reflect.Value) { + rvSetInt8(rv, int8(chkOvf.IntV(d.d.DecodeInt64(), 8))) +} + +func (d *decoderJsonIO) kInt16(_ *decFnInfo, rv reflect.Value) { + rvSetInt16(rv, int16(chkOvf.IntV(d.d.DecodeInt64(), 16))) +} + +func (d *decoderJsonIO) kInt32(_ *decFnInfo, rv reflect.Value) { + rvSetInt32(rv, int32(chkOvf.IntV(d.d.DecodeInt64(), 32))) +} + +func 
(d *decoderJsonIO) kInt64(_ *decFnInfo, rv reflect.Value) { + rvSetInt64(rv, d.d.DecodeInt64()) +} + +func (d *decoderJsonIO) kUint(_ *decFnInfo, rv reflect.Value) { + rvSetUint(rv, uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderJsonIO) kUintptr(_ *decFnInfo, rv reflect.Value) { + rvSetUintptr(rv, uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderJsonIO) kUint8(_ *decFnInfo, rv reflect.Value) { + rvSetUint8(rv, uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))) +} + +func (d *decoderJsonIO) kUint16(_ *decFnInfo, rv reflect.Value) { + rvSetUint16(rv, uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))) +} + +func (d *decoderJsonIO) kUint32(_ *decFnInfo, rv reflect.Value) { + rvSetUint32(rv, uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))) +} + +func (d *decoderJsonIO) kUint64(_ *decFnInfo, rv reflect.Value) { + rvSetUint64(rv, d.d.DecodeUint64()) +} + +func (d *decoderJsonIO) kInterfaceNaked(f *decFnInfo) (rvn reflect.Value) { + + n := d.naked() + d.d.DecodeNaked() + + if decFailNonEmptyIntf && f.ti.numMeth > 0 { + halt.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + } + + switch n.v { + case valueTypeMap: + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapStrIntfTypId { + var v2 map[string]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + + rvn = rvZeroAddrK(d.h.MapType, reflect.Map) + d.decodeValue(rvn, nil) + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvZeroAddrK(d.h.SliceType, reflect.Slice) + d.decodeValue(rvn, nil) + } + if d.h.PreferArrayOverSlice { + rvn = rvGetArray4Slice(rvn) + } + case valueTypeExt: + tag, bytes := n.u, n.l + bfn := d.h.getExtForTag(tag) + var re = RawExt{Tag: tag} + if bytes == nil { + + if bfn == nil { + d.decode(&re.Value) + rvn = rv4iptr(&re).Elem() + } else if bfn.ext == SelfExt { + rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind()) + d.decodeValue(rvn, d.fnNoExt(bfn.rt)) + } else { + rvn = reflect.New(bfn.rt) + d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext) + rvn = rvn.Elem() + } + } else { + + if bfn == nil { + re.setData(bytes, false) + rvn = rv4iptr(&re).Elem() + } else { + rvn = reflect.New(bfn.rt) + if bfn.ext == SelfExt { + sideDecode(d.hh, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv2i(rvn), bytes, bfn.rt, true) }) + } else { + bfn.ext.ReadExt(rv2i(rvn), bytes) + } + rvn = rvn.Elem() + } + } + + if d.h.PreferPointerForStructOrArray && rvn.CanAddr() { + if rk := rvn.Kind(); rk == reflect.Array || rk == reflect.Struct { + rvn = rvn.Addr() + } + } + case valueTypeNil: + + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + halt.errorStr2("kInterfaceNaked: unexpected valueType: ", n.v.String()) + } + return +} + +func (d *decoderJsonIO) kInterface(f *decFnInfo, rv reflect.Value) { + + isnilrv := rvIsNil(rv) + + 
var rvn reflect.Value + + if d.h.InterfaceReset { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } else if !isnilrv { + decSetNonNilRV2Zero4Intf(rv) + } + return + } + } else if isnilrv { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } + return + } + } else { + + rvn = rv.Elem() + } + + canDecode, _ := isDecodeable(rvn) + + if !canDecode { + rvn2 := d.oneShotAddrRV(rvn.Type(), rvn.Kind()) + rvSetDirect(rvn2, rvn) + rvn = rvn2 + } + + d.decodeValue(rvn, nil) + rvSetIntf(rv, rvn) +} + +func (d *decoderJsonIO) kStructField(si *structFieldInfo, rv reflect.Value) { + if d.d.TryNil() { + rv = si.fieldNoAlloc(rv, true) + if rv.IsValid() { + decSetNonNilRV2Zero(rv) + } + } else if si.decBuiltin { + rv = rvAddr(si.fieldAlloc(rv), si.ptrTyp) + d.decode(rv2i(rv)) + } else { + fn := d.fn(si.baseTyp) + rv = si.fieldAlloc(rv) + if fn.i.addrD { + rv = rvAddr(rv, si.ptrTyp) + } + fn.fd(d, &fn.i, rv) + } +} + +func (d *decoderJsonIO) kStructSimple(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var rvkencname []byte + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderJsonIO) kStruct(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + var mf MissingFielder + if ti.flagMissingFielder { + mf = rv2i(rv).(MissingFielder) + } else if ti.flagMissingFielderPtr { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var name2 []byte + var rvkencname []byte + tkt := ti.keyType + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + + if tkt == valueTypeString { + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + } else if tkt == valueTypeInt { + rvkencname = strconv.AppendInt(d.b[:0], d.d.DecodeInt64(), 10) + } else if tkt == valueTypeUint { + rvkencname = strconv.AppendUint(d.b[:0], d.d.DecodeUint64(), 10) + } else if tkt == valueTypeFloat { + rvkencname = strconv.AppendFloat(d.b[:0], d.d.DecodeFloat64(), 'f', -1, 64) + } else { + halt.errorStr2("invalid struct key type: ", ti.keyType.String()) + } + + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else if mf != nil { + + name2 = append(name2[:0], rvkencname...) 
+ var f interface{} + d.decode(&f) + if !mf.CodecMissingField(name2, f) && d.h.ErrorIfNoField { + halt.errorStr2("no matching struct field when decoding stream map with key: ", stringView(name2)) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderJsonIO) kSlice(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + rvCanset := rv.CanSet() + + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + rvbs := rvGetBytes(rv) + if rvCanset { + bs2, bst := d.decodeBytesInto(rvbs, false) + if bst != dBytesIntoParamOut { + rvSetBytes(rv, bs2) + } + } else { + + d.decodeBytesInto(rvbs[:len(rvbs):len(rvbs)], true) + } + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset { + if rvIsNil(rv) { + rvSetDirect(rv, rvSliceZeroCap(ti.rt)) + } else { + rvSetSliceLen(rv, 0) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem0Mut := !scalarBitset.isset(ti.elemkind) + rtelem := ti.elem + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnJsonIO + + var rvChanged bool + + var rv0 = rv + var rv9 reflect.Value + + rvlen := rvLenSlice(rv) + rvcap := rvCapSlice(rv) + maxInitLen := d.maxInitLen() + hasLen := containerLenS >= 0 + if hasLen { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen1 := int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + if rvlen1 == rvlen { + } else if rvlen1 <= rvcap { + if rvCanset { + rvlen = rvlen1 + rvSetSliceLen(rv, rvlen) + } + } else if rvCanset { + rvlen = rvlen1 + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && rtelem0Mut { + rvCopySlice(rv, rv0, rtelem) + } + } else if containerLenS != rvlen { + if rvCanset { + rvlen = containerLenS + rvSetSliceLen(rv, rvlen) + } + } + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } + + var j int + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if rvCanset { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into 
non-settable slice") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + + if rvlen < rvcap { + rvlen = rvcap + if rvCanset { + rvSetSliceLen(rv, rvlen) + } else if rvChanged { + rv = rvSlice(rv, rvlen) + } else { + halt.onerror(errExpandSliceCannotChange) + } + } else { + if !(rvCanset || rvChanged) { + halt.onerror(errExpandSliceCannotChange) + } + rv, rvcap, rvCanset = rvGrowSlice(rv, f.ti, rvcap, 1) + + rvlen = rvcap + rvChanged = !rvCanset + } + } + + rv9 = rvArrayIndex(rv, j, f.ti, true) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if j < rvlen { + if rvCanset { + rvSetSliceLen(rv, j) + } else if rvChanged { + rv = rvSlice(rv, j) + } + + } else if j == 0 && rvIsNil(rv) { + if rvCanset { + rv = rvSliceZeroCap(ti.rt) + rvCanset = false + rvChanged = true + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } +} + +func (d *decoderJsonIO) kArray(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + ctyp := d.d.ContainerType() + if handleBytesWithinKArray && (ctyp == valueTypeBytes || ctyp == valueTypeString) { + + if ti.elemkind != uint8(reflect.Uint8) { + halt.errorf("bytes/string in stream can decode into array of bytes, but not %v", ti.rt) + } + rvbs := rvGetArrayBytes(rv, nil) + d.decodeBytesInto(rvbs, true) + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var rv9 reflect.Value + + rvlen := rv.Len() + hasLen := containerLenS >= 0 + if hasLen && containerLenS > rvlen { + halt.errorf("cannot decode into array with length: %v, less than container length: %v", any(rvlen), any(containerLenS)) + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + var fn *decFnJsonIO + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } else { + fn = d.fn(rtelem) + } + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + d.arrayCannotExpand(rvlen, j+1) + d.swallow() + continue + } + + rv9 = rvArrayIndex(rv, j, f.ti, false) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func 
(d *decoderJsonIO) kChan(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + if ti.chandir&uint8(reflect.SendDir) == 0 { + halt.errorStr("receive-only channel cannot be decoded") + } + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + bs2, _ := d.d.DecodeBytes() + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + return + } + + var rvCanset = rv.CanSet() + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset && rvIsNil(rv) { + rvSetDirect(rv, reflect.MakeChan(ti.rt, 0)) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + useTransient := decUseTransient && ti.elemkind != byte(reflect.Ptr) && ti.tielem.flagCanTransient + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnJsonIO + + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + var rvlen int + hasLen := containerLenS >= 0 + maxInitLen := d.maxInitLen() + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if hasLen { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + } else { + rvlen = decDefChanCap + } + if rvCanset { + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } else { + halt.errorStr("cannot decode into non-settable chan") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if rv9.IsValid() { + rvSetZero(rv9) + } else if useTransient { + rv9 = d.perType.TransientAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } else { + rv9 = rvZeroAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } + if !d.d.TryNil() { + d.decodeValueNoCheckNil(rv9, fn) + } + rv.Send(rv9) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } + +} + +func (d *decoderJsonIO) kMap(f *decFnInfo, rv reflect.Value) { + _ = d.d + containerLen := d.mapStart(d.d.ReadMapStart()) + ti := f.ti + if rvIsNil(rv) { + rvlen := int(decInferLen(containerLen, d.maxInitLen(), uint(ti.keysize+ti.elemsize))) + rvSetDirect(rv, makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + d.mapEnd() + return + } + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := reflect.Kind(ti.elemkind) + ktypeKind := reflect.Kind(ti.keykind) + mparams := getMapReqParams(ti) + + vtypePtr := vtypeKind == reflect.Ptr + ktypePtr := ktypeKind == reflect.Ptr + + vTransient := decUseTransient && !vtypePtr && ti.tielem.flagCanTransient + + kTransient := vTransient && !ktypePtr && ti.tikey.flagCanTransient + + var vtypeElem reflect.Type + + var keyFn, valFn *decFnJsonIO + var ktypeLo, vtypeLo = ktype, vtype + + if ktypeKind == reflect.Ptr { + for ktypeLo = ktype.Elem(); ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + } + + if vtypePtr { + vtypeElem = vtype.Elem() + for vtypeLo = vtypeElem; 
vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + } + + rvkMut := !scalarBitset.isset(ti.keykind) + rvvMut := !scalarBitset.isset(ti.elemkind) + rvvCanNil := isnilBitset.isset(ti.elemkind) + + var rvk, rvkn, rvv, rvvn, rvva, rvvz reflect.Value + + var doMapGet, doMapSet bool + + if !d.h.MapValueReset { + if rvvMut && (vtypeKind != reflect.Interface || !d.h.InterfaceReset) { + doMapGet = true + rvva = mapAddrLoopvarRV(vtype, vtypeKind) + } + } + + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen >= 0 + + var kstr2bs []byte + var kstr string + + var mapKeyStringSharesBytesBuf bool + var att dBytesAttachState + + var vElem, kElem reflect.Type + kbuiltin := ti.tikey.flagDecBuiltin && ti.keykind != uint8(reflect.Slice) + vbuiltin := ti.tielem.flagDecBuiltin + if kbuiltin && ktypePtr { + kElem = ti.key.Elem() + } + if vbuiltin && vtypePtr { + vElem = ti.elem.Elem() + } + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + mapKeyStringSharesBytesBuf = false + kstr = "" + if j == 0 { + + if kTransient { + rvk = d.perType.TransientAddr2K(ktype, ktypeKind) + } else { + rvk = rvZeroAddrK(ktype, ktypeKind) + } + if !rvkMut { + rvkn = rvk + } + if !rvvMut { + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + } + if !ktypeIsString && keyFn == nil { + keyFn = d.fn(ktypeLo) + } + if valFn == nil { + valFn = d.fn(vtypeLo) + } + } else if rvkMut { + rvSetZero(rvk) + } else { + rvk = rvkn + } + + d.mapElemKey(j == 0) + + if d.d.TryNil() { + rvSetZero(rvk) + } else if ktypeIsString { + kstr2bs, att = d.d.DecodeStringAsBytes() + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, att) + rvSetString(rvk, kstr) + } else { + if kbuiltin { + if ktypePtr { + if rvIsNil(rvk) { + rvSetDirect(rvk, reflect.New(kElem)) + } + d.decode(rv2i(rvk)) + } else { + d.decode(rv2i(rvAddr(rvk, ti.tikey.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvk, keyFn) + } + + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() && rvk2.Type() == uint8SliceTyp { + kstr2bs = rvGetBytes(rvk2) + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, dBytesAttachView) + rvSetIntf(rvk, rv4istr(kstr)) + } + + } + } + + if mapKeyStringSharesBytesBuf && d.bufio { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + mapKeyStringSharesBytesBuf = false + } + + d.mapElemValue() + + if d.d.TryNil() { + if mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + + if !rvvz.IsValid() { + rvvz = rvZeroK(vtype, vtypeKind) + } + mapSet(rv, rvk, rvvz, mparams) + continue + } + + doMapSet = true + + if !rvvMut { + rvv = rvvn + } else if !doMapGet { + goto NEW_RVV + } else { + rvv = mapGet(rv, rvk, rvva, mparams) + if !rvv.IsValid() || (rvvCanNil && rvIsNil(rvv)) { + goto NEW_RVV + } + switch vtypeKind { + case reflect.Ptr, reflect.Map: + doMapSet = false + case reflect.Interface: + + rvvn = rvv.Elem() + if k := rvvn.Kind(); (k == reflect.Ptr || k == reflect.Map) && !rvIsNil(rvvn) { + d.decodeValueNoCheckNil(rvvn, nil) + continue + } + + rvvn = rvZeroAddrK(vtype, vtypeKind) + rvSetIntf(rvvn, rvv) + rvv = rvvn + default: + + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + rvSetDirect(rvvn, rvv) + rvv = rvvn + } + } + goto 
DECODE_VALUE_NO_CHECK_NIL + + NEW_RVV: + if vtypePtr { + rvv = reflect.New(vtypeElem) + } else if vTransient { + rvv = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvv = rvZeroAddrK(vtype, vtypeKind) + } + + DECODE_VALUE_NO_CHECK_NIL: + if doMapSet && mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + if vbuiltin { + if vtypePtr { + if rvIsNil(rvv) { + rvSetDirect(rvv, reflect.New(vElem)) + } + d.decode(rv2i(rvv)) + } else { + d.decode(rv2i(rvAddr(rvv, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvv, valFn) + } + if doMapSet { + mapSet(rv, rvk, rvv, mparams) + } + } + + d.mapEnd() +} + +func (d *decoderJsonIO) init(h Handle) { + initHandle(h) + callMake(&d.d) + d.hh = h + d.h = h.getBasicHandle() + + d.err = errDecoderNotInitialized + + if d.h.InternString && d.is == nil { + d.is.init() + } + + d.fp = d.d.init(h, &d.decoderBase, d).(*fastpathDsJsonIO) + + if d.bytes { + d.rtidFn = &d.h.rtidFnsDecBytes + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtBytes + } else { + d.bufio = d.h.ReaderBufferSize > 0 + d.rtidFn = &d.h.rtidFnsDecIO + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtIO + } + + d.reset() + +} + +func (d *decoderJsonIO) reset() { + d.d.reset() + d.err = nil + d.c = 0 + d.depth = 0 + d.calls = 0 + + d.maxdepth = decDefMaxDepth + if d.h.MaxDepth > 0 { + d.maxdepth = d.h.MaxDepth + } + d.mtid = 0 + d.stid = 0 + d.mtr = false + d.str = false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + _, d.mtr = fastpathAvIndex(d.mtid) + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + _, d.str = fastpathAvIndex(d.stid) + } +} + +func (d *decoderJsonIO) Reset(r io.Reader) { + if d.bytes { + halt.onerror(errDecNoResetBytesWithReader) + } + d.reset() + if r == nil { + r = &eofReader + } + d.d.resetInIO(r) +} + +func (d *decoderJsonIO) ResetBytes(in []byte) { + if !d.bytes { + halt.onerror(errDecNoResetReaderWithBytes) + } + d.resetBytes(in) +} + +func (d *decoderJsonIO) resetBytes(in []byte) { + d.reset() + if in == nil { + in = zeroByteSlice + } + d.d.resetInBytes(in) +} + +func (d *decoderJsonIO) ResetString(s string) { + d.ResetBytes(bytesView(s)) +} + +func (d *decoderJsonIO) Decode(v interface{}) (err error) { + + defer panicValToErr(d, callRecoverSentinel, &d.err, &err, debugging) + d.mustDecode(v) + return +} + +func (d *decoderJsonIO) MustDecode(v interface{}) { + defer panicValToErr(d, callRecoverSentinel, &d.err, nil, true) + d.mustDecode(v) + return +} + +func (d *decoderJsonIO) mustDecode(v interface{}) { + halt.onerror(d.err) + if d.hh == nil { + halt.onerror(errNoFormatHandle) + } + + d.calls++ + d.decode(v) + d.calls-- +} + +func (d *decoderJsonIO) Release() {} + +func (d *decoderJsonIO) swallow() { + d.d.nextValueBytes() +} + +func (d *decoderJsonIO) nextValueBytes() []byte { + return d.d.nextValueBytes() +} + +func (d *decoderJsonIO) decode(iv interface{}) { + _ = d.d + + rv, ok := isNil(iv, true) + if ok { + halt.onerror(errCannotDecodeIntoNil) + } + + switch v := iv.(type) { + + case *string: + *v = d.detach2Str(d.d.DecodeStringAsBytes()) + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + *v = uint(chkOvf.UintV(d.d.DecodeUint64(), 
uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *uintptr: + *v = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *float32: + *v = d.d.DecodeFloat32() + case *float64: + *v = d.d.DecodeFloat64() + case *complex64: + *v = complex(d.d.DecodeFloat32(), 0) + case *complex128: + *v = complex(d.d.DecodeFloat64(), 0) + case *[]byte: + *v, _ = d.decodeBytesInto(*v, false) + case []byte: + + d.decodeBytesInto(v[:len(v):len(v)], true) + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(rv4iptr(v), nil) + + case reflect.Value: + if ok, _ = isDecodeable(v); !ok { + d.haltAsNotDecodeable(v) + } + d.decodeValue(v, nil) + + default: + + if skipFastpathTypeSwitchInDirectCall || !d.dh.fastpathDecodeTypeSwitch(iv, d) { + if !rv.IsValid() { + rv = reflect.ValueOf(iv) + } + if ok, _ = isDecodeable(rv); !ok { + d.haltAsNotDecodeable(rv) + } + d.decodeValue(rv, nil) + } + } +} + +func (d *decoderJsonIO) decodeValue(rv reflect.Value, fn *decFnJsonIO) { + if d.d.TryNil() { + decSetNonNilRV2Zero(rv) + } else { + d.decodeValueNoCheckNil(rv, fn) + } +} + +func (d *decoderJsonIO) decodeValueNoCheckNil(rv reflect.Value, fn *decFnJsonIO) { + + var rvp reflect.Value + var rvpValid bool +PTR: + if rv.Kind() == reflect.Ptr { + rvpValid = true + if rvIsNil(rv) { + rvSetDirect(rv, reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + goto PTR + } + + if fn == nil { + fn = d.fn(rv.Type()) + } + if fn.i.addrD { + if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else if fn.i.addrDf { + halt.errorStr("cannot decode into a non-pointer value") + } + } + fn.fd(d, &fn.i, rv) +} + +func (d *decoderJsonIO) decodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + d.decodeValue(baseRV(v), d.fn(t)) + } else { + d.decodeValue(baseRV(v), d.fnNoExt(t)) + } +} + +func (d *decoderJsonIO) structFieldNotFound(index int, rvkencname string) { + + if d.h.ErrorIfNoField { + if index >= 0 { + halt.errorInt("no matching struct field found when decoding stream array at index ", int64(index)) + } else if rvkencname != "" { + halt.errorStr2("no matching struct field found when decoding stream map with key ", rvkencname) + } + } + d.swallow() +} + +func (d *decoderJsonIO) decodeBytesInto(out []byte, mustFit bool) (v []byte, state dBytesIntoState) { + v, att := d.d.DecodeBytes() + if cap(v) == 0 || (att >= dBytesAttachViewZerocopy && !mustFit) { + + return + } + if len(v) == 0 { + v = zeroByteSlice + return + } + if len(out) == len(v) { + state = dBytesIntoParamOut + } else if cap(out) >= len(v) { + out = out[:len(v)] + state = dBytesIntoParamOutSlice + } else if mustFit { + halt.errorf("bytes capacity insufficient for decoded bytes: got/expected: %d/%d", len(v), len(out)) + } else { + out = make([]byte, len(v)) + state = dBytesIntoNew + } + copy(out, v) + v = out + return +} + +func (d *decoderJsonIO) rawBytes() (v []byte) { + + v = d.d.nextValueBytes() + if d.bytes && !d.h.ZeroCopy { + vv := make([]byte, len(v)) + copy(vv, v) + v = vv + } + return +} + +func (d *decoderJsonIO) wrapErr(v error, err *error) { + *err = wrapCodecErr(v, d.hh.Name(), d.d.NumBytesRead(), false) +} + +func (d *decoderJsonIO) NumBytesRead() int { + return d.d.NumBytesRead() +} + +func (d *decoderJsonIO) containerNext(j, containerLen 
int, hasLen bool) bool { + + if hasLen { + return j < containerLen + } + return !d.d.CheckBreak() +} + +func (d *decoderJsonIO) mapElemKey(firstTime bool) { + d.d.ReadMapElemKey(firstTime) + d.c = containerMapKey +} + +func (d *decoderJsonIO) mapElemValue() { + d.d.ReadMapElemValue() + d.c = containerMapValue +} + +func (d *decoderJsonIO) mapEnd() { + d.d.ReadMapEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderJsonIO) arrayElem(firstTime bool) { + d.d.ReadArrayElem(firstTime) + d.c = containerArrayElem +} + +func (d *decoderJsonIO) arrayEnd() { + d.d.ReadArrayEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderJsonIO) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) { + + var vv interface{} + d.decode(&vv) + ext.UpdateExt(v, vv) + +} + +func (d *decoderJsonIO) fn(t reflect.Type) *decFnJsonIO { + return d.dh.decFnViaBH(t, d.rtidFn, d.h, d.fp, false) +} + +func (d *decoderJsonIO) fnNoExt(t reflect.Type) *decFnJsonIO { + return d.dh.decFnViaBH(t, d.rtidFnNoExt, d.h, d.fp, true) +} + +func (helperDecDriverJsonIO) newDecoderBytes(in []byte, h Handle) *decoderJsonIO { + var c1 decoderJsonIO + c1.bytes = true + c1.init(h) + c1.ResetBytes(in) + return &c1 +} + +func (helperDecDriverJsonIO) newDecoderIO(in io.Reader, h Handle) *decoderJsonIO { + var c1 decoderJsonIO + c1.init(h) + c1.Reset(in) + return &c1 +} + +func (helperDecDriverJsonIO) decFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathDsJsonIO) (f *fastpathDJsonIO, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperDecDriverJsonIO) decFindRtidFn(s []decRtidFnJsonIO, rtid uintptr) (i uint, fn *decFnJsonIO) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperDecDriverJsonIO) decFromRtidFnSlice(fns *atomicRtidFnSlice) (s []decRtidFnJsonIO) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]decRtidFnJsonIO](v)) + } + return +} + +func (dh helperDecDriverJsonIO) decFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, x *BasicHandle, fp *fastpathDsJsonIO, + checkExt bool) (fn *decFnJsonIO) { + return dh.decFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperDecDriverJsonIO) decFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsJsonIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnJsonIO) { + rtid := rt2id(rt) + var sp []decRtidFnJsonIO = dh.decFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.decFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.decFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperDecDriverJsonIO) decFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsJsonIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnJsonIO) { + + fn = dh.decFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []decRtidFnJsonIO + mu.Lock() + sp = dh.decFromRtidFnSlice(fns) + + 
if sp == nil { + sp = []decRtidFnJsonIO{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.decFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]decRtidFnJsonIO, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = decRtidFnJsonIO{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperDecDriverJsonIO) decFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathDsJsonIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnJsonIO) { + fn = new(decFnJsonIO) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + fi.addrDf = true + + if rtid == timeTypId && timeBuiltin { + fn.fd = (*decoderJsonIO).kTime + } else if rtid == rawTypId { + fn.fd = (*decoderJsonIO).raw + } else if rtid == rawExtTypId { + fn.fd = (*decoderJsonIO).rawExt + fi.addrD = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fd = (*decoderJsonIO).ext + fi.addrD = true + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fd = (*decoderJsonIO).selferUnmarshal + fi.addrD = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fd = (*decoderJsonIO).binaryUnmarshal + fi.addrD = ti.flagBinaryUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fd = (*decoderJsonIO).jsonUnmarshal + fi.addrD = ti.flagJsonUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fd = (*decoderJsonIO).textUnmarshal + fi.addrD = ti.flagTextUnmarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fd = fp[idx].decfn + fi.addrD = true + fi.addrDf = false + if rk == reflect.Array { + fi.addrD = false + } + } + } else { + + xfe, xrt := dh.decFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf2 := xfe.decfn + if rk == reflect.Array { + fi.addrD = false + fn.fd = func(d *decoderJsonIO, xf *decFnInfo, xrv reflect.Value) { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } else { + fi.addrD = true + fi.addrDf = false + xptr2rt := reflect.PointerTo(xrt) + fn.fd = func(d *decoderJsonIO, xf *decFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, rvConvert(xrv, xptr2rt)) + } else { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } + } + } + } + } + if fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fd = (*decoderJsonIO).kBool + case reflect.String: + fn.fd = (*decoderJsonIO).kString + case reflect.Int: + fn.fd = (*decoderJsonIO).kInt + case reflect.Int8: + fn.fd = (*decoderJsonIO).kInt8 + case reflect.Int16: + fn.fd = (*decoderJsonIO).kInt16 + case reflect.Int32: + fn.fd = (*decoderJsonIO).kInt32 + case reflect.Int64: + fn.fd = (*decoderJsonIO).kInt64 + case reflect.Uint: + fn.fd = (*decoderJsonIO).kUint + case reflect.Uint8: + fn.fd = (*decoderJsonIO).kUint8 + case reflect.Uint16: + fn.fd = (*decoderJsonIO).kUint16 + case reflect.Uint32: + fn.fd = 
(*decoderJsonIO).kUint32 + case reflect.Uint64: + fn.fd = (*decoderJsonIO).kUint64 + case reflect.Uintptr: + fn.fd = (*decoderJsonIO).kUintptr + case reflect.Float32: + fn.fd = (*decoderJsonIO).kFloat32 + case reflect.Float64: + fn.fd = (*decoderJsonIO).kFloat64 + case reflect.Complex64: + fn.fd = (*decoderJsonIO).kComplex64 + case reflect.Complex128: + fn.fd = (*decoderJsonIO).kComplex128 + case reflect.Chan: + fn.fd = (*decoderJsonIO).kChan + case reflect.Slice: + fn.fd = (*decoderJsonIO).kSlice + case reflect.Array: + fi.addrD = false + fn.fd = (*decoderJsonIO).kArray + case reflect.Struct: + if ti.simple { + fn.fd = (*decoderJsonIO).kStructSimple + } else { + fn.fd = (*decoderJsonIO).kStruct + } + case reflect.Map: + fn.fd = (*decoderJsonIO).kMap + case reflect.Interface: + + fn.fd = (*decoderJsonIO).kInterface + default: + + fn.fd = (*decoderJsonIO).kErr + } + } + } + return +} +func (e *jsonEncDriverIO) writeIndent() { + e.w.writen1('\n') + x := int(e.di) * int(e.dl) + if e.di < 0 { + x = -x + for x > len(jsonTabs) { + e.w.writeb(jsonTabs[:]) + x -= len(jsonTabs) + } + e.w.writeb(jsonTabs[:x]) + } else { + for x > len(jsonSpaces) { + e.w.writeb(jsonSpaces[:]) + x -= len(jsonSpaces) + } + e.w.writeb(jsonSpaces[:x]) + } +} + +func (e *jsonEncDriverIO) WriteArrayElem(firstTime bool) { + if !firstTime { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } +} + +func (e *jsonEncDriverIO) WriteMapElemKey(firstTime bool) { + if !firstTime { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } +} + +func (e *jsonEncDriverIO) WriteMapElemValue() { + if e.d { + e.w.writen2(':', ' ') + } else { + e.w.writen1(':') + } +} + +func (e *jsonEncDriverIO) EncodeNil() { + + e.w.writeb(jsonNull) +} + +func (e *jsonEncDriverIO) encodeIntAsUint(v int64, quotes bool) { + neg := v < 0 + if neg { + v = -v + } + e.encodeUint(neg, quotes, uint64(v)) +} + +func (e *jsonEncDriverIO) EncodeTime(t time.Time) { + + if t.IsZero() { + e.EncodeNil() + return + } + switch e.timeFmt { + case jsonTimeFmtStringLayout: + e.b[0] = '"' + b := t.AppendFormat(e.b[1:1], e.timeFmtLayout) + e.b[len(b)+1] = '"' + e.w.writeb(e.b[:len(b)+2]) + case jsonTimeFmtUnix: + e.encodeIntAsUint(t.Unix(), false) + case jsonTimeFmtUnixMilli: + e.encodeIntAsUint(t.UnixMilli(), false) + case jsonTimeFmtUnixMicro: + e.encodeIntAsUint(t.UnixMicro(), false) + case jsonTimeFmtUnixNano: + e.encodeIntAsUint(t.UnixNano(), false) + } +} + +func (e *jsonEncDriverIO) EncodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + if ext == SelfExt { + e.enc.encodeAs(rv, basetype, false) + } else if v := ext.ConvertExt(rv); v == nil { + e.writeNilBytes() + } else { + e.enc.encodeI(v) + } +} + +func (e *jsonEncDriverIO) EncodeRawExt(re *RawExt) { + if re.Data != nil { + e.w.writeb(re.Data) + } else if re.Value != nil { + e.enc.encodeI(re.Value) + } else { + e.EncodeNil() + } +} + +func (e *jsonEncDriverIO) EncodeBool(b bool) { + e.w.writestr(jsonEncBoolStrs[bool2int(e.ks && e.e.c == containerMapKey)%2][bool2int(b)%2]) +} + +func (e *jsonEncDriverIO) encodeFloat(f float64, bitsize, fmt byte, prec int8) { + var blen uint + if e.ks && e.e.c == containerMapKey { + blen = 2 + uint(len(strconv.AppendFloat(e.b[1:1], f, fmt, int(prec), int(bitsize)))) + + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + } else { + e.w.writeb(strconv.AppendFloat(e.b[:0], f, fmt, int(prec), int(bitsize))) + } +} + +func (e *jsonEncDriverIO) EncodeFloat64(f float64) { + if math.IsNaN(f) || math.IsInf(f, 0) { + e.EncodeNil() + return + } + fmt, prec := 
jsonFloatStrconvFmtPrec64(f) + e.encodeFloat(f, 64, fmt, prec) +} + +func (e *jsonEncDriverIO) EncodeFloat32(f float32) { + if math.IsNaN(float64(f)) || math.IsInf(float64(f), 0) { + e.EncodeNil() + return + } + fmt, prec := jsonFloatStrconvFmtPrec32(f) + e.encodeFloat(float64(f), 32, fmt, prec) +} + +func (e *jsonEncDriverIO) encodeUint(neg bool, quotes bool, u uint64) { + e.w.writeb(jsonEncodeUint(neg, quotes, u, &e.b)) +} + +func (e *jsonEncDriverIO) EncodeInt(v int64) { + quotes := e.is == 'A' || e.is == 'L' && (v > 1<<53 || v < -(1<<53)) || + (e.ks && e.e.c == containerMapKey) + + if cpu32Bit { + if quotes { + blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + } else { + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) + } + return + } + + if v < 0 { + e.encodeUint(true, quotes, uint64(-v)) + } else { + e.encodeUint(false, quotes, uint64(v)) + } +} + +func (e *jsonEncDriverIO) EncodeUint(v uint64) { + quotes := e.is == 'A' || e.is == 'L' && v > 1<<53 || + (e.ks && e.e.c == containerMapKey) + + if cpu32Bit { + + if quotes { + blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + } else { + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) + } + return + } + + e.encodeUint(false, quotes, v) +} + +func (e *jsonEncDriverIO) EncodeString(v string) { + if e.h.StringToRaw { + e.EncodeStringBytesRaw(bytesView(v)) + return + } + e.quoteStr(v) +} + +func (e *jsonEncDriverIO) EncodeStringNoEscape4Json(v string) { e.w.writeqstr(v) } + +func (e *jsonEncDriverIO) EncodeStringBytesRaw(v []byte) { + if e.rawext { + + iv := e.h.RawBytesExt.ConvertExt(any(v)) + if iv == nil { + e.EncodeNil() + } else { + e.enc.encodeI(iv) + } + return + } + + if e.bytesFmt == jsonBytesFmtArray { + e.WriteArrayStart(len(v)) + for j := range v { + e.WriteArrayElem(j == 0) + e.encodeUint(false, false, uint64(v[j])) + } + e.WriteArrayEnd() + return + } + + var slen int + if e.bytesFmt == jsonBytesFmtBase64 { + slen = base64.StdEncoding.EncodedLen(len(v)) + } else { + slen = e.byteFmter.EncodedLen(len(v)) + } + slen += 2 + + bs := e.e.blist.peek(slen, false)[:slen] + + if e.bytesFmt == jsonBytesFmtBase64 { + base64.StdEncoding.Encode(bs[1:], v) + } else { + e.byteFmter.Encode(bs[1:], v) + } + + bs[len(bs)-1] = '"' + bs[0] = '"' + e.w.writeb(bs) +} + +func (e *jsonEncDriverIO) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *jsonEncDriverIO) writeNilOr(v []byte) { + if !e.h.NilCollectionToZeroLength { + v = jsonNull + } + e.w.writeb(v) +} + +func (e *jsonEncDriverIO) writeNilBytes() { + e.writeNilOr(jsonArrayEmpty) +} + +func (e *jsonEncDriverIO) writeNilArray() { + e.writeNilOr(jsonArrayEmpty) +} + +func (e *jsonEncDriverIO) writeNilMap() { + e.writeNilOr(jsonMapEmpty) +} + +func (e *jsonEncDriverIO) WriteArrayEmpty() { + e.w.writen2('[', ']') +} + +func (e *jsonEncDriverIO) WriteMapEmpty() { + e.w.writen2('{', '}') +} + +func (e *jsonEncDriverIO) WriteArrayStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('[') +} + +func (e *jsonEncDriverIO) WriteArrayEnd() { + if e.d { + e.dl-- + + e.writeIndent() + } + e.w.writen1(']') +} + +func (e *jsonEncDriverIO) WriteMapStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('{') +} + +func (e *jsonEncDriverIO) WriteMapEnd() { + if e.d { + e.dl-- + + e.writeIndent() + } + e.w.writen1('}') +} + +func (e *jsonEncDriverIO) quoteStr(s string) { + + const hex = "0123456789abcdef" + 
e.w.writen1('"') + var i, start uint + for i < uint(len(s)) { + + b := s[i] + if e.s.isset(b) { + i++ + continue + } + if b < utf8.RuneSelf { + if start < i { + e.w.writestr(s[start:i]) + } + switch b { + case '\\': + e.w.writen2('\\', '\\') + case '"': + e.w.writen2('\\', '"') + case '\n': + e.w.writen2('\\', 'n') + case '\t': + e.w.writen2('\\', 't') + case '\r': + e.w.writen2('\\', 'r') + case '\b': + e.w.writen2('\\', 'b') + case '\f': + e.w.writen2('\\', 'f') + default: + e.w.writestr(`\u00`) + e.w.writen2(hex[b>>4], hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + e.w.writestr(s[start:i]) + } + e.w.writestr(`\uFFFD`) + i++ + start = i + continue + } + + if jsonEscapeMultiByteUnicodeSep && (c == '\u2028' || c == '\u2029') { + if start < i { + e.w.writestr(s[start:i]) + } + e.w.writestr(`\u202`) + e.w.writen1(hex[c&0xF]) + i += uint(size) + start = i + continue + } + i += uint(size) + } + if start < uint(len(s)) { + e.w.writestr(s[start:]) + } + e.w.writen1('"') +} + +func (e *jsonEncDriverIO) atEndOfEncode() { + if e.h.TermWhitespace { + var c byte = ' ' + if e.e.c != 0 { + c = '\n' + } + e.w.writen1(c) + } +} + +func (d *jsonDecDriverIO) ReadMapStart() int { + d.advance() + if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return containerLenNil + } + if d.tok != '{' { + halt.errorByte("read map - expect char '{' but got char: ", d.tok) + } + d.tok = 0 + return containerLenUnknown +} + +func (d *jsonDecDriverIO) ReadArrayStart() int { + d.advance() + if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return containerLenNil + } + if d.tok != '[' { + halt.errorByte("read array - expect char '[' but got char ", d.tok) + } + d.tok = 0 + return containerLenUnknown +} + +func (d *jsonDecDriverIO) CheckBreak() bool { + d.advance() + return d.tok == '}' || d.tok == ']' +} + +func (d *jsonDecDriverIO) checkSep(xc byte) { + d.advance() + if d.tok != xc { + d.readDelimError(xc) + } + d.tok = 0 +} + +func (d *jsonDecDriverIO) ReadArrayElem(firstTime bool) { + if !firstTime { + d.checkSep(',') + } +} + +func (d *jsonDecDriverIO) ReadArrayEnd() { + d.checkSep(']') +} + +func (d *jsonDecDriverIO) ReadMapElemKey(firstTime bool) { + d.ReadArrayElem(firstTime) +} + +func (d *jsonDecDriverIO) ReadMapElemValue() { + d.checkSep(':') +} + +func (d *jsonDecDriverIO) ReadMapEnd() { + d.checkSep('}') +} + +func (d *jsonDecDriverIO) readDelimError(xc uint8) { + halt.errorf("read json delimiter - expect char '%c' but got char '%c'", xc, d.tok) +} + +func (d *jsonDecDriverIO) checkLit3(got, expect [3]byte) { + if jsonValidateSymbols && got != expect { + jsonCheckLitErr3(got, expect) + } + d.tok = 0 +} + +func (d *jsonDecDriverIO) checkLit4(got, expect [4]byte) { + if jsonValidateSymbols && got != expect { + jsonCheckLitErr4(got, expect) + } + d.tok = 0 +} + +func (d *jsonDecDriverIO) skipWhitespace() { + d.tok = d.r.skipWhitespace() +} + +func (d *jsonDecDriverIO) advance() { + + if d.tok < 33 { + d.skipWhitespace() + } +} + +func (d *jsonDecDriverIO) nextValueBytes() []byte { + consumeString := func() { + TOP: + _, c := d.r.jsonReadAsisChars() + if c == '\\' { + d.r.readn1() + goto TOP + } + } + + d.advance() + d.r.startRecording() + + switch d.tok { + default: + _, d.tok = d.r.jsonReadNum() + case 'n': + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + case 'f': + d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.r.readn4()) + case 't': + d.checkLit3([3]byte{'r', 'u', 
'e'}, d.r.readn3()) + case '"': + consumeString() + d.tok = 0 + case '{', '[': + var elem struct{} + var stack []struct{} + + stack = append(stack, elem) + + for len(stack) != 0 { + c := d.r.readn1() + switch c { + case '"': + consumeString() + case '{', '[': + stack = append(stack, elem) + case '}', ']': + stack = stack[:len(stack)-1] + } + } + d.tok = 0 + } + return d.r.stopRecording() +} + +func (d *jsonDecDriverIO) TryNil() bool { + d.advance() + + if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return true + } + return false +} + +func (d *jsonDecDriverIO) DecodeBool() (v bool) { + d.advance() + + fquot := d.d.c == containerMapKey && d.tok == '"' + if fquot { + d.tok = d.r.readn1() + } + switch d.tok { + case 'f': + d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.r.readn4()) + + case 't': + d.checkLit3([3]byte{'r', 'u', 'e'}, d.r.readn3()) + v = true + case 'n': + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + + default: + halt.errorByte("decode bool: got first char: ", d.tok) + + } + if fquot { + d.r.readn1() + } + return +} + +func (d *jsonDecDriverIO) DecodeTime() (t time.Time) { + + d.advance() + if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return + } + var bs []byte + + if d.tok != '"' { + bs, d.tok = d.r.jsonReadNum() + i := d.parseInt64(bs) + switch d.timeFmtNum { + case jsonTimeFmtUnix: + t = time.Unix(i, 0) + case jsonTimeFmtUnixMilli: + t = time.UnixMilli(i) + case jsonTimeFmtUnixMicro: + t = time.UnixMicro(i) + case jsonTimeFmtUnixNano: + t = time.Unix(0, i) + default: + halt.errorStr("invalid timeFmtNum") + } + return + } + + bs = d.readUnescapedString() + var err error + for _, v := range d.timeFmtLayouts { + t, err = time.Parse(v, stringView(bs)) + if err == nil { + return + } + } + halt.errorStr("error decoding time") + return +} + +func (d *jsonDecDriverIO) ContainerType() (vt valueType) { + + d.advance() + + if d.tok == '{' { + return valueTypeMap + } else if d.tok == '[' { + return valueTypeArray + } else if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return valueTypeNil + } else if d.tok == '"' { + return valueTypeString + } + return valueTypeUnset +} + +func (d *jsonDecDriverIO) decNumBytes() (bs []byte) { + d.advance() + if d.tok == '"' { + bs = d.r.jsonReadUntilDblQuote() + d.tok = 0 + } else if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + } else { + bs, d.tok = d.r.jsonReadNum() + } + return +} + +func (d *jsonDecDriverIO) DecodeUint64() (u uint64) { + b := d.decNumBytes() + u, neg, ok := parseInteger_bytes(b) + if neg { + halt.errorf("negative number cannot be decoded as uint64: %s", any(b)) + } + if !ok { + halt.onerror(strconvParseErr(b, "ParseUint")) + } + return +} + +func (d *jsonDecDriverIO) DecodeInt64() (v int64) { + return d.parseInt64(d.decNumBytes()) +} + +func (d *jsonDecDriverIO) parseInt64(b []byte) (v int64) { + u, neg, ok := parseInteger_bytes(b) + if !ok { + halt.onerror(strconvParseErr(b, "ParseInt")) + } + if chkOvf.Uint2Int(u, neg) { + halt.errorBytes("overflow decoding number from ", b) + } + if neg { + v = -int64(u) + } else { + v = int64(u) + } + return +} + +func (d *jsonDecDriverIO) DecodeFloat64() (f float64) { + var err error + bs := d.decNumBytes() + if len(bs) == 0 { + return + } + f, err = parseFloat64(bs) + halt.onerror(err) + return +} + +func (d *jsonDecDriverIO) DecodeFloat32() (f float32) { + var err error + bs := d.decNumBytes() + if len(bs) == 0 { + return + } + f, err = parseFloat32(bs) + halt.onerror(err) + return +} + +func 
(d *jsonDecDriverIO) advanceNil() (ok bool) { + d.advance() + if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return true + } + return false +} + +func (d *jsonDecDriverIO) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + if d.advanceNil() { + return + } + if ext == SelfExt { + d.dec.decodeAs(rv, basetype, false) + } else { + d.dec.interfaceExtConvertAndDecode(rv, ext) + } +} + +func (d *jsonDecDriverIO) DecodeRawExt(re *RawExt) { + if d.advanceNil() { + return + } + d.dec.decode(&re.Value) +} + +func (d *jsonDecDriverIO) decBytesFromArray(bs []byte) []byte { + d.advance() + if d.tok != ']' { + bs = append(bs, uint8(d.DecodeUint64())) + d.advance() + } + for d.tok != ']' { + if d.tok != ',' { + halt.errorByte("read array element - expect char ',' but got char: ", d.tok) + } + d.tok = 0 + bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) + d.advance() + } + d.tok = 0 + return bs +} + +func (d *jsonDecDriverIO) DecodeBytes() (bs []byte, state dBytesAttachState) { + d.advance() + state = dBytesDetach + if d.tok == 'n' { + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + return + } + state = dBytesAttachBuffer + + if d.rawext { + d.buf = d.buf[:0] + d.dec.interfaceExtConvertAndDecode(&d.buf, d.h.RawBytesExt) + bs = d.buf + return + } + + if d.tok == '[' { + d.tok = 0 + + bs = d.decBytesFromArray(d.buf[:0]) + d.buf = bs + return + } + + d.ensureReadingString() + bs1 := d.readUnescapedString() + + slen := base64.StdEncoding.DecodedLen(len(bs1)) + if slen == 0 { + bs = zeroByteSlice + state = dBytesDetach + } else if slen <= cap(d.buf) { + bs = d.buf[:slen] + } else { + d.buf = d.d.blist.putGet(d.buf, slen)[:slen] + bs = d.buf + } + var err error + for _, v := range d.byteFmters { + + slen, err = v.Decode(bs, bs1) + if err == nil { + bs = bs[:slen] + return + } + } + halt.errorf("error decoding byte string '%s': %v", any(bs1), err) + return +} + +func (d *jsonDecDriverIO) DecodeStringAsBytes() (bs []byte, state dBytesAttachState) { + d.advance() + + var cond bool + + if d.tok == '"' { + d.tok = 0 + bs, cond = d.dblQuoteStringAsBytes() + state = d.d.attachState(cond) + return + } + + state = dBytesDetach + + switch d.tok { + case 'n': + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + + case 'f': + d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.r.readn4()) + bs = jsonLitb[jsonLitF : jsonLitF+5] + case 't': + d.checkLit3([3]byte{'r', 'u', 'e'}, d.r.readn3()) + bs = jsonLitb[jsonLitT : jsonLitT+4] + default: + + bs, d.tok = d.r.jsonReadNum() + state = d.d.attachState(!d.d.bytes) + } + return +} + +func (d *jsonDecDriverIO) ensureReadingString() { + if d.tok != '"' { + halt.errorByte("expecting string starting with '\"'; got ", d.tok) + } +} + +func (d *jsonDecDriverIO) readUnescapedString() (bs []byte) { + + bs = d.r.jsonReadUntilDblQuote() + d.tok = 0 + return +} + +func (d *jsonDecDriverIO) dblQuoteStringAsBytes() (buf []byte, usingBuf bool) { + bs, c := d.r.jsonReadAsisChars() + if c == '"' { + return bs, !d.d.bytes + } + buf = append(d.buf[:0], bs...) 
+ + checkUtf8 := d.h.ValidateUnicode + usingBuf = true + + for { + + c = d.r.readn1() + + switch c { + case '"', '\\', '/', '\'': + buf = append(buf, c) + case 'b': + buf = append(buf, '\b') + case 'f': + buf = append(buf, '\f') + case 'n': + buf = append(buf, '\n') + case 'r': + buf = append(buf, '\r') + case 't': + buf = append(buf, '\t') + case 'u': + rr := d.appendStringAsBytesSlashU() + if checkUtf8 && rr == unicode.ReplacementChar { + d.buf = buf + halt.errorBytes("invalid UTF-8 character found after: ", buf) + } + buf = append(buf, d.bstr[:utf8.EncodeRune(d.bstr[:], rr)]...) + default: + d.buf = buf + halt.errorByte("unsupported escaped value: ", c) + } + + bs, c = d.r.jsonReadAsisChars() + buf = append(buf, bs...) + if c == '"' { + break + } + } + d.buf = buf + return +} + +func (d *jsonDecDriverIO) appendStringAsBytesSlashU() (r rune) { + var rr uint32 + cs := d.r.readn4() + if rr = jsonSlashURune(cs); rr == unicode.ReplacementChar { + return unicode.ReplacementChar + } + r = rune(rr) + if utf16.IsSurrogate(r) { + csu := d.r.readn2() + cs = d.r.readn4() + if csu[0] == '\\' && csu[1] == 'u' { + if rr = jsonSlashURune(cs); rr == unicode.ReplacementChar { + return unicode.ReplacementChar + } + return utf16.DecodeRune(r, rune(rr)) + } + return unicode.ReplacementChar + } + return +} + +func (d *jsonDecDriverIO) DecodeNaked() { + z := d.d.naked() + + d.advance() + var bs []byte + var err error + switch d.tok { + case 'n': + d.checkLit3([3]byte{'u', 'l', 'l'}, d.r.readn3()) + z.v = valueTypeNil + case 'f': + d.checkLit4([4]byte{'a', 'l', 's', 'e'}, d.r.readn4()) + z.v = valueTypeBool + z.b = false + case 't': + d.checkLit3([3]byte{'r', 'u', 'e'}, d.r.readn3()) + z.v = valueTypeBool + z.b = true + case '{': + z.v = valueTypeMap + case '[': + z.v = valueTypeArray + case '"': + + d.tok = 0 + bs, z.b = d.dblQuoteStringAsBytes() + att := d.d.attachState(z.b) + if jsonNakedBoolNumInQuotedStr && + d.h.MapKeyAsString && len(bs) > 0 && d.d.c == containerMapKey { + switch string(bs) { + + case "true": + z.v = valueTypeBool + z.b = true + case "false": + z.v = valueTypeBool + z.b = false + default: + if err = jsonNakedNum(z, bs, d.h.PreferFloat, d.h.SignedInteger); err != nil { + z.v = valueTypeString + z.s = d.d.detach2Str(bs, att) + } + } + } else { + z.v = valueTypeString + z.s = d.d.detach2Str(bs, att) + } + default: + bs, d.tok = d.r.jsonReadNum() + if len(bs) == 0 { + halt.errorStr("decode number from empty string") + } + if err = jsonNakedNum(z, bs, d.h.PreferFloat, d.h.SignedInteger); err != nil { + halt.errorf("decode number from %s: %v", any(bs), err) + } + } +} + +func (e *jsonEncDriverIO) reset() { + e.dl = 0 + + e.typical = e.h.typical() + if e.h.HTMLCharsAsIs { + e.s = &jsonCharSafeBitset + } else { + e.s = &jsonCharHtmlSafeBitset + } + e.di = int8(e.h.Indent) + e.d = e.h.Indent != 0 + e.ks = e.h.MapKeyAsString + e.is = e.h.IntegerAsString + + var ho jsonHandleOpts + ho.reset(e.h) + e.timeFmt = ho.timeFmt + e.bytesFmt = ho.bytesFmt + e.timeFmtLayout = "" + e.byteFmter = nil + if len(ho.timeFmtLayouts) > 0 { + e.timeFmtLayout = ho.timeFmtLayouts[0] + } + if len(ho.byteFmters) > 0 { + e.byteFmter = ho.byteFmters[0] + } + e.rawext = ho.rawext +} + +func (d *jsonDecDriverIO) reset() { + d.buf = d.d.blist.check(d.buf, 256) + d.tok = 0 + + d.jsonHandleOpts.reset(d.h) +} + +func (d *jsonEncDriverIO) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*JsonHandle) + d.e = shared + if shared.bytes { + fp = jsonFpEncBytes + } else { + fp = 
jsonFpEncIO + } + + d.init2(enc) + return +} + +func (e *jsonEncDriverIO) writeBytesAsis(b []byte) { e.w.writeb(b) } + +func (e *jsonEncDriverIO) writerEnd() { e.w.end() } + +func (e *jsonEncDriverIO) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *jsonEncDriverIO) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +func (d *jsonDecDriverIO) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*JsonHandle) + d.d = shared + if shared.bytes { + fp = jsonFpDecBytes + } else { + fp = jsonFpDecIO + } + + d.init2(dec) + return +} + +func (d *jsonDecDriverIO) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *jsonDecDriverIO) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *jsonDecDriverIO) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +func (d *jsonDecDriverIO) descBd() (s string) { + halt.onerror(errJsonNoBd) + return +} + +func (d *jsonEncDriverIO) init2(enc encoderI) { + d.enc = enc + +} + +func (d *jsonDecDriverIO) init2(dec decoderI) { + d.dec = dec + + d.buf = d.buf[:0] + + d.d.jsms = d.h.MapKeyAsString +} diff --git a/vendor/github.com/ugorji/go/codec/json.notfastpath.mono.generated.go b/vendor/github.com/ugorji/go/codec/json.notfastpath.mono.generated.go new file mode 100644 index 000000000..d23c173be --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/json.notfastpath.mono.generated.go @@ -0,0 +1,52 @@ +//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath) + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" +) + +type fastpathEJsonBytes struct { + rt reflect.Type + encfn func(*encoderJsonBytes, *encFnInfo, reflect.Value) +} +type fastpathDJsonBytes struct { + rt reflect.Type + decfn func(*decoderJsonBytes, *decFnInfo, reflect.Value) +} +type fastpathEsJsonBytes [0]fastpathEJsonBytes +type fastpathDsJsonBytes [0]fastpathDJsonBytes + +func (helperEncDriverJsonBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderJsonBytes) bool { + return false +} +func (helperDecDriverJsonBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderJsonBytes) bool { + return false +} + +func (helperEncDriverJsonBytes) fastpathEList() (v *fastpathEsJsonBytes) { return } +func (helperDecDriverJsonBytes) fastpathDList() (v *fastpathDsJsonBytes) { return } + +type fastpathEJsonIO struct { + rt reflect.Type + encfn func(*encoderJsonIO, *encFnInfo, reflect.Value) +} +type fastpathDJsonIO struct { + rt reflect.Type + decfn func(*decoderJsonIO, *decFnInfo, reflect.Value) +} +type fastpathEsJsonIO [0]fastpathEJsonIO +type fastpathDsJsonIO [0]fastpathDJsonIO + +func (helperEncDriverJsonIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderJsonIO) bool { + return false +} +func (helperDecDriverJsonIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderJsonIO) bool { + return false +} + +func (helperEncDriverJsonIO) fastpathEList() (v *fastpathEsJsonIO) { return } +func (helperDecDriverJsonIO) fastpathDList() (v *fastpathDsJsonIO) { return } diff --git a/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl b/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl deleted file mode 100644 index 53198064d..000000000 --- a/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. 
-// Use of this source code is governed by a MIT license found in the LICENSE file. - -//go:build !codec.notmammoth -// +build codec.notmammoth - -// Code generated from mammoth-test.go.tmpl - DO NOT EDIT. - -package codec - -import "testing" -import "fmt" -import "reflect" - -// TestMammoth has all the different paths optimized in fast-path -// It has all the primitives, slices and maps. -// -// For each of those types, it has a pointer and a non-pointer field. - -func init() { _ = fmt.Printf } // so we can include fmt as needed - -type TestMammoth struct { - -{{range .Values }}{{if .Primitive -}} -{{ .MethodNamePfx "F" true }} {{ .Primitive }} -{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} -{{end}}{{end}} - -{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}} -{{ .MethodNamePfx "F" false }} []{{ .Elem }} -{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} -{{ .MethodNamePfx "Farr4" false }} [4]{{ .Elem }} -{{end}}{{end}}{{end}} - -{{range .Values }}{{if not .Primitive }}{{if .MapKey -}} -{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} -{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} -{{end}}{{end}}{{end}} - -} - -{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}} -type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }} -func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { } -{{end}}{{end}}{{end}} - -{{range .Values }}{{if not .Primitive }}{{if .MapKey -}} -type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }} -{{end}}{{end}}{{end}} - -func __doTestMammothSlices(t *testing.T, h Handle) { -{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey -}} - var v{{$i}}va [8]{{ .Elem }} - for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { - {{/* - // fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v) - // - encode value to some []byte - // - decode into a length-wise-equal []byte - // - check if equal to initial slice - // - encode ptr to the value - // - check if encode bytes are same - // - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice - // - decode into non-addressable slice of equal length, then larger len - // - for each decode, compare elem-by-elem to the original slice - // - - // - rinse and repeat for a MapBySlice version - // - - */ -}} - var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }} - var bs{{$i}} []byte - v{{$i}}v1 = v - bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}") - if v == nil { - v{{$i}}v2 = make([]{{ .Elem }}, 2) - testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}") - testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}") // should not change - testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change - v{{$i}}v2 = make([]{{ .Elem }}, 2) - testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value - testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}-noaddr") // should not change - testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change - } else { - v{{$i}}v2 = make([]{{ .Elem }}, len(v)) - testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}") - v{{$i}}v2 = make([]{{ .Elem }}, len(v)) - testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value - testDeepEqualErr(v{{$i}}v1, 
v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr") - } - testReleaseBytes(bs{{$i}}) - // ... - bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p") - v{{$i}}v2 = nil - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p") - v{{$i}}va = [8]{{ .Elem }}{} // clear the array - testUnmarshalErr(&v{{$i}}va, bs{{$i}}, h, t, "dec-array-v{{$i}}-p-1") - if v{{$i}}v1 == nil && v{{$i}}v2 == nil { v{{$i}}v2 = []{{ .Elem }}{} } // so we can compare to zero len slice below - testDeepEqualErr(v{{$i}}va[:len(v{{$i}}v2)], v{{$i}}v2, t, "equal-array-v{{$i}}-p-1") - v{{$i}}va = [8]{{ .Elem }}{} // clear the array - v{{$i}}v2 = v{{$i}}va[:1:1] - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-1") - v{{$i}}va = [8]{{ .Elem }}{} // clear the array - v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)] - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-len") - v{{$i}}va = [8]{{ .Elem }}{} // clear the array - v{{$i}}v2 = v{{$i}}va[:] - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-cap") - if len(v{{$i}}v1) > 1 { - v{{$i}}va = [8]{{ .Elem }}{} // clear the array - testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr") - testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-len-noaddr") - v{{$i}}va = [8]{{ .Elem }}{} // clear the array - testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr") - testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr") - } - testReleaseBytes(bs{{$i}}) - // ... 
- var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }} - v{{$i}}v2 = nil - if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } - v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1) - v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) - if v != nil { - bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom") - testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom") - testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom") - testReleaseBytes(bs{{$i}}) - } - bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p") - v{{$i}}v2 = nil - v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) - testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p") - testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p") - testReleaseBytes(bs{{$i}}) - } -{{end}}{{end}}{{end}} -} - -func __doTestMammothMaps(t *testing.T, h Handle) { -{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey -}} - for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } { - // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v) - var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }} - var bs{{$i}} []byte - v{{$i}}v1 = v - bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}") - if v != nil { - if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map - testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}") - if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map - testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-noaddr") - } - if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len") - testReleaseBytes(bs{{$i}}) - bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p") - v{{$i}}v2 = nil - testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil") - testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil") - testReleaseBytes(bs{{$i}}) - // ... 
- if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map - var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }} - v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1) - v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2) - if v != nil { - bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom") - testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") - testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-p-len") - testReleaseBytes(bs{{$i}}) - } - } -{{end}}{{end}}{{end}} - -} - -func doTestMammothMapsAndSlices(t *testing.T, h Handle) { - defer testSetup(t, &h)() - if mh, ok := h.(*MsgpackHandle); ok { - defer func(b bool) { mh.RawToString = b }(mh.RawToString) - mh.RawToString = true - } - __doTestMammothSlices(t, h) - __doTestMammothMaps(t, h) -} - -func doTestMammoth(t *testing.T, h Handle) { - defer testSetup(t, &h)() - if mh, ok := h.(*MsgpackHandle); ok { - defer func(b bool) { mh.RawToString = b }(mh.RawToString) - mh.RawToString = true - } - - name := h.Name() - var b []byte - - var m, m2 TestMammoth - testRandomFillRV(reflect.ValueOf(&m).Elem()) - b = testMarshalErr(&m, h, t, "mammoth-"+name) - - testUnmarshalErr(&m2, b, h, t, "mammoth-"+name) - testDeepEqualErr(&m, &m2, t, "mammoth-"+name) - testReleaseBytes(b) - - if testing.Short() { - t.Skipf("skipping rest of mammoth test in -short mode") - } - - var mm, mm2 TestMammoth2Wrapper - testRandomFillRV(reflect.ValueOf(&mm).Elem()) - b = testMarshalErr(&mm, h, t, "mammoth2-"+name) - // os.Stderr.Write([]byte("\n\n\n\n" + string(b) + "\n\n\n\n")) - testUnmarshalErr(&mm2, b, h, t, "mammoth2-"+name) - testDeepEqualErr(&mm, &mm2, t, "mammoth2-"+name) - // testMammoth2(t, name, h) - testReleaseBytes(b) -} - -{{range $i, $e := .Formats -}} -func Test{{ . }}Mammoth(t *testing.T) { - doTestMammoth(t, test{{ . }}H) -} -{{end}} -{{range $i, $e := .Formats -}} -func Test{{ . }}MammothMapsAndSlices(t *testing.T) { - doTestMammothMapsAndSlices(t, test{{ . }}H) -} -{{end}} diff --git a/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl b/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl deleted file mode 100644 index 9fe56ec7f..000000000 --- a/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl +++ /dev/null @@ -1,101 +0,0 @@ -// +build !codec.notmammoth - -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT. - -package codec - -// Increase codecoverage by covering all the codecgen paths, in fast-path and gen-helper.go.... -// -// Note: even though this is built based on fast-path and gen-helper, we will run these tests -// in all modes, including notfastpath, etc. 
-// -// Add test file for creating a mammoth generated file as _mammoth_generated.go -// - generate a second mammoth files in a different file: mammoth2_generated_test.go -// mammoth-test.go.tmpl will do this -// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags) -// - as part of TestMammoth, run it also -// - this will cover all the codecgen, gen-helper, etc in one full run -// - check in mammoth* files into github also -// -// Now, add some types: -// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it -// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types -// - this wrapper object is what we work encode/decode (so that the codecgen methods are called) - - -// import "encoding/binary" - -import "fmt" - -type TestMammoth2 struct { - -{{range .Values }}{{if .Primitive }}{{/* -*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} -{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} -{{end}}{{end}} - -{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* -*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} -{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} -{{end}}{{end}}{{end}} - -{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* -*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} -{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} -{{end}}{{end}}{{end}} - -} - -// ----------- - -type testMammoth2Binary uint64 -func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) { -data = make([]byte, 8) -bigenstd.PutUint64(data, uint64(x)) -return -} -func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) { -*x = testMammoth2Binary(bigenstd.Uint64(data)) -return -} - -type testMammoth2Text uint64 -func (x testMammoth2Text) MarshalText() (data []byte, err error) { -data = []byte(fmt.Sprintf("%b", uint64(x))) -return -} -func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) { -_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x)) -return -} - -type testMammoth2Json uint64 -func (x testMammoth2Json) MarshalJSON() (data []byte, err error) { -data = []byte(fmt.Sprintf("%v", uint64(x))) -return -} -func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) { -_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x)) -return -} - -type testMammoth2Basic [4]uint64 - -type TestMammoth2Wrapper struct { - V TestMammoth2 - T testMammoth2Text - B testMammoth2Binary - J testMammoth2Json - C testMammoth2Basic - M map[testMammoth2Basic]TestMammoth2 - L []TestMammoth2 - A [4]int64 - - Tcomplex128 complex128 - Tcomplex64 complex64 - Tbytes []uint8 - Tpbytes *[]uint8 -} diff --git a/vendor/github.com/ugorji/go/codec/mammoth_test.go.tmpl b/vendor/github.com/ugorji/go/codec/mammoth_test.go.tmpl new file mode 100644 index 000000000..f6a02436e --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/mammoth_test.go.tmpl @@ -0,0 +1,324 @@ +//go:build !codec.notmammoth + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from mammoth_test.go.tmpl - DO NOT EDIT. + +package codec + +import "testing" +import "fmt" +import "reflect" + +// TestMammoth has all the different paths optimized in fastpath +// It has all the primitives, slices and maps. +// +// For each of those types, it has a pointer and a non-pointer field. 
+ +func init() { _ = fmt.Printf } // so we can include fmt as needed + +type TestMammoth struct { + +{{range .Values }}{{if .Primitive -}} +{{ .MethodNamePfx "F" true }} {{ .Primitive }} +{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} +{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}} +{{ .MethodNamePfx "F" false }} []{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} +{{ .MethodNamePfx "Farr4" false }} [4]{{ .Elem }} +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey -}} +{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +} + +// ----------- + +// Increase codecoverage by covering all the codecgen paths, in fastpath .... +// +// Note: even though this is built based on fastpath, we will run these tests +// in all modes, including notfastpath, etc. +// +// Add test file for creating a mammoth generated file as _mammoth_generated.go +// +// Now, add some types: +// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it +// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types +// - this wrapper object is what we work encode/decode (so that the codecgen methods are called) + +type testMammoth2Binary uint64 +func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) { +data = make([]byte, 8) +bigenstd.PutUint64(data, uint64(x)) +return +} +func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) { +*x = testMammoth2Binary(bigenstd.Uint64(data)) +return +} + +type testMammoth2Text uint64 +func (x testMammoth2Text) MarshalText() (data []byte, err error) { +data = []byte(fmt.Sprintf("%b", uint64(x))) +return +} +func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) { +_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x)) +return +} + +type testMammoth2Json uint64 +func (x testMammoth2Json) MarshalJSON() (data []byte, err error) { +data = []byte(fmt.Sprintf("%v", uint64(x))) +return +} +func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) { +_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x)) +return +} + +type testMammoth2Basic [4]uint64 + +type TestMammoth2Wrapper struct { + V TestMammoth + T testMammoth2Text + B testMammoth2Binary + J testMammoth2Json + C testMammoth2Basic + M map[testMammoth2Basic]TestMammoth + L []TestMammoth + A [4]int64 + + Tcomplex128 complex128 + Tcomplex64 complex64 + Tbytes []uint8 + Tpbytes *[]uint8 +} + +// ----------- + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}} +type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }} +func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { } +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey -}} +type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +func __doTestMammothSlices(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey -}} + var v{{$i}}va [8]{{ .Elem }} + for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { + {{/* + // fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v) + // - encode value to some []byte + // - decode into a length-wise-equal []byte + // - check if equal to initial slice + // - encode ptr to the value + // - check if encode bytes are same + // - decode into ptrs to: nil, 
then 1-elem slice, equal-length, then large len slice + // - decode into non-addressable slice of equal length, then larger len + // - for each decode, compare elem-by-elem to the original slice + // - + // - rinse and repeat for a MapBySlice version + // - + */ -}} + var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }} + var bs{{$i}} []byte + v{{$i}}v1 = v + bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}") + if v == nil { + v{{$i}}v2 = make([]{{ .Elem }}, 2) + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}") + testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}") // should not change + testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change + v{{$i}}v2 = make([]{{ .Elem }}, 2) + testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value + testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}-noaddr") // should not change + testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change + } else { + v{{$i}}v2 = make([]{{ .Elem }}, len(v)) + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}") + testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}") + v{{$i}}v2 = make([]{{ .Elem }}, len(v)) + testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value + testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-noaddr") + } + testReleaseBytes(bs{{$i}}) + // ... + bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p") + testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-p") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + testUnmarshalErr(&v{{$i}}va, bs{{$i}}, h, t, "dec-array-v{{$i}}-p-1") + if v{{$i}}v1 == nil && v{{$i}}v2 == nil { v{{$i}}v2 = []{{ .Elem }}{} } // so we can compare to zero len slice below + testDeepEqualErrHandle(v{{$i}}va[:len(v{{$i}}v2)], v{{$i}}v2, h, t, "equal-array-v{{$i}}-p-1") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:1:1] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1") + testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-p-1") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len") + testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-p-len") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap") + testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-p-cap") + if len(v{{$i}}v1) > 1 { + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr") + testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], h, t, "equal-slice-v{{$i}}-p-len-noaddr") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr") + testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], h, t, "equal-slice-v{{$i}}-p-cap-noaddr") + } + testReleaseBytes(bs{{$i}}) + // ... 
+ var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }} + v{{$i}}v2 = nil + if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1) + v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) + if v != nil { + bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom") + testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom") + testDeepEqualErrHandle(v{{$i}}v3, v{{$i}}v4, h, t, "equal-slice-v{{$i}}-custom") + testReleaseBytes(bs{{$i}}) + } + bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p") + v{{$i}}v2 = nil + v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) + testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p") + testDeepEqualErrHandle(v{{$i}}v3, v{{$i}}v4, h, t, "equal-slice-v{{$i}}-custom-p") + testReleaseBytes(bs{{$i}}) + } +{{end}}{{end}}{{end}} +} + +func __doTestMammothMaps(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey -}} + for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } { + {{/* // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v) */ -}} + var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }} + var bs{{$i}} []byte + v{{$i}}v1 = v + bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}") + if v != nil { + v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) // reset map + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}") + testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-map-v{{$i}}") + v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) // reset map + testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value + testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-map-v{{$i}}-noaddr") + } + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-map-v{{$i}}-p-len") + testReleaseBytes(bs{{$i}}) + bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil") + testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-map-v{{$i}}-p-nil") + testReleaseBytes(bs{{$i}}) + // ... 
+ if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }} + v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1) + v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2) + if v != nil { + bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom") + testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErrHandle(v{{$i}}v3, v{{$i}}v4, h, t, "equal-map-v{{$i}}-p-len") + testReleaseBytes(bs{{$i}}) + } + type s{{$i}}T struct { + M map[{{ .MapKey }}]{{ .Elem }} + Mp *map[{{ .MapKey }}]{{ .Elem }} + } + var m{{$i}}v99 = map[{{ .MapKey }}]{{ .Elem }}{ + {{ zerocmd .MapKey }}: {{ zerocmd .Elem }}, + {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }}, + } + var s{{$i}}v1, s{{$i}}v2 s{{$i}}T + bs{{$i}} = testMarshalErr(s{{$i}}v1, h, t, "enc-map-v{{$i}}-custom") + testUnmarshalErr(&s{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErrHandle(s{{$i}}v1, s{{$i}}v2, h, t, "equal-map-v{{$i}}-p-len") + testReleaseBytes(bs{{$i}}) + s{{$i}}v2 = s{{$i}}T{} + s{{$i}}v1.M = m{{$i}}v99 + bs{{$i}} = testMarshalErr(s{{$i}}v1, h, t, "enc-map-v{{$i}}-custom") + testUnmarshalErr(&s{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErrHandle(s{{$i}}v1, s{{$i}}v2, h, t, "equal-map-v{{$i}}-p-len") + testReleaseBytes(bs{{$i}}) + s{{$i}}v2 = s{{$i}}T{} + s{{$i}}v1.Mp = &m{{$i}}v99 + bs{{$i}} = testMarshalErr(s{{$i}}v1, h, t, "enc-map-v{{$i}}-custom") + testUnmarshalErr(&s{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErrHandle(s{{$i}}v1, s{{$i}}v2, h, t, "equal-map-v{{$i}}-p-len") + testReleaseBytes(bs{{$i}}) + } +{{end}}{{end}}{{end}} + +} + +func doTestMammothMapsAndSlices(t *testing.T, h Handle) { + defer testSetup(t, &h)() + if mh, ok := h.(*MsgpackHandle); ok { + defer func(b bool) { mh.RawToString = b }(mh.RawToString) + mh.RawToString = true + } + __doTestMammothSlices(t, h) + __doTestMammothMaps(t, h) +} + +func doTestMammoth(t *testing.T, h Handle) { + defer testSetup(t, &h)() + if mh, ok := h.(*MsgpackHandle); ok { + defer func(b bool) { mh.RawToString = b }(mh.RawToString) + mh.RawToString = true + } + + name := h.Name() + var b []byte + + var m, m2 TestMammoth + testRandomFillRV(reflect.ValueOf(&m).Elem()) + b = testMarshalErr(&m, h, t, "mammoth-"+name) + + testUnmarshalErr(&m2, b, h, t, "mammoth-"+name) + testDeepEqualErrHandle(&m, &m2, h, t, "mammoth-"+name) + testReleaseBytes(b) + + if testing.Short() { + t.Skipf("skipping rest of mammoth test in -short mode") + } + + var mm, mm2 TestMammoth2Wrapper + testRandomFillRV(reflect.ValueOf(&mm).Elem()) + b = testMarshalErr(&mm, h, t, "mammoth2-"+name) + // os.Stderr.Write([]byte("\n\n\n\n" + string(b) + "\n\n\n\n")) + testUnmarshalErr(&mm2, b, h, t, "mammoth2-"+name) + testDeepEqualErrHandle(&mm, &mm2, h, t, "mammoth2-"+name) + // testMammoth2(t, name, h) + testReleaseBytes(b) +} + +{{range $i, $e := .Formats -}} +func Test{{ . }}Mammoth(t *testing.T) { + doTestMammoth(t, test{{ . }}H) +} +{{end}} +{{range $i, $e := .Formats -}} +func Test{{ . }}MammothMapsAndSlices(t *testing.T) { + doTestMammothMapsAndSlices(t, test{{ . }}H) +} +{{end}} diff --git a/vendor/github.com/ugorji/go/codec/msgpack.base.go b/vendor/github.com/ugorji/go/codec/msgpack.base.go new file mode 100644 index 000000000..0388f0cec --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/msgpack.base.go @@ -0,0 +1,299 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. 
+// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "fmt" + "io" + "net/rpc" + "reflect" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax byte = 0x7f + mpFixMapMin byte = 0x80 + mpFixMapMax byte = 0x8f + mpFixArrayMin byte = 0x90 + mpFixArrayMax byte = 0x9f + mpFixStrMin byte = 0xa0 + mpFixStrMax byte = 0xbf + mpNil byte = 0xc0 + _ byte = 0xc1 + mpFalse byte = 0xc2 + mpTrue byte = 0xc3 + mpFloat byte = 0xca + mpDouble byte = 0xcb + mpUint8 byte = 0xcc + mpUint16 byte = 0xcd + mpUint32 byte = 0xce + mpUint64 byte = 0xcf + mpInt8 byte = 0xd0 + mpInt16 byte = 0xd1 + mpInt32 byte = 0xd2 + mpInt64 byte = 0xd3 + + // extensions below + mpBin8 byte = 0xc4 + mpBin16 byte = 0xc5 + mpBin32 byte = 0xc6 + mpExt8 byte = 0xc7 + mpExt16 byte = 0xc8 + mpExt32 byte = 0xc9 + mpFixExt1 byte = 0xd4 + mpFixExt2 byte = 0xd5 + mpFixExt4 byte = 0xd6 + mpFixExt8 byte = 0xd7 + mpFixExt16 byte = 0xd8 + + mpStr8 byte = 0xd9 // new + mpStr16 byte = 0xda + mpStr32 byte = 0xdb + + mpArray16 byte = 0xdc + mpArray32 byte = 0xdd + + mpMap16 byte = 0xde + mpMap32 byte = 0xdf + + mpNegFixNumMin byte = 0xe0 + mpNegFixNumMax byte = 0xff +) + +var mpTimeExtTag int8 = -1 +var mpTimeExtTagU = uint8(mpTimeExtTag) + +var mpdescNames = map[byte]string{ + mpNil: "nil", + mpFalse: "false", + mpTrue: "true", + mpFloat: "float", + mpDouble: "float", + mpUint8: "uuint", + mpUint16: "uint", + mpUint32: "uint", + mpUint64: "uint", + mpInt8: "int", + mpInt16: "int", + mpInt32: "int", + mpInt64: "int", + + mpStr8: "string|bytes", + mpStr16: "string|bytes", + mpStr32: "string|bytes", + + mpBin8: "bytes", + mpBin16: "bytes", + mpBin32: "bytes", + + mpArray16: "array", + mpArray32: "array", + + mpMap16: "map", + mpMap32: "map", +} + +func mpdesc(bd byte) (s string) { + s = mpdescNames[bd] + if s == "" { + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax, + bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + s = "int" + case bd >= mpFixStrMin && bd <= mpFixStrMax: + s = "string|bytes" + case bd >= mpFixArrayMin && bd <= mpFixArrayMax: + s = "array" + case bd >= mpFixMapMin && bd <= mpFixMapMax: + s = "map" + case bd >= mpFixExt1 && bd <= mpFixExt16, + bd >= mpExt8 && bd <= mpExt32: + s = "ext" + default: + s = "unknown" + } + } + return +} + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec +// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. 
+type msgpackContainerType struct { + fixCutoff, bFixMin, b8, b16, b32 byte + // hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerRawLegacy = msgpackContainerType{ + 32, mpFixStrMin, 0, mpStr16, mpStr32, + } + msgpackContainerStr = msgpackContainerType{ + 32, mpFixStrMin, mpStr8, mpStr16, mpStr32, // true, true, false, + } + msgpackContainerBin = msgpackContainerType{ + 0, 0, mpBin8, mpBin16, mpBin32, // false, true, true, + } + msgpackContainerList = msgpackContainerType{ + 16, mpFixArrayMin, 0, mpArray16, mpArray32, // true, false, false, + } + msgpackContainerMap = msgpackContainerType{ + 16, mpFixMapMin, 0, mpMap16, mpMap32, // true, false, false, + } +) + +//-------------------------------------------------- + +// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + binaryEncodingType + notJsonType + BasicHandle + + // NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum. + NoFixedNum bool + + // WriteExt controls whether the new spec is honored. + // + // With WriteExt=true, we can encode configured extensions with extension tags + // and encode string/[]byte/extensions in a way compatible with the new spec + // but incompatible with the old spec. + // + // For compatibility with the old spec, set WriteExt=false. + // + // With WriteExt=false: + // configured extensions are serialized as raw bytes (not msgpack extensions). + // reserved byte descriptors like Str8 and those enabling the new msgpack Binary type + // are not encoded. + WriteExt bool + + // PositiveIntUnsigned says to encode positive integers as unsigned. + PositiveIntUnsigned bool +} + +// Name returns the name of the handle: msgpack +func (h *MsgpackHandle) Name() string { return "msgpack" } + +func (h *MsgpackHandle) desc(bd byte) string { return mpdesc(bd) } + +// SetBytesExt sets an extension +func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, makeExt(ext)) +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + *rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. 
+ // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + if c.cls.Load().closed { + return io.ErrUnexpectedEOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. + + const fia byte = 0x94 //four item array descriptor value + + var ba [1]byte + var n int + for { + n, err = c.r.Read(ba[:]) + if err != nil { + return + } + if n == 1 { + break + } + } + + var b = ba[0] + if b != fia { + err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b)) + } else { + err = c.read(&b) + if err == nil { + if b != expectTypeByte { + err = fmt.Errorf("%s - expecting %v but got %x/%s", msgBadDesc, expectTypeByte, b, mpdesc(b)) + } else { + err = c.read(msgid) + if err == nil { + err = c.read(methodOrError) + } + } + } + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// +// See GoRpc documentation, for information on buffering for better performance. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} diff --git a/vendor/github.com/ugorji/go/codec/msgpack.fastpath.mono.generated.go b/vendor/github.com/ugorji/go/codec/msgpack.fastpath.mono.generated.go new file mode 100644 index 000000000..6d6a880e8 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/msgpack.fastpath.mono.generated.go @@ -0,0 +1,12482 @@ +//go:build !notmono && !codec.notmono && !notfastpath && !codec.notfastpath + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
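// Illustrative usage sketch (editorial note, not part of the vendored files in
// this diff): wiring the msgpack-rpc spec codec into net/rpc, following the
// MsgpackSpecRpc and MsgpackSpecRpcMultiArgs documentation above. The endpoint
// address and the "Arith.Add" service method are assumptions made purely for
// illustration.
package main

import (
	"net"
	"net/rpc"

	"github.com/ugorji/go/codec"
)

func main() {
	// Handle configured per the WriteExt documentation above: encode
	// string/[]byte/extensions per the current msgpack spec.
	h := &codec.MsgpackHandle{WriteExt: true}

	conn, err := net.Dial("tcp", "127.0.0.1:5555") // hypothetical endpoint
	if err != nil {
		panic(err)
	}

	// MsgpackSpecRpc frames requests and responses per the msgpack-rpc spec,
	// as implemented by msgpackSpecRpcCodec above (GoRpc would instead use
	// plain net/rpc framing).
	client := rpc.NewClientWithCodec(codec.MsgpackSpecRpc.ClientCodec(conn, h))
	defer client.Close()

	// MsgpackSpecRpcMultiArgs passes the slice elements to the remote method
	// as separate positional arguments rather than as one wrapped value.
	var reply int
	if err := client.Call("Arith.Add", codec.MsgpackSpecRpcMultiArgs{1, 2}, &reply); err != nil {
		panic(err)
	}
}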
+ +package codec + +import ( + "reflect" + "slices" + "sort" +) + +type fastpathEMsgpackBytes struct { + rtid uintptr + rt reflect.Type + encfn func(*encoderMsgpackBytes, *encFnInfo, reflect.Value) +} +type fastpathDMsgpackBytes struct { + rtid uintptr + rt reflect.Type + decfn func(*decoderMsgpackBytes, *decFnInfo, reflect.Value) +} +type fastpathEsMsgpackBytes [56]fastpathEMsgpackBytes +type fastpathDsMsgpackBytes [56]fastpathDMsgpackBytes +type fastpathETMsgpackBytes struct{} +type fastpathDTMsgpackBytes struct{} + +func (helperEncDriverMsgpackBytes) fastpathEList() *fastpathEsMsgpackBytes { + var i uint = 0 + var s fastpathEsMsgpackBytes + fn := func(v interface{}, fe func(*encoderMsgpackBytes, *encFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathEMsgpackBytes{rt2id(xrt), xrt, fe} + i++ + } + + fn([]interface{}(nil), (*encoderMsgpackBytes).fastpathEncSliceIntfR) + fn([]string(nil), (*encoderMsgpackBytes).fastpathEncSliceStringR) + fn([][]byte(nil), (*encoderMsgpackBytes).fastpathEncSliceBytesR) + fn([]float32(nil), (*encoderMsgpackBytes).fastpathEncSliceFloat32R) + fn([]float64(nil), (*encoderMsgpackBytes).fastpathEncSliceFloat64R) + fn([]uint8(nil), (*encoderMsgpackBytes).fastpathEncSliceUint8R) + fn([]uint64(nil), (*encoderMsgpackBytes).fastpathEncSliceUint64R) + fn([]int(nil), (*encoderMsgpackBytes).fastpathEncSliceIntR) + fn([]int32(nil), (*encoderMsgpackBytes).fastpathEncSliceInt32R) + fn([]int64(nil), (*encoderMsgpackBytes).fastpathEncSliceInt64R) + fn([]bool(nil), (*encoderMsgpackBytes).fastpathEncSliceBoolR) + + fn(map[string]interface{}(nil), (*encoderMsgpackBytes).fastpathEncMapStringIntfR) + fn(map[string]string(nil), (*encoderMsgpackBytes).fastpathEncMapStringStringR) + fn(map[string][]byte(nil), (*encoderMsgpackBytes).fastpathEncMapStringBytesR) + fn(map[string]uint8(nil), (*encoderMsgpackBytes).fastpathEncMapStringUint8R) + fn(map[string]uint64(nil), (*encoderMsgpackBytes).fastpathEncMapStringUint64R) + fn(map[string]int(nil), (*encoderMsgpackBytes).fastpathEncMapStringIntR) + fn(map[string]int32(nil), (*encoderMsgpackBytes).fastpathEncMapStringInt32R) + fn(map[string]float64(nil), (*encoderMsgpackBytes).fastpathEncMapStringFloat64R) + fn(map[string]bool(nil), (*encoderMsgpackBytes).fastpathEncMapStringBoolR) + fn(map[uint8]interface{}(nil), (*encoderMsgpackBytes).fastpathEncMapUint8IntfR) + fn(map[uint8]string(nil), (*encoderMsgpackBytes).fastpathEncMapUint8StringR) + fn(map[uint8][]byte(nil), (*encoderMsgpackBytes).fastpathEncMapUint8BytesR) + fn(map[uint8]uint8(nil), (*encoderMsgpackBytes).fastpathEncMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*encoderMsgpackBytes).fastpathEncMapUint8Uint64R) + fn(map[uint8]int(nil), (*encoderMsgpackBytes).fastpathEncMapUint8IntR) + fn(map[uint8]int32(nil), (*encoderMsgpackBytes).fastpathEncMapUint8Int32R) + fn(map[uint8]float64(nil), (*encoderMsgpackBytes).fastpathEncMapUint8Float64R) + fn(map[uint8]bool(nil), (*encoderMsgpackBytes).fastpathEncMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*encoderMsgpackBytes).fastpathEncMapUint64IntfR) + fn(map[uint64]string(nil), (*encoderMsgpackBytes).fastpathEncMapUint64StringR) + fn(map[uint64][]byte(nil), (*encoderMsgpackBytes).fastpathEncMapUint64BytesR) + fn(map[uint64]uint8(nil), (*encoderMsgpackBytes).fastpathEncMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*encoderMsgpackBytes).fastpathEncMapUint64Uint64R) + fn(map[uint64]int(nil), (*encoderMsgpackBytes).fastpathEncMapUint64IntR) + fn(map[uint64]int32(nil), (*encoderMsgpackBytes).fastpathEncMapUint64Int32R) + 
fn(map[uint64]float64(nil), (*encoderMsgpackBytes).fastpathEncMapUint64Float64R) + fn(map[uint64]bool(nil), (*encoderMsgpackBytes).fastpathEncMapUint64BoolR) + fn(map[int]interface{}(nil), (*encoderMsgpackBytes).fastpathEncMapIntIntfR) + fn(map[int]string(nil), (*encoderMsgpackBytes).fastpathEncMapIntStringR) + fn(map[int][]byte(nil), (*encoderMsgpackBytes).fastpathEncMapIntBytesR) + fn(map[int]uint8(nil), (*encoderMsgpackBytes).fastpathEncMapIntUint8R) + fn(map[int]uint64(nil), (*encoderMsgpackBytes).fastpathEncMapIntUint64R) + fn(map[int]int(nil), (*encoderMsgpackBytes).fastpathEncMapIntIntR) + fn(map[int]int32(nil), (*encoderMsgpackBytes).fastpathEncMapIntInt32R) + fn(map[int]float64(nil), (*encoderMsgpackBytes).fastpathEncMapIntFloat64R) + fn(map[int]bool(nil), (*encoderMsgpackBytes).fastpathEncMapIntBoolR) + fn(map[int32]interface{}(nil), (*encoderMsgpackBytes).fastpathEncMapInt32IntfR) + fn(map[int32]string(nil), (*encoderMsgpackBytes).fastpathEncMapInt32StringR) + fn(map[int32][]byte(nil), (*encoderMsgpackBytes).fastpathEncMapInt32BytesR) + fn(map[int32]uint8(nil), (*encoderMsgpackBytes).fastpathEncMapInt32Uint8R) + fn(map[int32]uint64(nil), (*encoderMsgpackBytes).fastpathEncMapInt32Uint64R) + fn(map[int32]int(nil), (*encoderMsgpackBytes).fastpathEncMapInt32IntR) + fn(map[int32]int32(nil), (*encoderMsgpackBytes).fastpathEncMapInt32Int32R) + fn(map[int32]float64(nil), (*encoderMsgpackBytes).fastpathEncMapInt32Float64R) + fn(map[int32]bool(nil), (*encoderMsgpackBytes).fastpathEncMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperDecDriverMsgpackBytes) fastpathDList() *fastpathDsMsgpackBytes { + var i uint = 0 + var s fastpathDsMsgpackBytes + fn := func(v interface{}, fd func(*decoderMsgpackBytes, *decFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathDMsgpackBytes{rt2id(xrt), xrt, fd} + i++ + } + + fn([]interface{}(nil), (*decoderMsgpackBytes).fastpathDecSliceIntfR) + fn([]string(nil), (*decoderMsgpackBytes).fastpathDecSliceStringR) + fn([][]byte(nil), (*decoderMsgpackBytes).fastpathDecSliceBytesR) + fn([]float32(nil), (*decoderMsgpackBytes).fastpathDecSliceFloat32R) + fn([]float64(nil), (*decoderMsgpackBytes).fastpathDecSliceFloat64R) + fn([]uint8(nil), (*decoderMsgpackBytes).fastpathDecSliceUint8R) + fn([]uint64(nil), (*decoderMsgpackBytes).fastpathDecSliceUint64R) + fn([]int(nil), (*decoderMsgpackBytes).fastpathDecSliceIntR) + fn([]int32(nil), (*decoderMsgpackBytes).fastpathDecSliceInt32R) + fn([]int64(nil), (*decoderMsgpackBytes).fastpathDecSliceInt64R) + fn([]bool(nil), (*decoderMsgpackBytes).fastpathDecSliceBoolR) + + fn(map[string]interface{}(nil), (*decoderMsgpackBytes).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*decoderMsgpackBytes).fastpathDecMapStringStringR) + fn(map[string][]byte(nil), (*decoderMsgpackBytes).fastpathDecMapStringBytesR) + fn(map[string]uint8(nil), (*decoderMsgpackBytes).fastpathDecMapStringUint8R) + fn(map[string]uint64(nil), (*decoderMsgpackBytes).fastpathDecMapStringUint64R) + fn(map[string]int(nil), (*decoderMsgpackBytes).fastpathDecMapStringIntR) + fn(map[string]int32(nil), (*decoderMsgpackBytes).fastpathDecMapStringInt32R) + fn(map[string]float64(nil), (*decoderMsgpackBytes).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*decoderMsgpackBytes).fastpathDecMapStringBoolR) + fn(map[uint8]interface{}(nil), (*decoderMsgpackBytes).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*decoderMsgpackBytes).fastpathDecMapUint8StringR) + 
fn(map[uint8][]byte(nil), (*decoderMsgpackBytes).fastpathDecMapUint8BytesR) + fn(map[uint8]uint8(nil), (*decoderMsgpackBytes).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*decoderMsgpackBytes).fastpathDecMapUint8Uint64R) + fn(map[uint8]int(nil), (*decoderMsgpackBytes).fastpathDecMapUint8IntR) + fn(map[uint8]int32(nil), (*decoderMsgpackBytes).fastpathDecMapUint8Int32R) + fn(map[uint8]float64(nil), (*decoderMsgpackBytes).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*decoderMsgpackBytes).fastpathDecMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*decoderMsgpackBytes).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*decoderMsgpackBytes).fastpathDecMapUint64StringR) + fn(map[uint64][]byte(nil), (*decoderMsgpackBytes).fastpathDecMapUint64BytesR) + fn(map[uint64]uint8(nil), (*decoderMsgpackBytes).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*decoderMsgpackBytes).fastpathDecMapUint64Uint64R) + fn(map[uint64]int(nil), (*decoderMsgpackBytes).fastpathDecMapUint64IntR) + fn(map[uint64]int32(nil), (*decoderMsgpackBytes).fastpathDecMapUint64Int32R) + fn(map[uint64]float64(nil), (*decoderMsgpackBytes).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*decoderMsgpackBytes).fastpathDecMapUint64BoolR) + fn(map[int]interface{}(nil), (*decoderMsgpackBytes).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*decoderMsgpackBytes).fastpathDecMapIntStringR) + fn(map[int][]byte(nil), (*decoderMsgpackBytes).fastpathDecMapIntBytesR) + fn(map[int]uint8(nil), (*decoderMsgpackBytes).fastpathDecMapIntUint8R) + fn(map[int]uint64(nil), (*decoderMsgpackBytes).fastpathDecMapIntUint64R) + fn(map[int]int(nil), (*decoderMsgpackBytes).fastpathDecMapIntIntR) + fn(map[int]int32(nil), (*decoderMsgpackBytes).fastpathDecMapIntInt32R) + fn(map[int]float64(nil), (*decoderMsgpackBytes).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*decoderMsgpackBytes).fastpathDecMapIntBoolR) + fn(map[int32]interface{}(nil), (*decoderMsgpackBytes).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*decoderMsgpackBytes).fastpathDecMapInt32StringR) + fn(map[int32][]byte(nil), (*decoderMsgpackBytes).fastpathDecMapInt32BytesR) + fn(map[int32]uint8(nil), (*decoderMsgpackBytes).fastpathDecMapInt32Uint8R) + fn(map[int32]uint64(nil), (*decoderMsgpackBytes).fastpathDecMapInt32Uint64R) + fn(map[int32]int(nil), (*decoderMsgpackBytes).fastpathDecMapInt32IntR) + fn(map[int32]int32(nil), (*decoderMsgpackBytes).fastpathDecMapInt32Int32R) + fn(map[int32]float64(nil), (*decoderMsgpackBytes).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*decoderMsgpackBytes).fastpathDecMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperEncDriverMsgpackBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderMsgpackBytes) bool { + var ft fastpathETMsgpackBytes + switch v := iv.(type) { + case []interface{}: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntfV(v, e) + } + case []string: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceStringV(v, e) + } + case [][]byte: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBytesV(v, e) + } + case []float32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat32V(v, e) + } + case []float64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat64V(v, e) + } + case []uint8: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint8V(v, e) + } + case []uint64: + if v == nil { + e.e.writeNilArray() + } else { + 
ft.EncSliceUint64V(v, e) + } + case []int: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntV(v, e) + } + case []int32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt32V(v, e) + } + case []int64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt64V(v, e) + } + case []bool: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBoolV(v, e) + } + case map[string]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntfV(v, e) + } + case map[string]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringStringV(v, e) + } + case map[string][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBytesV(v, e) + } + case map[string]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint8V(v, e) + } + case map[string]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint64V(v, e) + } + case map[string]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntV(v, e) + } + case map[string]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringInt32V(v, e) + } + case map[string]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringFloat64V(v, e) + } + case map[string]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBoolV(v, e) + } + case map[uint8]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntfV(v, e) + } + case map[uint8]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8StringV(v, e) + } + case map[uint8][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BytesV(v, e) + } + case map[uint8]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint8V(v, e) + } + case map[uint8]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint64V(v, e) + } + case map[uint8]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntV(v, e) + } + case map[uint8]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Int32V(v, e) + } + case map[uint8]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Float64V(v, e) + } + case map[uint8]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BoolV(v, e) + } + case map[uint64]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntfV(v, e) + } + case map[uint64]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64StringV(v, e) + } + case map[uint64][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BytesV(v, e) + } + case map[uint64]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint8V(v, e) + } + case map[uint64]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint64V(v, e) + } + case map[uint64]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntV(v, e) + } + case map[uint64]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Int32V(v, e) + } + case map[uint64]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Float64V(v, e) + } + case map[uint64]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BoolV(v, e) + } + case map[int]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntfV(v, e) + } + case map[int]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntStringV(v, e) + } + case map[int][]byte: + if v == nil { + e.e.writeNilMap() + } else { + 
ft.EncMapIntBytesV(v, e) + } + case map[int]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint8V(v, e) + } + case map[int]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint64V(v, e) + } + case map[int]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntV(v, e) + } + case map[int]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntInt32V(v, e) + } + case map[int]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntFloat64V(v, e) + } + case map[int]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBoolV(v, e) + } + case map[int32]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntfV(v, e) + } + case map[int32]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32StringV(v, e) + } + case map[int32][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BytesV(v, e) + } + case map[int32]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint8V(v, e) + } + case map[int32]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint64V(v, e) + } + case map[int32]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntV(v, e) + } + case map[int32]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Int32V(v, e) + } + case map[int32]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Float64V(v, e) + } + case map[int32]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BoolV(v, e) + } + default: + _ = v + return false + } + return true +} + +func (e *encoderMsgpackBytes) fastpathEncSliceIntfR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackBytes + var v []interface{} + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]interface{}) + } + if f.ti.mbs { + ft.EncAsMapSliceIntfV(v, e) + return + } + ft.EncSliceIntfV(v, e) +} +func (fastpathETMsgpackBytes) EncSliceIntfV(v []interface{}, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackBytes) EncAsMapSliceIntfV(v []interface{}, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackBytes) fastpathEncSliceStringR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackBytes + var v []string + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]string) + } + if f.ti.mbs { + ft.EncAsMapSliceStringV(v, e) + return + } + ft.EncSliceStringV(v, e) +} +func (fastpathETMsgpackBytes) EncSliceStringV(v []string, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackBytes) EncAsMapSliceStringV(v []string, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + 
e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackBytes) fastpathEncSliceBytesR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackBytes + var v [][]byte + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([][]byte) + } + if f.ti.mbs { + ft.EncAsMapSliceBytesV(v, e) + return + } + ft.EncSliceBytesV(v, e) +} +func (fastpathETMsgpackBytes) EncSliceBytesV(v [][]byte, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackBytes) EncAsMapSliceBytesV(v [][]byte, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackBytes) fastpathEncSliceFloat32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackBytes + var v []float32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float32) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat32V(v, e) + return + } + ft.EncSliceFloat32V(v, e) +} +func (fastpathETMsgpackBytes) EncSliceFloat32V(v []float32, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackBytes) EncAsMapSliceFloat32V(v []float32, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackBytes) fastpathEncSliceFloat64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackBytes + var v []float64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float64) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat64V(v, e) + return + } + ft.EncSliceFloat64V(v, e) +} +func (fastpathETMsgpackBytes) EncSliceFloat64V(v []float64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackBytes) EncAsMapSliceFloat64V(v []float64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackBytes) fastpathEncSliceUint8R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackBytes + var v 
[]uint8 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint8) + } + if f.ti.mbs { + ft.EncAsMapSliceUint8V(v, e) + return + } + ft.EncSliceUint8V(v, e) +} +func (fastpathETMsgpackBytes) EncSliceUint8V(v []uint8, e *encoderMsgpackBytes) { + e.e.EncodeStringBytesRaw(v) +} +func (fastpathETMsgpackBytes) EncAsMapSliceUint8V(v []uint8, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(uint64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackBytes) fastpathEncSliceUint64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackBytes + var v []uint64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint64) + } + if f.ti.mbs { + ft.EncAsMapSliceUint64V(v, e) + return + } + ft.EncSliceUint64V(v, e) +} +func (fastpathETMsgpackBytes) EncSliceUint64V(v []uint64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackBytes) EncAsMapSliceUint64V(v []uint64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackBytes) fastpathEncSliceIntR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackBytes + var v []int + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int) + } + if f.ti.mbs { + ft.EncAsMapSliceIntV(v, e) + return + } + ft.EncSliceIntV(v, e) +} +func (fastpathETMsgpackBytes) EncSliceIntV(v []int, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackBytes) EncAsMapSliceIntV(v []int, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackBytes) fastpathEncSliceInt32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackBytes + var v []int32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int32) + } + if f.ti.mbs { + ft.EncAsMapSliceInt32V(v, e) + return + } + ft.EncSliceInt32V(v, e) +} +func (fastpathETMsgpackBytes) EncSliceInt32V(v []int32, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackBytes) EncAsMapSliceInt32V(v []int32, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + 
e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackBytes) fastpathEncSliceInt64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackBytes + var v []int64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int64) + } + if f.ti.mbs { + ft.EncAsMapSliceInt64V(v, e) + return + } + ft.EncSliceInt64V(v, e) +} +func (fastpathETMsgpackBytes) EncSliceInt64V(v []int64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackBytes) EncAsMapSliceInt64V(v []int64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackBytes) fastpathEncSliceBoolR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackBytes + var v []bool + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]bool) + } + if f.ti.mbs { + ft.EncAsMapSliceBoolV(v, e) + return + } + ft.EncSliceBoolV(v, e) +} +func (fastpathETMsgpackBytes) EncSliceBoolV(v []bool, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackBytes) EncAsMapSliceBoolV(v []bool, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackBytes) fastpathEncMapStringIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (fastpathETMsgpackBytes) EncMapStringIntfV(v map[string]interface{}, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapStringStringR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (fastpathETMsgpackBytes) EncMapStringStringV(v map[string]string, e *encoderMsgpackBytes) { + if len(v) == 0 { + 
e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapStringBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e) +} +func (fastpathETMsgpackBytes) EncMapStringBytesV(v map[string][]byte, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapStringUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (fastpathETMsgpackBytes) EncMapStringUint8V(v map[string]uint8, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapStringUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (fastpathETMsgpackBytes) EncMapStringUint64V(v map[string]uint64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapStringIntR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (fastpathETMsgpackBytes) EncMapStringIntV(v map[string]int, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = 
containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapStringInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (fastpathETMsgpackBytes) EncMapStringInt32V(v map[string]int32, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapStringFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (fastpathETMsgpackBytes) EncMapStringFloat64V(v map[string]float64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapStringBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (fastpathETMsgpackBytes) EncMapStringBoolV(v map[string]bool, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint8IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (fastpathETMsgpackBytes) EncMapUint8IntfV(v map[uint8]interface{}, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 
:= range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint8StringR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (fastpathETMsgpackBytes) EncMapUint8StringV(v map[uint8]string, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint8BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e) +} +func (fastpathETMsgpackBytes) EncMapUint8BytesV(v map[uint8][]byte, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint8Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (fastpathETMsgpackBytes) EncMapUint8Uint8V(v map[uint8]uint8, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint8Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (fastpathETMsgpackBytes) EncMapUint8Uint64V(v map[uint8]uint64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) 
+ i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint8IntR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (fastpathETMsgpackBytes) EncMapUint8IntV(v map[uint8]int, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint8Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (fastpathETMsgpackBytes) EncMapUint8Int32V(v map[uint8]int32, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint8Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (fastpathETMsgpackBytes) EncMapUint8Float64V(v map[uint8]float64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint8BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (fastpathETMsgpackBytes) EncMapUint8BoolV(v map[uint8]bool, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint64IntfR(f *encFnInfo, rv reflect.Value) { + 
fastpathETMsgpackBytes{}.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (fastpathETMsgpackBytes) EncMapUint64IntfV(v map[uint64]interface{}, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint64StringR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (fastpathETMsgpackBytes) EncMapUint64StringV(v map[uint64]string, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint64BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e) +} +func (fastpathETMsgpackBytes) EncMapUint64BytesV(v map[uint64][]byte, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint64Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (fastpathETMsgpackBytes) EncMapUint64Uint8V(v map[uint64]uint8, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint64Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (fastpathETMsgpackBytes) 
EncMapUint64Uint64V(v map[uint64]uint64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint64IntR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (fastpathETMsgpackBytes) EncMapUint64IntV(v map[uint64]int, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint64Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (fastpathETMsgpackBytes) EncMapUint64Int32V(v map[uint64]int32, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint64Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (fastpathETMsgpackBytes) EncMapUint64Float64V(v map[uint64]float64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapUint64BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (fastpathETMsgpackBytes) EncMapUint64BoolV(v map[uint64]bool, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + 
v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapIntIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (fastpathETMsgpackBytes) EncMapIntIntfV(v map[int]interface{}, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapIntStringR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (fastpathETMsgpackBytes) EncMapIntStringV(v map[int]string, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapIntBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e) +} +func (fastpathETMsgpackBytes) EncMapIntBytesV(v map[int][]byte, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapIntUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (fastpathETMsgpackBytes) EncMapIntUint8V(v map[int]uint8, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + 
e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapIntUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (fastpathETMsgpackBytes) EncMapIntUint64V(v map[int]uint64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapIntIntR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (fastpathETMsgpackBytes) EncMapIntIntV(v map[int]int, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapIntInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (fastpathETMsgpackBytes) EncMapIntInt32V(v map[int]int32, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapIntFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (fastpathETMsgpackBytes) EncMapIntFloat64V(v map[int]float64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + 
e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapIntBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (fastpathETMsgpackBytes) EncMapIntBoolV(v map[int]bool, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapInt32IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (fastpathETMsgpackBytes) EncMapInt32IntfV(v map[int32]interface{}, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapInt32StringR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (fastpathETMsgpackBytes) EncMapInt32StringV(v map[int32]string, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapInt32BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e) +} +func (fastpathETMsgpackBytes) EncMapInt32BytesV(v map[int32][]byte, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapInt32Uint8R(f *encFnInfo, rv reflect.Value) { + 
fastpathETMsgpackBytes{}.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (fastpathETMsgpackBytes) EncMapInt32Uint8V(v map[int32]uint8, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapInt32Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (fastpathETMsgpackBytes) EncMapInt32Uint64V(v map[int32]uint64, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapInt32IntR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (fastpathETMsgpackBytes) EncMapInt32IntV(v map[int32]int, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapInt32Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (fastpathETMsgpackBytes) EncMapInt32Int32V(v map[int32]int32, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapInt32Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (fastpathETMsgpackBytes) EncMapInt32Float64V(v map[int32]float64, e *encoderMsgpackBytes) { + if len(v) == 
0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackBytes) fastpathEncMapInt32BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackBytes{}.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (fastpathETMsgpackBytes) EncMapInt32BoolV(v map[int32]bool, e *encoderMsgpackBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (helperDecDriverMsgpackBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderMsgpackBytes) bool { + var ft fastpathDTMsgpackBytes + var changed bool + var containerLen int + switch v := iv.(type) { + case []interface{}: + ft.DecSliceIntfN(v, d) + case *[]interface{}: + var v2 []interface{} + if v2, changed = ft.DecSliceIntfY(*v, d); changed { + *v = v2 + } + case []string: + ft.DecSliceStringN(v, d) + case *[]string: + var v2 []string + if v2, changed = ft.DecSliceStringY(*v, d); changed { + *v = v2 + } + case [][]byte: + ft.DecSliceBytesN(v, d) + case *[][]byte: + var v2 [][]byte + if v2, changed = ft.DecSliceBytesY(*v, d); changed { + *v = v2 + } + case []float32: + ft.DecSliceFloat32N(v, d) + case *[]float32: + var v2 []float32 + if v2, changed = ft.DecSliceFloat32Y(*v, d); changed { + *v = v2 + } + case []float64: + ft.DecSliceFloat64N(v, d) + case *[]float64: + var v2 []float64 + if v2, changed = ft.DecSliceFloat64Y(*v, d); changed { + *v = v2 + } + case []uint8: + ft.DecSliceUint8N(v, d) + case *[]uint8: + var v2 []uint8 + if v2, changed = ft.DecSliceUint8Y(*v, d); changed { + *v = v2 + } + case []uint64: + ft.DecSliceUint64N(v, d) + case *[]uint64: + var v2 []uint64 + if v2, changed = ft.DecSliceUint64Y(*v, d); changed { + *v = v2 + } + case []int: + ft.DecSliceIntN(v, d) + case *[]int: + var v2 []int + if v2, changed = ft.DecSliceIntY(*v, d); changed { + *v = v2 + } + case []int32: + ft.DecSliceInt32N(v, d) + case *[]int32: + var v2 []int32 + if v2, changed = ft.DecSliceInt32Y(*v, d); changed { + *v = v2 + } + case []int64: + ft.DecSliceInt64N(v, d) + case *[]int64: + var v2 []int64 + if v2, changed = ft.DecSliceInt64Y(*v, d); changed { + *v = v2 + } + case []bool: + ft.DecSliceBoolN(v, d) + case *[]bool: + var v2 []bool + if v2, changed = ft.DecSliceBoolY(*v, d); changed { + *v = v2 + } + case map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = 
nil + } else { + if *v == nil { + *v = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringFloat64L(v, containerLen, d) + } + 
d.mapEnd() + } + case *map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int: + if containerLen = 
d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { 
+ ft.DecMapUint64BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v 
== nil { + *v = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = 
nil + } else { + if *v == nil { + *v = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int: + if 
containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*v, containerLen, d) + } + d.mapEnd() + } + default: + _ = v + return false + } + return true +} + +func (d *decoderMsgpackBytes) fastpathDecSliceIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]interface{}) + if vv, changed := ft.DecSliceIntfY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []interface{} + rvGetSlice4Array(rv, &v) + ft.DecSliceIntfN(v, d) + default: + ft.DecSliceIntfN(rv2i(rv).([]interface{}), d) + } +} +func (fastpathDTMsgpackBytes) DecSliceIntfY(v []interface{}, d *decoderMsgpackBytes) (v2 []interface{}, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []interface{}) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]interface{}, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + d.decode(&v[uint(j)]) + } + if j < len(v) { + 
fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]interface{}{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackBytes) DecSliceIntfN(v []interface{}, d *decoderMsgpackBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + d.decode(&v[uint(j)]) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackBytes) fastpathDecSliceStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]string) + if vv, changed := ft.DecSliceStringY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []string + rvGetSlice4Array(rv, &v) + ft.DecSliceStringN(v, d) + default: + ft.DecSliceStringN(rv2i(rv).([]string), d) + } +} +func (fastpathDTMsgpackBytes) DecSliceStringY(v []string, d *decoderMsgpackBytes) (v2 []string, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []string) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]string, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, "")) + } + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]string{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackBytes) DecSliceStringN(v []string, d *decoderMsgpackBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = 
d.detach2Str(d.d.DecodeStringAsBytes()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackBytes) fastpathDecSliceBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[][]byte) + if vv, changed := ft.DecSliceBytesY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v [][]byte + rvGetSlice4Array(rv, &v) + ft.DecSliceBytesN(v, d) + default: + ft.DecSliceBytesN(rv2i(rv).([][]byte), d) + } +} +func (fastpathDTMsgpackBytes) DecSliceBytesY(v [][]byte, d *decoderMsgpackBytes) (v2 [][]byte, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst [][]byte) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 24)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([][]byte, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([][]byte{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackBytes) DecSliceBytesN(v [][]byte, d *decoderMsgpackBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackBytes) fastpathDecSliceFloat32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float32) + if vv, changed := ft.DecSliceFloat32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float32 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat32N(v, d) + default: + ft.DecSliceFloat32N(rv2i(rv).([]float32), d) + } +} +func (fastpathDTMsgpackBytes) DecSliceFloat32Y(v []float32, d *decoderMsgpackBytes) (v2 []float32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray 
+ if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = float32(d.d.DecodeFloat32()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackBytes) DecSliceFloat32N(v []float32, d *decoderMsgpackBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = float32(d.d.DecodeFloat32()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackBytes) fastpathDecSliceFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float64) + if vv, changed := ft.DecSliceFloat64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float64 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat64N(v, d) + default: + ft.DecSliceFloat64N(rv2i(rv).([]float64), d) + } +} +func (fastpathDTMsgpackBytes) DecSliceFloat64Y(v []float64, d *decoderMsgpackBytes) (v2 []float64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + 
fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeFloat64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackBytes) DecSliceFloat64N(v []float64, d *decoderMsgpackBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeFloat64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackBytes) fastpathDecSliceUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint8) + if vv, changed := ft.DecSliceUint8Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint8 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint8N(v, d) + default: + ft.DecSliceUint8N(rv2i(rv).([]uint8), d) + } +} +func (fastpathDTMsgpackBytes) DecSliceUint8Y(v []uint8, d *decoderMsgpackBytes) (v2 []uint8, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + if ctyp != valueTypeMap { + var dbi dBytesIntoState + v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false) + return v2, dbi != dBytesIntoParamOut + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint8) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint8, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint8{}) + } + d.mapEnd() + return v, changed +} +func (fastpathDTMsgpackBytes) DecSliceUint8N(v []uint8, d *decoderMsgpackBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + if ctyp != valueTypeMap { + d.decodeBytesInto(v[:len(v):len(v)], true) + return + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + d.mapEnd() +} + +func (d *decoderMsgpackBytes) fastpathDecSliceUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint64) + if vv, changed := ft.DecSliceUint64Y(*v, d); 
changed { + *v = vv + } + case reflect.Array: + var v []uint64 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint64N(v, d) + default: + ft.DecSliceUint64N(rv2i(rv).([]uint64), d) + } +} +func (fastpathDTMsgpackBytes) DecSliceUint64Y(v []uint64, d *decoderMsgpackBytes) (v2 []uint64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeUint64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackBytes) DecSliceUint64N(v []uint64, d *decoderMsgpackBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeUint64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackBytes) fastpathDecSliceIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int) + if vv, changed := ft.DecSliceIntY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int + rvGetSlice4Array(rv, &v) + ft.DecSliceIntN(v, d) + default: + ft.DecSliceIntN(rv2i(rv).([]int), d) + } +} +func (fastpathDTMsgpackBytes) DecSliceIntY(v []int, d *decoderMsgpackBytes) (v2 []int, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) 
{ + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackBytes) DecSliceIntN(v []int, d *decoderMsgpackBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackBytes) fastpathDecSliceInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int32) + if vv, changed := ft.DecSliceInt32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int32 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt32N(v, d) + default: + ft.DecSliceInt32N(rv2i(rv).([]int32), d) + } +} +func (fastpathDTMsgpackBytes) DecSliceInt32Y(v []int32, d *decoderMsgpackBytes) (v2 []int32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackBytes) DecSliceInt32N(v []int32, d *decoderMsgpackBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = 
d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackBytes) fastpathDecSliceInt64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int64) + if vv, changed := ft.DecSliceInt64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int64 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt64N(v, d) + default: + ft.DecSliceInt64N(rv2i(rv).([]int64), d) + } +} +func (fastpathDTMsgpackBytes) DecSliceInt64Y(v []int64, d *decoderMsgpackBytes) (v2 []int64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeInt64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackBytes) DecSliceInt64N(v []int64, d *decoderMsgpackBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeInt64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackBytes) fastpathDecSliceBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]bool) + if vv, changed := ft.DecSliceBoolY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []bool + 
rvGetSlice4Array(rv, &v) + ft.DecSliceBoolN(v, d) + default: + ft.DecSliceBoolN(rv2i(rv).([]bool), d) + } +} +func (fastpathDTMsgpackBytes) DecSliceBoolY(v []bool, d *decoderMsgpackBytes) (v2 []bool, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []bool) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]bool, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, false)) + } + v[uint(j)] = d.d.DecodeBool() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]bool{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackBytes) DecSliceBoolN(v []bool, d *decoderMsgpackBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeBool() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapStringIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]interface{}) + if *vp == nil { + *vp = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntfL(rv2i(rv).(map[string]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapStringIntfL(v map[string]interface{}, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderMsgpackBytes) fastpathDecMapStringStringR(f *decFnInfo, rv 
reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]string) + if *vp == nil { + *vp = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringStringL(rv2i(rv).(map[string]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapStringStringL(v map[string]string, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapStringBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string][]byte) + if *vp == nil { + *vp = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBytesL(rv2i(rv).(map[string][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapStringBytesL(v map[string][]byte, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapStringUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint8) + if *vp == nil { + *vp = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint8L(rv2i(rv).(map[string]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapStringUint8L(v map[string]uint8, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapStringUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint64) + if *vp == nil { + *vp = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + 
ft.DecMapStringUint64L(rv2i(rv).(map[string]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapStringUint64L(v map[string]uint64, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapStringIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int) + if *vp == nil { + *vp = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntL(rv2i(rv).(map[string]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapStringIntL(v map[string]int, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapStringInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int32) + if *vp == nil { + *vp = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringInt32L(rv2i(rv).(map[string]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapStringInt32L(v map[string]int32, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapStringFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]float64) + if *vp == nil { + *vp = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringFloat64L(rv2i(rv).(map[string]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapStringFloat64L(v map[string]float64, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + 
v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapStringBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]bool) + if *vp == nil { + *vp = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBoolL(rv2i(rv).(map[string]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapStringBoolL(v map[string]bool, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint8IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]interface{}) + if *vp == nil { + *vp = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntfL(rv2i(rv).(map[uint8]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint8IntfL(v map[uint8]interface{}, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint8StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]string) + if *vp == nil { + *vp = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8StringL(rv2i(rv).(map[uint8]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint8StringL(v map[uint8]string, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint8BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8][]byte) + if *vp == nil { + *vp = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + 
ft.DecMapUint8BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BytesL(rv2i(rv).(map[uint8][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint8BytesL(v map[uint8][]byte, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint8Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint8) + if *vp == nil { + *vp = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint8L(rv2i(rv).(map[uint8]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint8Uint8L(v map[uint8]uint8, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint8Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint64) + if *vp == nil { + *vp = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint64L(rv2i(rv).(map[uint8]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint8Uint64L(v map[uint8]uint64, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint8IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int) + if *vp == nil { + *vp = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntL(rv2i(rv).(map[uint8]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint8IntL(v map[uint8]int, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; 
d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint8Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int32) + if *vp == nil { + *vp = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Int32L(rv2i(rv).(map[uint8]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint8Int32L(v map[uint8]int32, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint8Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]float64) + if *vp == nil { + *vp = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Float64L(rv2i(rv).(map[uint8]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint8Float64L(v map[uint8]float64, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint8BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]bool) + if *vp == nil { + *vp = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BoolL(rv2i(rv).(map[uint8]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint8BoolL(v map[uint8]bool, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint64IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]interface{}) + if *vp == nil { + *vp = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 
24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntfL(rv2i(rv).(map[uint64]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint64IntfL(v map[uint64]interface{}, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint64StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]string) + if *vp == nil { + *vp = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64StringL(rv2i(rv).(map[uint64]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint64StringL(v map[uint64]string, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint64BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64][]byte) + if *vp == nil { + *vp = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BytesL(rv2i(rv).(map[uint64][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint64BytesL(v map[uint64][]byte, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint64Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint8) + if *vp == nil { + *vp = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint8L(rv2i(rv).(map[uint64]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint64Uint8L(v map[uint64]uint8, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + 
halt.errorInt("cannot decode into nil map[uint64]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint64Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint64) + if *vp == nil { + *vp = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint64L(rv2i(rv).(map[uint64]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint64Uint64L(v map[uint64]uint64, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint64IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int) + if *vp == nil { + *vp = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint64IntL(v map[uint64]int, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint64Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int32) + if *vp == nil { + *vp = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint64Int32L(v map[uint64]int32, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint64Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]float64) + if *vp == 
nil { + *vp = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapUint64BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]bool) + if *vp == nil { + *vp = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapIntIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]interface{}) + if *vp == nil { + *vp = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderMsgpackBytes) fastpathDecMapIntStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]string) + if *vp == nil { + *vp = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapIntStringL(v map[int]string, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]string given stream 
length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapIntBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int][]byte) + if *vp == nil { + *vp = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapIntBytesL(v map[int][]byte, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapIntUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint8) + if *vp == nil { + *vp = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapIntUint8L(v map[int]uint8, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapIntUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint64) + if *vp == nil { + *vp = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapIntUint64L(v map[int]uint64, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapIntIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == 
reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int) + if *vp == nil { + *vp = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapIntIntL(v map[int]int, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapIntInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int32) + if *vp == nil { + *vp = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapIntInt32L(v map[int]int32, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapIntFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]float64) + if *vp == nil { + *vp = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapIntFloat64L(v map[int]float64, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapIntBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]bool) + if *vp == nil { + *vp = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapIntBoolL(v map[int]bool, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]bool given stream length: ", int64(containerLen)) + } + hasLen := 
containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapInt32IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]interface{}) + if *vp == nil { + *vp = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderMsgpackBytes) fastpathDecMapInt32StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]string) + if *vp == nil { + *vp = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapInt32StringL(v map[int32]string, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapInt32BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32][]byte) + if *vp == nil { + *vp = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapInt32Uint8R(f *decFnInfo, rv 
reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint8) + if *vp == nil { + *vp = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapInt32Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint64) + if *vp == nil { + *vp = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapInt32IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int) + if *vp == nil { + *vp = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapInt32IntL(v map[int32]int, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapInt32Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int32) + if *vp == nil { + *vp = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapInt32Int32L(v map[int32]int32, containerLen 
int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderMsgpackBytes) fastpathDecMapInt32Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]float64) + if *vp == nil { + *vp = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderMsgpackBytes) fastpathDecMapInt32BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]bool) + if *vp == nil { + *vp = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackBytes) DecMapInt32BoolL(v map[int32]bool, containerLen int, d *decoderMsgpackBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} + +type fastpathEMsgpackIO struct { + rtid uintptr + rt reflect.Type + encfn func(*encoderMsgpackIO, *encFnInfo, reflect.Value) +} +type fastpathDMsgpackIO struct { + rtid uintptr + rt reflect.Type + decfn func(*decoderMsgpackIO, *decFnInfo, reflect.Value) +} +type fastpathEsMsgpackIO [56]fastpathEMsgpackIO +type fastpathDsMsgpackIO [56]fastpathDMsgpackIO +type fastpathETMsgpackIO struct{} +type fastpathDTMsgpackIO struct{} + +func (helperEncDriverMsgpackIO) fastpathEList() *fastpathEsMsgpackIO { + var i uint = 0 + var s fastpathEsMsgpackIO + fn := func(v interface{}, fe func(*encoderMsgpackIO, *encFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathEMsgpackIO{rt2id(xrt), xrt, fe} + i++ + } + + fn([]interface{}(nil), (*encoderMsgpackIO).fastpathEncSliceIntfR) + fn([]string(nil), (*encoderMsgpackIO).fastpathEncSliceStringR) + fn([][]byte(nil), (*encoderMsgpackIO).fastpathEncSliceBytesR) + fn([]float32(nil), (*encoderMsgpackIO).fastpathEncSliceFloat32R) + fn([]float64(nil), (*encoderMsgpackIO).fastpathEncSliceFloat64R) + fn([]uint8(nil), (*encoderMsgpackIO).fastpathEncSliceUint8R) + fn([]uint64(nil), 
(*encoderMsgpackIO).fastpathEncSliceUint64R) + fn([]int(nil), (*encoderMsgpackIO).fastpathEncSliceIntR) + fn([]int32(nil), (*encoderMsgpackIO).fastpathEncSliceInt32R) + fn([]int64(nil), (*encoderMsgpackIO).fastpathEncSliceInt64R) + fn([]bool(nil), (*encoderMsgpackIO).fastpathEncSliceBoolR) + + fn(map[string]interface{}(nil), (*encoderMsgpackIO).fastpathEncMapStringIntfR) + fn(map[string]string(nil), (*encoderMsgpackIO).fastpathEncMapStringStringR) + fn(map[string][]byte(nil), (*encoderMsgpackIO).fastpathEncMapStringBytesR) + fn(map[string]uint8(nil), (*encoderMsgpackIO).fastpathEncMapStringUint8R) + fn(map[string]uint64(nil), (*encoderMsgpackIO).fastpathEncMapStringUint64R) + fn(map[string]int(nil), (*encoderMsgpackIO).fastpathEncMapStringIntR) + fn(map[string]int32(nil), (*encoderMsgpackIO).fastpathEncMapStringInt32R) + fn(map[string]float64(nil), (*encoderMsgpackIO).fastpathEncMapStringFloat64R) + fn(map[string]bool(nil), (*encoderMsgpackIO).fastpathEncMapStringBoolR) + fn(map[uint8]interface{}(nil), (*encoderMsgpackIO).fastpathEncMapUint8IntfR) + fn(map[uint8]string(nil), (*encoderMsgpackIO).fastpathEncMapUint8StringR) + fn(map[uint8][]byte(nil), (*encoderMsgpackIO).fastpathEncMapUint8BytesR) + fn(map[uint8]uint8(nil), (*encoderMsgpackIO).fastpathEncMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*encoderMsgpackIO).fastpathEncMapUint8Uint64R) + fn(map[uint8]int(nil), (*encoderMsgpackIO).fastpathEncMapUint8IntR) + fn(map[uint8]int32(nil), (*encoderMsgpackIO).fastpathEncMapUint8Int32R) + fn(map[uint8]float64(nil), (*encoderMsgpackIO).fastpathEncMapUint8Float64R) + fn(map[uint8]bool(nil), (*encoderMsgpackIO).fastpathEncMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*encoderMsgpackIO).fastpathEncMapUint64IntfR) + fn(map[uint64]string(nil), (*encoderMsgpackIO).fastpathEncMapUint64StringR) + fn(map[uint64][]byte(nil), (*encoderMsgpackIO).fastpathEncMapUint64BytesR) + fn(map[uint64]uint8(nil), (*encoderMsgpackIO).fastpathEncMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*encoderMsgpackIO).fastpathEncMapUint64Uint64R) + fn(map[uint64]int(nil), (*encoderMsgpackIO).fastpathEncMapUint64IntR) + fn(map[uint64]int32(nil), (*encoderMsgpackIO).fastpathEncMapUint64Int32R) + fn(map[uint64]float64(nil), (*encoderMsgpackIO).fastpathEncMapUint64Float64R) + fn(map[uint64]bool(nil), (*encoderMsgpackIO).fastpathEncMapUint64BoolR) + fn(map[int]interface{}(nil), (*encoderMsgpackIO).fastpathEncMapIntIntfR) + fn(map[int]string(nil), (*encoderMsgpackIO).fastpathEncMapIntStringR) + fn(map[int][]byte(nil), (*encoderMsgpackIO).fastpathEncMapIntBytesR) + fn(map[int]uint8(nil), (*encoderMsgpackIO).fastpathEncMapIntUint8R) + fn(map[int]uint64(nil), (*encoderMsgpackIO).fastpathEncMapIntUint64R) + fn(map[int]int(nil), (*encoderMsgpackIO).fastpathEncMapIntIntR) + fn(map[int]int32(nil), (*encoderMsgpackIO).fastpathEncMapIntInt32R) + fn(map[int]float64(nil), (*encoderMsgpackIO).fastpathEncMapIntFloat64R) + fn(map[int]bool(nil), (*encoderMsgpackIO).fastpathEncMapIntBoolR) + fn(map[int32]interface{}(nil), (*encoderMsgpackIO).fastpathEncMapInt32IntfR) + fn(map[int32]string(nil), (*encoderMsgpackIO).fastpathEncMapInt32StringR) + fn(map[int32][]byte(nil), (*encoderMsgpackIO).fastpathEncMapInt32BytesR) + fn(map[int32]uint8(nil), (*encoderMsgpackIO).fastpathEncMapInt32Uint8R) + fn(map[int32]uint64(nil), (*encoderMsgpackIO).fastpathEncMapInt32Uint64R) + fn(map[int32]int(nil), (*encoderMsgpackIO).fastpathEncMapInt32IntR) + fn(map[int32]int32(nil), (*encoderMsgpackIO).fastpathEncMapInt32Int32R) + fn(map[int32]float64(nil), 
(*encoderMsgpackIO).fastpathEncMapInt32Float64R) + fn(map[int32]bool(nil), (*encoderMsgpackIO).fastpathEncMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperDecDriverMsgpackIO) fastpathDList() *fastpathDsMsgpackIO { + var i uint = 0 + var s fastpathDsMsgpackIO + fn := func(v interface{}, fd func(*decoderMsgpackIO, *decFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathDMsgpackIO{rt2id(xrt), xrt, fd} + i++ + } + + fn([]interface{}(nil), (*decoderMsgpackIO).fastpathDecSliceIntfR) + fn([]string(nil), (*decoderMsgpackIO).fastpathDecSliceStringR) + fn([][]byte(nil), (*decoderMsgpackIO).fastpathDecSliceBytesR) + fn([]float32(nil), (*decoderMsgpackIO).fastpathDecSliceFloat32R) + fn([]float64(nil), (*decoderMsgpackIO).fastpathDecSliceFloat64R) + fn([]uint8(nil), (*decoderMsgpackIO).fastpathDecSliceUint8R) + fn([]uint64(nil), (*decoderMsgpackIO).fastpathDecSliceUint64R) + fn([]int(nil), (*decoderMsgpackIO).fastpathDecSliceIntR) + fn([]int32(nil), (*decoderMsgpackIO).fastpathDecSliceInt32R) + fn([]int64(nil), (*decoderMsgpackIO).fastpathDecSliceInt64R) + fn([]bool(nil), (*decoderMsgpackIO).fastpathDecSliceBoolR) + + fn(map[string]interface{}(nil), (*decoderMsgpackIO).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*decoderMsgpackIO).fastpathDecMapStringStringR) + fn(map[string][]byte(nil), (*decoderMsgpackIO).fastpathDecMapStringBytesR) + fn(map[string]uint8(nil), (*decoderMsgpackIO).fastpathDecMapStringUint8R) + fn(map[string]uint64(nil), (*decoderMsgpackIO).fastpathDecMapStringUint64R) + fn(map[string]int(nil), (*decoderMsgpackIO).fastpathDecMapStringIntR) + fn(map[string]int32(nil), (*decoderMsgpackIO).fastpathDecMapStringInt32R) + fn(map[string]float64(nil), (*decoderMsgpackIO).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*decoderMsgpackIO).fastpathDecMapStringBoolR) + fn(map[uint8]interface{}(nil), (*decoderMsgpackIO).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*decoderMsgpackIO).fastpathDecMapUint8StringR) + fn(map[uint8][]byte(nil), (*decoderMsgpackIO).fastpathDecMapUint8BytesR) + fn(map[uint8]uint8(nil), (*decoderMsgpackIO).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*decoderMsgpackIO).fastpathDecMapUint8Uint64R) + fn(map[uint8]int(nil), (*decoderMsgpackIO).fastpathDecMapUint8IntR) + fn(map[uint8]int32(nil), (*decoderMsgpackIO).fastpathDecMapUint8Int32R) + fn(map[uint8]float64(nil), (*decoderMsgpackIO).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*decoderMsgpackIO).fastpathDecMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*decoderMsgpackIO).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*decoderMsgpackIO).fastpathDecMapUint64StringR) + fn(map[uint64][]byte(nil), (*decoderMsgpackIO).fastpathDecMapUint64BytesR) + fn(map[uint64]uint8(nil), (*decoderMsgpackIO).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*decoderMsgpackIO).fastpathDecMapUint64Uint64R) + fn(map[uint64]int(nil), (*decoderMsgpackIO).fastpathDecMapUint64IntR) + fn(map[uint64]int32(nil), (*decoderMsgpackIO).fastpathDecMapUint64Int32R) + fn(map[uint64]float64(nil), (*decoderMsgpackIO).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*decoderMsgpackIO).fastpathDecMapUint64BoolR) + fn(map[int]interface{}(nil), (*decoderMsgpackIO).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*decoderMsgpackIO).fastpathDecMapIntStringR) + fn(map[int][]byte(nil), (*decoderMsgpackIO).fastpathDecMapIntBytesR) + fn(map[int]uint8(nil), 
(*decoderMsgpackIO).fastpathDecMapIntUint8R) + fn(map[int]uint64(nil), (*decoderMsgpackIO).fastpathDecMapIntUint64R) + fn(map[int]int(nil), (*decoderMsgpackIO).fastpathDecMapIntIntR) + fn(map[int]int32(nil), (*decoderMsgpackIO).fastpathDecMapIntInt32R) + fn(map[int]float64(nil), (*decoderMsgpackIO).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*decoderMsgpackIO).fastpathDecMapIntBoolR) + fn(map[int32]interface{}(nil), (*decoderMsgpackIO).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*decoderMsgpackIO).fastpathDecMapInt32StringR) + fn(map[int32][]byte(nil), (*decoderMsgpackIO).fastpathDecMapInt32BytesR) + fn(map[int32]uint8(nil), (*decoderMsgpackIO).fastpathDecMapInt32Uint8R) + fn(map[int32]uint64(nil), (*decoderMsgpackIO).fastpathDecMapInt32Uint64R) + fn(map[int32]int(nil), (*decoderMsgpackIO).fastpathDecMapInt32IntR) + fn(map[int32]int32(nil), (*decoderMsgpackIO).fastpathDecMapInt32Int32R) + fn(map[int32]float64(nil), (*decoderMsgpackIO).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*decoderMsgpackIO).fastpathDecMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperEncDriverMsgpackIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderMsgpackIO) bool { + var ft fastpathETMsgpackIO + switch v := iv.(type) { + case []interface{}: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntfV(v, e) + } + case []string: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceStringV(v, e) + } + case [][]byte: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBytesV(v, e) + } + case []float32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat32V(v, e) + } + case []float64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat64V(v, e) + } + case []uint8: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint8V(v, e) + } + case []uint64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint64V(v, e) + } + case []int: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntV(v, e) + } + case []int32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt32V(v, e) + } + case []int64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt64V(v, e) + } + case []bool: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBoolV(v, e) + } + case map[string]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntfV(v, e) + } + case map[string]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringStringV(v, e) + } + case map[string][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBytesV(v, e) + } + case map[string]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint8V(v, e) + } + case map[string]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint64V(v, e) + } + case map[string]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntV(v, e) + } + case map[string]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringInt32V(v, e) + } + case map[string]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringFloat64V(v, e) + } + case map[string]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBoolV(v, e) + } + case map[uint8]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntfV(v, e) + } + case map[uint8]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8StringV(v, e) + } + 
case map[uint8][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BytesV(v, e) + } + case map[uint8]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint8V(v, e) + } + case map[uint8]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint64V(v, e) + } + case map[uint8]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntV(v, e) + } + case map[uint8]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Int32V(v, e) + } + case map[uint8]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Float64V(v, e) + } + case map[uint8]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BoolV(v, e) + } + case map[uint64]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntfV(v, e) + } + case map[uint64]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64StringV(v, e) + } + case map[uint64][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BytesV(v, e) + } + case map[uint64]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint8V(v, e) + } + case map[uint64]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint64V(v, e) + } + case map[uint64]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntV(v, e) + } + case map[uint64]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Int32V(v, e) + } + case map[uint64]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Float64V(v, e) + } + case map[uint64]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BoolV(v, e) + } + case map[int]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntfV(v, e) + } + case map[int]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntStringV(v, e) + } + case map[int][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBytesV(v, e) + } + case map[int]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint8V(v, e) + } + case map[int]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint64V(v, e) + } + case map[int]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntV(v, e) + } + case map[int]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntInt32V(v, e) + } + case map[int]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntFloat64V(v, e) + } + case map[int]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBoolV(v, e) + } + case map[int32]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntfV(v, e) + } + case map[int32]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32StringV(v, e) + } + case map[int32][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BytesV(v, e) + } + case map[int32]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint8V(v, e) + } + case map[int32]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint64V(v, e) + } + case map[int32]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntV(v, e) + } + case map[int32]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Int32V(v, e) + } + case map[int32]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Float64V(v, e) + } + case map[int32]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BoolV(v, e) + } + default: + 
_ = v + return false + } + return true +} + +func (e *encoderMsgpackIO) fastpathEncSliceIntfR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackIO + var v []interface{} + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]interface{}) + } + if f.ti.mbs { + ft.EncAsMapSliceIntfV(v, e) + return + } + ft.EncSliceIntfV(v, e) +} +func (fastpathETMsgpackIO) EncSliceIntfV(v []interface{}, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackIO) EncAsMapSliceIntfV(v []interface{}, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackIO) fastpathEncSliceStringR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackIO + var v []string + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]string) + } + if f.ti.mbs { + ft.EncAsMapSliceStringV(v, e) + return + } + ft.EncSliceStringV(v, e) +} +func (fastpathETMsgpackIO) EncSliceStringV(v []string, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackIO) EncAsMapSliceStringV(v []string, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackIO) fastpathEncSliceBytesR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackIO + var v [][]byte + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([][]byte) + } + if f.ti.mbs { + ft.EncAsMapSliceBytesV(v, e) + return + } + ft.EncSliceBytesV(v, e) +} +func (fastpathETMsgpackIO) EncSliceBytesV(v [][]byte, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackIO) EncAsMapSliceBytesV(v [][]byte, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackIO) fastpathEncSliceFloat32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackIO + var v []float32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float32) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat32V(v, e) + return + } + ft.EncSliceFloat32V(v, e) +} +func 
(fastpathETMsgpackIO) EncSliceFloat32V(v []float32, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackIO) EncAsMapSliceFloat32V(v []float32, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackIO) fastpathEncSliceFloat64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackIO + var v []float64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float64) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat64V(v, e) + return + } + ft.EncSliceFloat64V(v, e) +} +func (fastpathETMsgpackIO) EncSliceFloat64V(v []float64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackIO) EncAsMapSliceFloat64V(v []float64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackIO) fastpathEncSliceUint8R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackIO + var v []uint8 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint8) + } + if f.ti.mbs { + ft.EncAsMapSliceUint8V(v, e) + return + } + ft.EncSliceUint8V(v, e) +} +func (fastpathETMsgpackIO) EncSliceUint8V(v []uint8, e *encoderMsgpackIO) { + e.e.EncodeStringBytesRaw(v) +} +func (fastpathETMsgpackIO) EncAsMapSliceUint8V(v []uint8, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(uint64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackIO) fastpathEncSliceUint64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackIO + var v []uint64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint64) + } + if f.ti.mbs { + ft.EncAsMapSliceUint64V(v, e) + return + } + ft.EncSliceUint64V(v, e) +} +func (fastpathETMsgpackIO) EncSliceUint64V(v []uint64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackIO) EncAsMapSliceUint64V(v []uint64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(v[j]) + } + e.c = 
0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackIO) fastpathEncSliceIntR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackIO + var v []int + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int) + } + if f.ti.mbs { + ft.EncAsMapSliceIntV(v, e) + return + } + ft.EncSliceIntV(v, e) +} +func (fastpathETMsgpackIO) EncSliceIntV(v []int, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackIO) EncAsMapSliceIntV(v []int, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackIO) fastpathEncSliceInt32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackIO + var v []int32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int32) + } + if f.ti.mbs { + ft.EncAsMapSliceInt32V(v, e) + return + } + ft.EncSliceInt32V(v, e) +} +func (fastpathETMsgpackIO) EncSliceInt32V(v []int32, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackIO) EncAsMapSliceInt32V(v []int32, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackIO) fastpathEncSliceInt64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackIO + var v []int64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int64) + } + if f.ti.mbs { + ft.EncAsMapSliceInt64V(v, e) + return + } + ft.EncSliceInt64V(v, e) +} +func (fastpathETMsgpackIO) EncSliceInt64V(v []int64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackIO) EncAsMapSliceInt64V(v []int64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackIO) fastpathEncSliceBoolR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETMsgpackIO + var v []bool + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]bool) + } + if f.ti.mbs { + ft.EncAsMapSliceBoolV(v, e) + return + } + ft.EncSliceBoolV(v, e) +} +func (fastpathETMsgpackIO) EncSliceBoolV(v []bool, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j 
:= range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETMsgpackIO) EncAsMapSliceBoolV(v []bool, e *encoderMsgpackIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackIO) fastpathEncMapStringIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (fastpathETMsgpackIO) EncMapStringIntfV(v map[string]interface{}, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapStringStringR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (fastpathETMsgpackIO) EncMapStringStringV(v map[string]string, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapStringBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e) +} +func (fastpathETMsgpackIO) EncMapStringBytesV(v map[string][]byte, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapStringUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (fastpathETMsgpackIO) EncMapStringUint8V(v map[string]uint8, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := 
range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapStringUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (fastpathETMsgpackIO) EncMapStringUint64V(v map[string]uint64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapStringIntR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (fastpathETMsgpackIO) EncMapStringIntV(v map[string]int, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapStringInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (fastpathETMsgpackIO) EncMapStringInt32V(v map[string]int32, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapStringFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (fastpathETMsgpackIO) EncMapStringFloat64V(v map[string]float64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + 
e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapStringBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (fastpathETMsgpackIO) EncMapStringBoolV(v map[string]bool, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint8IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (fastpathETMsgpackIO) EncMapUint8IntfV(v map[uint8]interface{}, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint8StringR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (fastpathETMsgpackIO) EncMapUint8StringV(v map[uint8]string, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint8BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e) +} +func (fastpathETMsgpackIO) EncMapUint8BytesV(v map[uint8][]byte, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) 
fastpathEncMapUint8Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (fastpathETMsgpackIO) EncMapUint8Uint8V(v map[uint8]uint8, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint8Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (fastpathETMsgpackIO) EncMapUint8Uint64V(v map[uint8]uint64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint8IntR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (fastpathETMsgpackIO) EncMapUint8IntV(v map[uint8]int, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint8Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (fastpathETMsgpackIO) EncMapUint8Int32V(v map[uint8]int32, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint8Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (fastpathETMsgpackIO) EncMapUint8Float64V(v map[uint8]float64, e 
*encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint8BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (fastpathETMsgpackIO) EncMapUint8BoolV(v map[uint8]bool, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint64IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (fastpathETMsgpackIO) EncMapUint64IntfV(v map[uint64]interface{}, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint64StringR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (fastpathETMsgpackIO) EncMapUint64StringV(v map[uint64]string, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint64BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e) +} +func (fastpathETMsgpackIO) EncMapUint64BytesV(v map[uint64][]byte, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + 
for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint64Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (fastpathETMsgpackIO) EncMapUint64Uint8V(v map[uint64]uint8, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint64Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (fastpathETMsgpackIO) EncMapUint64Uint64V(v map[uint64]uint64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint64IntR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (fastpathETMsgpackIO) EncMapUint64IntV(v map[uint64]int, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint64Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (fastpathETMsgpackIO) EncMapUint64Int32V(v map[uint64]int32, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = 
containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint64Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (fastpathETMsgpackIO) EncMapUint64Float64V(v map[uint64]float64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapUint64BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (fastpathETMsgpackIO) EncMapUint64BoolV(v map[uint64]bool, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapIntIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (fastpathETMsgpackIO) EncMapIntIntfV(v map[int]interface{}, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapIntStringR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (fastpathETMsgpackIO) EncMapIntStringV(v map[int]string, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) 
fastpathEncMapIntBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e) +} +func (fastpathETMsgpackIO) EncMapIntBytesV(v map[int][]byte, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapIntUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (fastpathETMsgpackIO) EncMapIntUint8V(v map[int]uint8, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapIntUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (fastpathETMsgpackIO) EncMapIntUint64V(v map[int]uint64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapIntIntR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (fastpathETMsgpackIO) EncMapIntIntV(v map[int]int, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapIntInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (fastpathETMsgpackIO) EncMapIntInt32V(v map[int]int32, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + 
e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapIntFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (fastpathETMsgpackIO) EncMapIntFloat64V(v map[int]float64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapIntBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (fastpathETMsgpackIO) EncMapIntBoolV(v map[int]bool, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapInt32IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (fastpathETMsgpackIO) EncMapInt32IntfV(v map[int32]interface{}, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapInt32StringR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (fastpathETMsgpackIO) EncMapInt32StringV(v map[int32]string, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = 
containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapInt32BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e) +} +func (fastpathETMsgpackIO) EncMapInt32BytesV(v map[int32][]byte, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapInt32Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (fastpathETMsgpackIO) EncMapInt32Uint8V(v map[int32]uint8, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapInt32Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (fastpathETMsgpackIO) EncMapInt32Uint64V(v map[int32]uint64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapInt32IntR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (fastpathETMsgpackIO) EncMapInt32IntV(v map[int32]int, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + 
e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapInt32Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (fastpathETMsgpackIO) EncMapInt32Int32V(v map[int32]int32, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapInt32Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (fastpathETMsgpackIO) EncMapInt32Float64V(v map[int32]float64, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderMsgpackIO) fastpathEncMapInt32BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETMsgpackIO{}.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (fastpathETMsgpackIO) EncMapInt32BoolV(v map[int32]bool, e *encoderMsgpackIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (helperDecDriverMsgpackIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderMsgpackIO) bool { + var ft fastpathDTMsgpackIO + var changed bool + var containerLen int + switch v := iv.(type) { + case []interface{}: + ft.DecSliceIntfN(v, d) + case *[]interface{}: + var v2 []interface{} + if v2, changed = ft.DecSliceIntfY(*v, d); changed { + *v = v2 + } + case []string: + ft.DecSliceStringN(v, d) + case *[]string: + var v2 []string + if v2, changed = ft.DecSliceStringY(*v, d); changed { + *v = v2 + } + case [][]byte: + ft.DecSliceBytesN(v, d) + case *[][]byte: + var v2 [][]byte + if v2, changed = ft.DecSliceBytesY(*v, d); changed { + *v = v2 + } + case []float32: + ft.DecSliceFloat32N(v, d) + case *[]float32: + var v2 []float32 + if v2, changed = ft.DecSliceFloat32Y(*v, d); changed { + *v = v2 + } + case []float64: + ft.DecSliceFloat64N(v, d) + case *[]float64: + var v2 []float64 + if v2, changed = ft.DecSliceFloat64Y(*v, d); changed { + *v = v2 + } + case 
[]uint8: + ft.DecSliceUint8N(v, d) + case *[]uint8: + var v2 []uint8 + if v2, changed = ft.DecSliceUint8Y(*v, d); changed { + *v = v2 + } + case []uint64: + ft.DecSliceUint64N(v, d) + case *[]uint64: + var v2 []uint64 + if v2, changed = ft.DecSliceUint64Y(*v, d); changed { + *v = v2 + } + case []int: + ft.DecSliceIntN(v, d) + case *[]int: + var v2 []int + if v2, changed = ft.DecSliceIntY(*v, d); changed { + *v = v2 + } + case []int32: + ft.DecSliceInt32N(v, d) + case *[]int32: + var v2 []int32 + if v2, changed = ft.DecSliceInt32Y(*v, d); changed { + *v = v2 + } + case []int64: + ft.DecSliceInt64N(v, d) + case *[]int64: + var v2 []int64 + if v2, changed = ft.DecSliceInt64Y(*v, d); changed { + *v = v2 + } + case []bool: + ft.DecSliceBoolN(v, d) + case *[]bool: + var v2 []bool + if v2, changed = ft.DecSliceBoolY(*v, d); changed { + *v = v2 + } + case map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int: + if containerLen = 
d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if 
containerLen != 0 { + ft.DecMapUint8BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil 
{ + *v = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Float64L(v, containerLen, d) + } + d.mapEnd() + } + case 
*map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + 
ft.DecMapIntIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); 
containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*v, containerLen, d) + } + d.mapEnd() + } + default: + _ = v + return false + } + return true +} + +func (d *decoderMsgpackIO) fastpathDecSliceIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]interface{}) + if vv, changed := ft.DecSliceIntfY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []interface{} + rvGetSlice4Array(rv, &v) + ft.DecSliceIntfN(v, d) + default: + ft.DecSliceIntfN(rv2i(rv).([]interface{}), d) + } +} +func (fastpathDTMsgpackIO) DecSliceIntfY(v 
[]interface{}, d *decoderMsgpackIO) (v2 []interface{}, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []interface{}) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]interface{}, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + d.decode(&v[uint(j)]) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]interface{}{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackIO) DecSliceIntfN(v []interface{}, d *decoderMsgpackIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + d.decode(&v[uint(j)]) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackIO) fastpathDecSliceStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]string) + if vv, changed := ft.DecSliceStringY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []string + rvGetSlice4Array(rv, &v) + ft.DecSliceStringN(v, d) + default: + ft.DecSliceStringN(rv2i(rv).([]string), d) + } +} +func (fastpathDTMsgpackIO) DecSliceStringY(v []string, d *decoderMsgpackIO) (v2 []string, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []string) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]string, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + 
fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, "")) + } + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]string{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackIO) DecSliceStringN(v []string, d *decoderMsgpackIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackIO) fastpathDecSliceBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[][]byte) + if vv, changed := ft.DecSliceBytesY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v [][]byte + rvGetSlice4Array(rv, &v) + ft.DecSliceBytesN(v, d) + default: + ft.DecSliceBytesN(rv2i(rv).([][]byte), d) + } +} +func (fastpathDTMsgpackIO) DecSliceBytesY(v [][]byte, d *decoderMsgpackIO) (v2 [][]byte, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst [][]byte) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 24)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([][]byte, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([][]byte{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackIO) DecSliceBytesN(v [][]byte, d *decoderMsgpackIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) 
+ } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackIO) fastpathDecSliceFloat32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float32) + if vv, changed := ft.DecSliceFloat32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float32 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat32N(v, d) + default: + ft.DecSliceFloat32N(rv2i(rv).([]float32), d) + } +} +func (fastpathDTMsgpackIO) DecSliceFloat32Y(v []float32, d *decoderMsgpackIO) (v2 []float32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = float32(d.d.DecodeFloat32()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackIO) DecSliceFloat32N(v []float32, d *decoderMsgpackIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = float32(d.d.DecodeFloat32()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackIO) fastpathDecSliceFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float64) + if vv, changed := ft.DecSliceFloat64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float64 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat64N(v, d) + default: + ft.DecSliceFloat64N(rv2i(rv).([]float64), d) + } +} +func (fastpathDTMsgpackIO) DecSliceFloat64Y(v 
[]float64, d *decoderMsgpackIO) (v2 []float64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeFloat64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackIO) DecSliceFloat64N(v []float64, d *decoderMsgpackIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeFloat64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackIO) fastpathDecSliceUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint8) + if vv, changed := ft.DecSliceUint8Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint8 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint8N(v, d) + default: + ft.DecSliceUint8N(rv2i(rv).([]uint8), d) + } +} +func (fastpathDTMsgpackIO) DecSliceUint8Y(v []uint8, d *decoderMsgpackIO) (v2 []uint8, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + if ctyp != valueTypeMap { + var dbi dBytesIntoState + v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false) + return v2, dbi != dBytesIntoParamOut + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint8) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint8, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + 
fnv(append(v, 0)) + } + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint8{}) + } + d.mapEnd() + return v, changed +} +func (fastpathDTMsgpackIO) DecSliceUint8N(v []uint8, d *decoderMsgpackIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + if ctyp != valueTypeMap { + d.decodeBytesInto(v[:len(v):len(v)], true) + return + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + d.mapEnd() +} + +func (d *decoderMsgpackIO) fastpathDecSliceUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint64) + if vv, changed := ft.DecSliceUint64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint64 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint64N(v, d) + default: + ft.DecSliceUint64N(rv2i(rv).([]uint64), d) + } +} +func (fastpathDTMsgpackIO) DecSliceUint64Y(v []uint64, d *decoderMsgpackIO) (v2 []uint64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeUint64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackIO) DecSliceUint64N(v []uint64, d *decoderMsgpackIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeUint64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackIO) fastpathDecSliceIntR(f *decFnInfo, rv reflect.Value) { + var ft 
fastpathDTMsgpackIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int) + if vv, changed := ft.DecSliceIntY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int + rvGetSlice4Array(rv, &v) + ft.DecSliceIntN(v, d) + default: + ft.DecSliceIntN(rv2i(rv).([]int), d) + } +} +func (fastpathDTMsgpackIO) DecSliceIntY(v []int, d *decoderMsgpackIO) (v2 []int, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackIO) DecSliceIntN(v []int, d *decoderMsgpackIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackIO) fastpathDecSliceInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int32) + if vv, changed := ft.DecSliceInt32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int32 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt32N(v, d) + default: + ft.DecSliceInt32N(rv2i(rv).([]int32), d) + } +} +func (fastpathDTMsgpackIO) DecSliceInt32Y(v []int32, d *decoderMsgpackIO) (v2 []int32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int32) { v, changed = dst, true } + for ; 
d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackIO) DecSliceInt32N(v []int32, d *decoderMsgpackIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackIO) fastpathDecSliceInt64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int64) + if vv, changed := ft.DecSliceInt64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int64 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt64N(v, d) + default: + ft.DecSliceInt64N(rv2i(rv).([]int64), d) + } +} +func (fastpathDTMsgpackIO) DecSliceInt64Y(v []int64, d *decoderMsgpackIO) (v2 []int64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeInt64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackIO) DecSliceInt64N(v []int64, d *decoderMsgpackIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var 
containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeInt64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderMsgpackIO) fastpathDecSliceBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]bool) + if vv, changed := ft.DecSliceBoolY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []bool + rvGetSlice4Array(rv, &v) + ft.DecSliceBoolN(v, d) + default: + ft.DecSliceBoolN(rv2i(rv).([]bool), d) + } +} +func (fastpathDTMsgpackIO) DecSliceBoolY(v []bool, d *decoderMsgpackIO) (v2 []bool, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []bool) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]bool, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, false)) + } + v[uint(j)] = d.d.DecodeBool() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]bool{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTMsgpackIO) DecSliceBoolN(v []bool, d *decoderMsgpackIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeBool() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} +func (d *decoderMsgpackIO) fastpathDecMapStringIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]interface{}) + if *vp == nil { + 
*vp = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntfL(rv2i(rv).(map[string]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapStringIntfL(v map[string]interface{}, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderMsgpackIO) fastpathDecMapStringStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]string) + if *vp == nil { + *vp = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringStringL(rv2i(rv).(map[string]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapStringStringL(v map[string]string, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderMsgpackIO) fastpathDecMapStringBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string][]byte) + if *vp == nil { + *vp = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBytesL(rv2i(rv).(map[string][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapStringBytesL(v map[string][]byte, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderMsgpackIO) fastpathDecMapStringUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint8) + if *vp == nil { + *vp = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint8L(rv2i(rv).(map[string]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) 
DecMapStringUint8L(v map[string]uint8, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapStringUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint64) + if *vp == nil { + *vp = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint64L(rv2i(rv).(map[string]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapStringUint64L(v map[string]uint64, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderMsgpackIO) fastpathDecMapStringIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int) + if *vp == nil { + *vp = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntL(rv2i(rv).(map[string]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapStringIntL(v map[string]int, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapStringInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int32) + if *vp == nil { + *vp = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringInt32L(rv2i(rv).(map[string]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapStringInt32L(v map[string]int32, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapStringFloat64R(f *decFnInfo, rv reflect.Value) { + var ft 
fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]float64) + if *vp == nil { + *vp = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringFloat64L(rv2i(rv).(map[string]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapStringFloat64L(v map[string]float64, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderMsgpackIO) fastpathDecMapStringBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]bool) + if *vp == nil { + *vp = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBoolL(rv2i(rv).(map[string]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapStringBoolL(v map[string]bool, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint8IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]interface{}) + if *vp == nil { + *vp = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntfL(rv2i(rv).(map[uint8]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint8IntfL(v map[uint8]interface{}, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint8StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]string) + if *vp == nil { + *vp = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8StringL(rv2i(rv).(map[uint8]string), containerLen, d) + } + d.mapEnd() +} +func 
(fastpathDTMsgpackIO) DecMapUint8StringL(v map[uint8]string, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint8BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8][]byte) + if *vp == nil { + *vp = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BytesL(rv2i(rv).(map[uint8][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint8BytesL(v map[uint8][]byte, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint8Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint8) + if *vp == nil { + *vp = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint8L(rv2i(rv).(map[uint8]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint8Uint8L(v map[uint8]uint8, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint8Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint64) + if *vp == nil { + *vp = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint64L(rv2i(rv).(map[uint8]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint8Uint64L(v map[uint8]uint64, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func 
(d *decoderMsgpackIO) fastpathDecMapUint8IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int) + if *vp == nil { + *vp = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntL(rv2i(rv).(map[uint8]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint8IntL(v map[uint8]int, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint8Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int32) + if *vp == nil { + *vp = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Int32L(rv2i(rv).(map[uint8]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint8Int32L(v map[uint8]int32, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint8Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]float64) + if *vp == nil { + *vp = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Float64L(rv2i(rv).(map[uint8]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint8Float64L(v map[uint8]float64, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint8BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]bool) + if *vp == nil { + *vp = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BoolL(rv2i(rv).(map[uint8]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint8BoolL(v 
map[uint8]bool, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint64IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]interface{}) + if *vp == nil { + *vp = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntfL(rv2i(rv).(map[uint64]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint64IntfL(v map[uint64]interface{}, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint64StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]string) + if *vp == nil { + *vp = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64StringL(rv2i(rv).(map[uint64]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint64StringL(v map[uint64]string, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint64BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64][]byte) + if *vp == nil { + *vp = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BytesL(rv2i(rv).(map[uint64][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint64BytesL(v map[uint64][]byte, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = 
d.decodeBytesInto(mv, false) + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint64Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint8) + if *vp == nil { + *vp = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint8L(rv2i(rv).(map[uint64]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint64Uint8L(v map[uint64]uint8, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint64Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint64) + if *vp == nil { + *vp = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint64L(rv2i(rv).(map[uint64]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint64Uint64L(v map[uint64]uint64, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint64IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int) + if *vp == nil { + *vp = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint64IntL(v map[uint64]int, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint64Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int32) + if *vp == nil { + *vp = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint64Int32L(v 
map[uint64]int32, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint64Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]float64) + if *vp == nil { + *vp = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderMsgpackIO) fastpathDecMapUint64BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]bool) + if *vp == nil { + *vp = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderMsgpackIO) fastpathDecMapIntIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]interface{}) + if *vp == nil { + *vp = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderMsgpackIO) fastpathDecMapIntStringR(f *decFnInfo, rv reflect.Value) { + var ft 
fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]string) + if *vp == nil { + *vp = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapIntStringL(v map[int]string, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderMsgpackIO) fastpathDecMapIntBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int][]byte) + if *vp == nil { + *vp = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapIntBytesL(v map[int][]byte, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderMsgpackIO) fastpathDecMapIntUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint8) + if *vp == nil { + *vp = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapIntUint8L(v map[int]uint8, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapIntUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint64) + if *vp == nil { + *vp = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapIntUint64L(v 
map[int]uint64, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderMsgpackIO) fastpathDecMapIntIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int) + if *vp == nil { + *vp = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapIntIntL(v map[int]int, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapIntInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int32) + if *vp == nil { + *vp = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapIntInt32L(v map[int]int32, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapIntFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]float64) + if *vp == nil { + *vp = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapIntFloat64L(v map[int]float64, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderMsgpackIO) fastpathDecMapIntBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ 
:= rv2i(rv).(*map[int]bool) + if *vp == nil { + *vp = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapIntBoolL(v map[int]bool, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderMsgpackIO) fastpathDecMapInt32IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]interface{}) + if *vp == nil { + *vp = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderMsgpackIO) fastpathDecMapInt32StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]string) + if *vp == nil { + *vp = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapInt32StringL(v map[int32]string, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderMsgpackIO) fastpathDecMapInt32BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32][]byte) + if *vp == nil { + *vp = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *decoderMsgpackIO) { + if v == nil { + 
halt.errorInt("cannot decode into nil map[int32][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderMsgpackIO) fastpathDecMapInt32Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint8) + if *vp == nil { + *vp = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapInt32Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint64) + if *vp == nil { + *vp = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderMsgpackIO) fastpathDecMapInt32IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int) + if *vp == nil { + *vp = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapInt32IntL(v map[int32]int, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapInt32Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := 
d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int32) + if *vp == nil { + *vp = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapInt32Int32L(v map[int32]int32, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderMsgpackIO) fastpathDecMapInt32Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]float64) + if *vp == nil { + *vp = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderMsgpackIO) fastpathDecMapInt32BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTMsgpackIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]bool) + if *vp == nil { + *vp = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTMsgpackIO) DecMapInt32BoolL(v map[int32]bool, containerLen int, d *decoderMsgpackIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go index c0861df5e..ed8ca99a6 100644 --- a/vendor/github.com/ugorji/go/codec/msgpack.go +++ b/vendor/github.com/ugorji/go/codec/msgpack.go @@ -1,3 +1,5 @@ +//go:build notmono || codec.notmono + // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. 
@@ -18,247 +20,106 @@ For compatibility with behaviour of msgpack-c reference implementation: package codec import ( - "fmt" "io" "math" - "net/rpc" "reflect" "time" "unicode/utf8" ) -const ( - mpPosFixNumMin byte = 0x00 - mpPosFixNumMax byte = 0x7f - mpFixMapMin byte = 0x80 - mpFixMapMax byte = 0x8f - mpFixArrayMin byte = 0x90 - mpFixArrayMax byte = 0x9f - mpFixStrMin byte = 0xa0 - mpFixStrMax byte = 0xbf - mpNil byte = 0xc0 - _ byte = 0xc1 - mpFalse byte = 0xc2 - mpTrue byte = 0xc3 - mpFloat byte = 0xca - mpDouble byte = 0xcb - mpUint8 byte = 0xcc - mpUint16 byte = 0xcd - mpUint32 byte = 0xce - mpUint64 byte = 0xcf - mpInt8 byte = 0xd0 - mpInt16 byte = 0xd1 - mpInt32 byte = 0xd2 - mpInt64 byte = 0xd3 - - // extensions below - mpBin8 byte = 0xc4 - mpBin16 byte = 0xc5 - mpBin32 byte = 0xc6 - mpExt8 byte = 0xc7 - mpExt16 byte = 0xc8 - mpExt32 byte = 0xc9 - mpFixExt1 byte = 0xd4 - mpFixExt2 byte = 0xd5 - mpFixExt4 byte = 0xd6 - mpFixExt8 byte = 0xd7 - mpFixExt16 byte = 0xd8 - - mpStr8 byte = 0xd9 // new - mpStr16 byte = 0xda - mpStr32 byte = 0xdb - - mpArray16 byte = 0xdc - mpArray32 byte = 0xdd - - mpMap16 byte = 0xde - mpMap32 byte = 0xdf - - mpNegFixNumMin byte = 0xe0 - mpNegFixNumMax byte = 0xff -) - -var mpTimeExtTag int8 = -1 -var mpTimeExtTagU = uint8(mpTimeExtTag) - -var mpdescNames = map[byte]string{ - mpNil: "nil", - mpFalse: "false", - mpTrue: "true", - mpFloat: "float", - mpDouble: "float", - mpUint8: "uuint", - mpUint16: "uint", - mpUint32: "uint", - mpUint64: "uint", - mpInt8: "int", - mpInt16: "int", - mpInt32: "int", - mpInt64: "int", - - mpStr8: "string|bytes", - mpStr16: "string|bytes", - mpStr32: "string|bytes", - - mpBin8: "bytes", - mpBin16: "bytes", - mpBin32: "bytes", - - mpArray16: "array", - mpArray32: "array", - - mpMap16: "map", - mpMap32: "map", -} - -func mpdesc(bd byte) (s string) { - s = mpdescNames[bd] - if s == "" { - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax, - bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - s = "int" - case bd >= mpFixStrMin && bd <= mpFixStrMax: - s = "string|bytes" - case bd >= mpFixArrayMin && bd <= mpFixArrayMax: - s = "array" - case bd >= mpFixMapMin && bd <= mpFixMapMax: - s = "map" - case bd >= mpFixExt1 && bd <= mpFixExt16, - bd >= mpExt8 && bd <= mpExt32: - s = "ext" - default: - s = "unknown" - } - } - return -} - -// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec -// that the backend RPC service takes multiple arguments, which have been arranged -// in sequence in the slice. -// -// The Codec then passes it AS-IS to the rpc service (without wrapping it in an -// array of 1 element). -type MsgpackSpecRpcMultiArgs []interface{} - -// A MsgpackContainer type specifies the different types of msgpackContainers. 
-type msgpackContainerType struct { - fixCutoff, bFixMin, b8, b16, b32 byte - // hasFixMin, has8, has8Always bool -} - -var ( - msgpackContainerRawLegacy = msgpackContainerType{ - 32, mpFixStrMin, 0, mpStr16, mpStr32, - } - msgpackContainerStr = msgpackContainerType{ - 32, mpFixStrMin, mpStr8, mpStr16, mpStr32, // true, true, false, - } - msgpackContainerBin = msgpackContainerType{ - 0, 0, mpBin8, mpBin16, mpBin32, // false, true, true, - } - msgpackContainerList = msgpackContainerType{ - 16, mpFixArrayMin, 0, mpArray16, mpArray32, // true, false, false, - } - msgpackContainerMap = msgpackContainerType{ - 16, mpFixMapMin, 0, mpMap16, mpMap32, // true, false, false, - } -) - //--------------------------------------------- -type msgpackEncDriver struct { +type msgpackEncDriver[T encWriter] struct { noBuiltInTypes encDriverNoopContainerWriter encDriverNoState + encDriverContainerNoTrackerT + encInit2er + h *MsgpackHandle + e *encoderBase + w T // x [8]byte - e Encoder } -func (e *msgpackEncDriver) encoder() *Encoder { - return &e.e +func (e *msgpackEncDriver[T]) EncodeNil() { + e.w.writen1(mpNil) } -func (e *msgpackEncDriver) EncodeNil() { - e.e.encWr.writen1(mpNil) -} - -func (e *msgpackEncDriver) EncodeInt(i int64) { +func (e *msgpackEncDriver[T]) EncodeInt(i int64) { if e.h.PositiveIntUnsigned && i >= 0 { e.EncodeUint(uint64(i)) } else if i > math.MaxInt8 { if i <= math.MaxInt16 { - e.e.encWr.writen1(mpInt16) - bigen.writeUint16(e.e.w(), uint16(i)) + e.w.writen1(mpInt16) + e.w.writen2(bigen.PutUint16(uint16(i))) } else if i <= math.MaxInt32 { - e.e.encWr.writen1(mpInt32) - bigen.writeUint32(e.e.w(), uint32(i)) + e.w.writen1(mpInt32) + e.w.writen4(bigen.PutUint32(uint32(i))) } else { - e.e.encWr.writen1(mpInt64) - bigen.writeUint64(e.e.w(), uint64(i)) + e.w.writen1(mpInt64) + e.w.writen8(bigen.PutUint64(uint64(i))) } } else if i >= -32 { if e.h.NoFixedNum { - e.e.encWr.writen2(mpInt8, byte(i)) + e.w.writen2(mpInt8, byte(i)) } else { - e.e.encWr.writen1(byte(i)) + e.w.writen1(byte(i)) } } else if i >= math.MinInt8 { - e.e.encWr.writen2(mpInt8, byte(i)) + e.w.writen2(mpInt8, byte(i)) } else if i >= math.MinInt16 { - e.e.encWr.writen1(mpInt16) - bigen.writeUint16(e.e.w(), uint16(i)) + e.w.writen1(mpInt16) + e.w.writen2(bigen.PutUint16(uint16(i))) } else if i >= math.MinInt32 { - e.e.encWr.writen1(mpInt32) - bigen.writeUint32(e.e.w(), uint32(i)) + e.w.writen1(mpInt32) + e.w.writen4(bigen.PutUint32(uint32(i))) } else { - e.e.encWr.writen1(mpInt64) - bigen.writeUint64(e.e.w(), uint64(i)) + e.w.writen1(mpInt64) + e.w.writen8(bigen.PutUint64(uint64(i))) } } -func (e *msgpackEncDriver) EncodeUint(i uint64) { +func (e *msgpackEncDriver[T]) EncodeUint(i uint64) { if i <= math.MaxInt8 { if e.h.NoFixedNum { - e.e.encWr.writen2(mpUint8, byte(i)) + e.w.writen2(mpUint8, byte(i)) } else { - e.e.encWr.writen1(byte(i)) + e.w.writen1(byte(i)) } } else if i <= math.MaxUint8 { - e.e.encWr.writen2(mpUint8, byte(i)) + e.w.writen2(mpUint8, byte(i)) } else if i <= math.MaxUint16 { - e.e.encWr.writen1(mpUint16) - bigen.writeUint16(e.e.w(), uint16(i)) + e.w.writen1(mpUint16) + e.w.writen2(bigen.PutUint16(uint16(i))) } else if i <= math.MaxUint32 { - e.e.encWr.writen1(mpUint32) - bigen.writeUint32(e.e.w(), uint32(i)) + e.w.writen1(mpUint32) + e.w.writen4(bigen.PutUint32(uint32(i))) } else { - e.e.encWr.writen1(mpUint64) - bigen.writeUint64(e.e.w(), uint64(i)) + e.w.writen1(mpUint64) + e.w.writen8(bigen.PutUint64(uint64(i))) } } -func (e *msgpackEncDriver) EncodeBool(b bool) { +func (e *msgpackEncDriver[T]) EncodeBool(b 
bool) { if b { - e.e.encWr.writen1(mpTrue) + e.w.writen1(mpTrue) } else { - e.e.encWr.writen1(mpFalse) + e.w.writen1(mpFalse) } } -func (e *msgpackEncDriver) EncodeFloat32(f float32) { - e.e.encWr.writen1(mpFloat) - bigen.writeUint32(e.e.w(), math.Float32bits(f)) +func (e *msgpackEncDriver[T]) EncodeFloat32(f float32) { + e.w.writen1(mpFloat) + e.w.writen4(bigen.PutUint32(math.Float32bits(f))) } -func (e *msgpackEncDriver) EncodeFloat64(f float64) { - e.e.encWr.writen1(mpDouble) - bigen.writeUint64(e.e.w(), math.Float64bits(f)) +func (e *msgpackEncDriver[T]) EncodeFloat64(f float64) { + e.w.writen1(mpDouble) + e.w.writen8(bigen.PutUint64(math.Float64bits(f))) } -func (e *msgpackEncDriver) EncodeTime(t time.Time) { +func (e *msgpackEncDriver[T]) EncodeTime(t time.Time) { if t.IsZero() { e.EncodeNil() return @@ -282,33 +143,33 @@ func (e *msgpackEncDriver) EncodeTime(t time.Time) { } switch l { case 4: - bigen.writeUint32(e.e.w(), uint32(data64)) + e.w.writen4(bigen.PutUint32(uint32(data64))) case 8: - bigen.writeUint64(e.e.w(), data64) + e.w.writen8(bigen.PutUint64(data64)) case 12: - bigen.writeUint32(e.e.w(), uint32(nsec)) - bigen.writeUint64(e.e.w(), uint64(sec)) + e.w.writen4(bigen.PutUint32(uint32(nsec))) + e.w.writen8(bigen.PutUint64(uint64(sec))) } } -func (e *msgpackEncDriver) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) { +func (e *msgpackEncDriver[T]) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) { var bs0, bs []byte if ext == SelfExt { bs0 = e.e.blist.get(1024) bs = bs0 - e.e.sideEncode(v, basetype, &bs) + sideEncode(e.h, &e.h.sideEncPool, func(se encoderI) { oneOffEncode(se, v, &bs, basetype, true) }) } else { bs = ext.WriteExt(v) } if bs == nil { - e.EncodeNil() + e.writeNilBytes() goto END } if e.h.WriteExt { e.encodeExtPreamble(uint8(xtag), len(bs)) - e.e.encWr.writeb(bs) + e.w.writeb(bs) } else { - e.EncodeStringBytesRaw(bs) + e.EncodeBytes(bs) } END: if ext == SelfExt { @@ -319,45 +180,55 @@ END: } } -func (e *msgpackEncDriver) EncodeRawExt(re *RawExt) { +func (e *msgpackEncDriver[T]) EncodeRawExt(re *RawExt) { e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.e.encWr.writeb(re.Data) + e.w.writeb(re.Data) } -func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { +func (e *msgpackEncDriver[T]) encodeExtPreamble(xtag byte, l int) { if l == 1 { - e.e.encWr.writen2(mpFixExt1, xtag) + e.w.writen2(mpFixExt1, xtag) } else if l == 2 { - e.e.encWr.writen2(mpFixExt2, xtag) + e.w.writen2(mpFixExt2, xtag) } else if l == 4 { - e.e.encWr.writen2(mpFixExt4, xtag) + e.w.writen2(mpFixExt4, xtag) } else if l == 8 { - e.e.encWr.writen2(mpFixExt8, xtag) + e.w.writen2(mpFixExt8, xtag) } else if l == 16 { - e.e.encWr.writen2(mpFixExt16, xtag) + e.w.writen2(mpFixExt16, xtag) } else if l < 256 { - e.e.encWr.writen2(mpExt8, byte(l)) - e.e.encWr.writen1(xtag) + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) } else if l < 65536 { - e.e.encWr.writen1(mpExt16) - bigen.writeUint16(e.e.w(), uint16(l)) - e.e.encWr.writen1(xtag) + e.w.writen1(mpExt16) + e.w.writen2(bigen.PutUint16(uint16(l))) + e.w.writen1(xtag) } else { - e.e.encWr.writen1(mpExt32) - bigen.writeUint32(e.e.w(), uint32(l)) - e.e.encWr.writen1(xtag) + e.w.writen1(mpExt32) + e.w.writen4(bigen.PutUint32(uint32(l))) + e.w.writen1(xtag) } } -func (e *msgpackEncDriver) WriteArrayStart(length int) { +func (e *msgpackEncDriver[T]) WriteArrayStart(length int) { e.writeContainerLen(msgpackContainerList, length) } -func (e *msgpackEncDriver) WriteMapStart(length int) { +func (e 
*msgpackEncDriver[T]) WriteMapStart(length int) { e.writeContainerLen(msgpackContainerMap, length) } -func (e *msgpackEncDriver) EncodeString(s string) { +func (e *msgpackEncDriver[T]) WriteArrayEmpty() { + // e.WriteArrayStart(0) = e.writeContainerLen(msgpackContainerList, 0) + e.w.writen1(mpFixArrayMin) +} + +func (e *msgpackEncDriver[T]) WriteMapEmpty() { + // e.WriteMapStart(0) = e.writeContainerLen(msgpackContainerMap, 0) + e.w.writen1(mpFixMapMin) +} + +func (e *msgpackEncDriver[T]) EncodeString(s string) { var ct msgpackContainerType if e.h.WriteExt { if e.h.StringToRaw { @@ -370,53 +241,78 @@ func (e *msgpackEncDriver) EncodeString(s string) { } e.writeContainerLen(ct, len(s)) if len(s) > 0 { - e.e.encWr.writestr(s) + e.w.writestr(s) } } -func (e *msgpackEncDriver) EncodeStringBytesRaw(bs []byte) { - if bs == nil { - e.EncodeNil() - return - } +func (e *msgpackEncDriver[T]) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) } + +func (e *msgpackEncDriver[T]) EncodeStringBytesRaw(bs []byte) { if e.h.WriteExt { e.writeContainerLen(msgpackContainerBin, len(bs)) } else { e.writeContainerLen(msgpackContainerRawLegacy, len(bs)) } if len(bs) > 0 { - e.e.encWr.writeb(bs) + e.w.writeb(bs) } } -func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { +func (e *msgpackEncDriver[T]) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *msgpackEncDriver[T]) writeNilOr(v byte) { + if !e.h.NilCollectionToZeroLength { + v = mpNil + } + e.w.writen1(v) +} + +func (e *msgpackEncDriver[T]) writeNilArray() { + e.writeNilOr(mpFixArrayMin) +} + +func (e *msgpackEncDriver[T]) writeNilMap() { + e.writeNilOr(mpFixMapMin) +} + +func (e *msgpackEncDriver[T]) writeNilBytes() { + e.writeNilOr(mpFixStrMin) +} + +func (e *msgpackEncDriver[T]) writeContainerLen(ct msgpackContainerType, l int) { if ct.fixCutoff > 0 && l < int(ct.fixCutoff) { - e.e.encWr.writen1(ct.bFixMin | byte(l)) + e.w.writen1(ct.bFixMin | byte(l)) } else if ct.b8 > 0 && l < 256 { - e.e.encWr.writen2(ct.b8, uint8(l)) + e.w.writen2(ct.b8, uint8(l)) } else if l < 65536 { - e.e.encWr.writen1(ct.b16) - bigen.writeUint16(e.e.w(), uint16(l)) + e.w.writen1(ct.b16) + e.w.writen2(bigen.PutUint16(uint16(l))) } else { - e.e.encWr.writen1(ct.b32) - bigen.writeUint32(e.e.w(), uint32(l)) + e.w.writen1(ct.b32) + e.w.writen4(bigen.PutUint32(uint32(l))) } } //--------------------------------------------- -type msgpackDecDriver struct { +type msgpackDecDriver[T decReader] struct { decDriverNoopContainerReader decDriverNoopNumberHelper - h *MsgpackHandle - bdAndBdread - _ bool - noBuiltInTypes - d Decoder -} + decInit2er -func (d *msgpackDecDriver) decoder() *Decoder { - return &d.d + h *MsgpackHandle + d *decoderBase + r T + + bdAndBdread + // bytes bool + noBuiltInTypes } // Note: This returns either a primitive (int, bool, etc) for non-containers, @@ -424,7 +320,7 @@ func (d *msgpackDecDriver) decoder() *Decoder { // It is called when a nil interface{} is passed, leaving it up to the DecDriver // to introspect the stream and decide how best to decode. // It deciphers the value by looking at the stream first. 
-func (d *msgpackDecDriver) DecodeNaked() { +func (d *msgpackDecDriver[T]) DecodeNaked() { if !d.bdRead { d.readNextBd() } @@ -445,36 +341,36 @@ func (d *msgpackDecDriver) DecodeNaked() { case mpFloat: n.v = valueTypeFloat - n.f = float64(math.Float32frombits(bigen.Uint32(d.d.decRd.readn4()))) + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4()))) case mpDouble: n.v = valueTypeFloat - n.f = math.Float64frombits(bigen.Uint64(d.d.decRd.readn8())) + n.f = math.Float64frombits(bigen.Uint64(d.r.readn8())) case mpUint8: n.v = valueTypeUint - n.u = uint64(d.d.decRd.readn1()) + n.u = uint64(d.r.readn1()) case mpUint16: n.v = valueTypeUint - n.u = uint64(bigen.Uint16(d.d.decRd.readn2())) + n.u = uint64(bigen.Uint16(d.r.readn2())) case mpUint32: n.v = valueTypeUint - n.u = uint64(bigen.Uint32(d.d.decRd.readn4())) + n.u = uint64(bigen.Uint32(d.r.readn4())) case mpUint64: n.v = valueTypeUint - n.u = uint64(bigen.Uint64(d.d.decRd.readn8())) + n.u = uint64(bigen.Uint64(d.r.readn8())) case mpInt8: n.v = valueTypeInt - n.i = int64(int8(d.d.decRd.readn1())) + n.i = int64(int8(d.r.readn1())) case mpInt16: n.v = valueTypeInt - n.i = int64(int16(bigen.Uint16(d.d.decRd.readn2()))) + n.i = int64(int16(bigen.Uint16(d.r.readn2()))) case mpInt32: n.v = valueTypeInt - n.i = int64(int32(bigen.Uint32(d.d.decRd.readn4()))) + n.i = int64(int32(bigen.Uint32(d.r.readn4()))) case mpInt64: n.v = valueTypeInt - n.i = int64(int64(bigen.Uint64(d.d.decRd.readn8()))) + n.i = int64(int64(bigen.Uint64(d.r.readn8()))) default: switch { @@ -487,7 +383,7 @@ func (d *msgpackDecDriver) DecodeNaked() { n.v = valueTypeInt n.i = int64(int8(bd)) case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - d.d.fauxUnionReadRawBytes(d.h.WriteExt) + d.d.fauxUnionReadRawBytes(d, d.h.WriteExt, d.h.RawToString) //, d.h.ZeroCopy) // if d.h.WriteExt || d.h.RawToString { // n.v = valueTypeString // n.s = d.d.stringZC(d.DecodeStringAsBytes()) @@ -496,7 +392,7 @@ func (d *msgpackDecDriver) DecodeNaked() { // n.l = d.DecodeBytes([]byte{}) // } case bd == mpBin8, bd == mpBin16, bd == mpBin32: - d.d.fauxUnionReadRawBytes(false) + d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) //, d.h.ZeroCopy) case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: n.v = valueTypeArray decodeFurther = true @@ -506,17 +402,15 @@ func (d *msgpackDecDriver) DecodeNaked() { case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: n.v = valueTypeExt clen := d.readExtLen() - n.u = uint64(d.d.decRd.readn1()) + n.u = uint64(d.r.readn1()) if n.u == uint64(mpTimeExtTagU) { n.v = valueTypeTime n.t = d.decodeTime(clen) - } else if d.d.bytes { - n.l = d.d.decRd.rb.readx(uint(clen)) } else { - n.l = decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, d.d.b[:]) + n.l = d.r.readx(uint(clen)) } default: - d.d.errorf("cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) + halt.errorf("cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) } } if !decodeFurther { @@ -528,32 +422,18 @@ func (d *msgpackDecDriver) DecodeNaked() { } } -func (d *msgpackDecDriver) nextValueBytes(v0 []byte) (v []byte) { +func (d *msgpackDecDriver[T]) nextValueBytes() (v []byte) { if !d.bdRead { d.readNextBd() } - v = v0 - var h = decNextValueBytesHelper{d: &d.d} - var cursor = d.d.rb.c - 1 - h.append1(&v, d.bd) - v = d.nextValueBytesBdReadR(v) + d.r.startRecording() + d.nextValueBytesBdReadR() + v = d.r.stopRecording() d.bdRead = false - h.bytesRdV(&v, cursor) return } -func (d *msgpackDecDriver) 
nextValueBytesR(v0 []byte) (v []byte) { - d.readNextBd() - v = v0 - var h = decNextValueBytesHelper{d: &d.d} - h.append1(&v, d.bd) - return d.nextValueBytesBdReadR(v) -} - -func (d *msgpackDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) { - v = v0 - var h = decNextValueBytesHelper{d: &d.d} - +func (d *msgpackDecDriver[T]) nextValueBytesBdReadR() { bd := d.bd var clen uint @@ -561,88 +441,84 @@ func (d *msgpackDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) { switch bd { case mpNil, mpFalse, mpTrue: // pass case mpUint8, mpInt8: - h.append1(&v, d.d.decRd.readn1()) + d.r.readn1() case mpUint16, mpInt16: - h.appendN(&v, d.d.decRd.readx(2)...) + d.r.skip(2) case mpFloat, mpUint32, mpInt32: - h.appendN(&v, d.d.decRd.readx(4)...) + d.r.skip(4) case mpDouble, mpUint64, mpInt64: - h.appendN(&v, d.d.decRd.readx(8)...) + d.r.skip(8) case mpStr8, mpBin8: - clen = uint(d.d.decRd.readn1()) - h.append1(&v, byte(clen)) - h.appendN(&v, d.d.decRd.readx(clen)...) + clen = uint(d.r.readn1()) + d.r.skip(clen) case mpStr16, mpBin16: - x := d.d.decRd.readn2() - h.appendN(&v, x[:]...) + x := d.r.readn2() clen = uint(bigen.Uint16(x)) - h.appendN(&v, d.d.decRd.readx(clen)...) + d.r.skip(clen) case mpStr32, mpBin32: - x := d.d.decRd.readn4() - h.appendN(&v, x[:]...) + x := d.r.readn4() clen = uint(bigen.Uint32(x)) - h.appendN(&v, d.d.decRd.readx(clen)...) + d.r.skip(clen) case mpFixExt1: - h.append1(&v, d.d.decRd.readn1()) // tag - h.append1(&v, d.d.decRd.readn1()) + d.r.readn1() // tag + d.r.readn1() case mpFixExt2: - h.append1(&v, d.d.decRd.readn1()) // tag - h.appendN(&v, d.d.decRd.readx(2)...) + d.r.readn1() // tag + d.r.skip(2) case mpFixExt4: - h.append1(&v, d.d.decRd.readn1()) // tag - h.appendN(&v, d.d.decRd.readx(4)...) + d.r.readn1() // tag + d.r.skip(4) case mpFixExt8: - h.append1(&v, d.d.decRd.readn1()) // tag - h.appendN(&v, d.d.decRd.readx(8)...) + d.r.readn1() // tag + d.r.skip(8) case mpFixExt16: - h.append1(&v, d.d.decRd.readn1()) // tag - h.appendN(&v, d.d.decRd.readx(16)...) + d.r.readn1() // tag + d.r.skip(16) case mpExt8: - clen = uint(d.d.decRd.readn1()) - h.append1(&v, uint8(clen)) - h.append1(&v, d.d.decRd.readn1()) // tag - h.appendN(&v, d.d.decRd.readx(clen)...) + clen = uint(d.r.readn1()) + d.r.readn1() // tag + d.r.skip(clen) case mpExt16: - x := d.d.decRd.readn2() + x := d.r.readn2() clen = uint(bigen.Uint16(x)) - h.appendN(&v, x[:]...) - h.append1(&v, d.d.decRd.readn1()) // tag - h.appendN(&v, d.d.decRd.readx(clen)...) + d.r.readn1() // tag + d.r.skip(clen) case mpExt32: - x := d.d.decRd.readn4() + x := d.r.readn4() clen = uint(bigen.Uint32(x)) - h.appendN(&v, x[:]...) - h.append1(&v, d.d.decRd.readn1()) // tag - h.appendN(&v, d.d.decRd.readx(clen)...) + d.r.readn1() // tag + d.r.skip(clen) case mpArray16: - x := d.d.decRd.readn2() + x := d.r.readn2() clen = uint(bigen.Uint16(x)) - h.appendN(&v, x[:]...) for i := uint(0); i < clen; i++ { - v = d.nextValueBytesR(v) + d.readNextBd() + d.nextValueBytesBdReadR() } case mpArray32: - x := d.d.decRd.readn4() + x := d.r.readn4() clen = uint(bigen.Uint32(x)) - h.appendN(&v, x[:]...) for i := uint(0); i < clen; i++ { - v = d.nextValueBytesR(v) + d.readNextBd() + d.nextValueBytesBdReadR() } case mpMap16: - x := d.d.decRd.readn2() + x := d.r.readn2() clen = uint(bigen.Uint16(x)) - h.appendN(&v, x[:]...) 
for i := uint(0); i < clen; i++ { - v = d.nextValueBytesR(v) - v = d.nextValueBytesR(v) + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() } case mpMap32: - x := d.d.decRd.readn4() + x := d.r.readn4() clen = uint(bigen.Uint32(x)) - h.appendN(&v, x[:]...) for i := uint(0); i < clen; i++ { - v = d.nextValueBytesR(v) - v = d.nextValueBytesR(v) + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() } default: switch { @@ -650,65 +526,68 @@ func (d *msgpackDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) { case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: // pass case bd >= mpFixStrMin && bd <= mpFixStrMax: clen = uint(mpFixStrMin ^ bd) - h.appendN(&v, d.d.decRd.readx(clen)...) + d.r.skip(clen) case bd >= mpFixArrayMin && bd <= mpFixArrayMax: clen = uint(mpFixArrayMin ^ bd) for i := uint(0); i < clen; i++ { - v = d.nextValueBytesR(v) + d.readNextBd() + d.nextValueBytesBdReadR() } case bd >= mpFixMapMin && bd <= mpFixMapMax: clen = uint(mpFixMapMin ^ bd) for i := uint(0); i < clen; i++ { - v = d.nextValueBytesR(v) - v = d.nextValueBytesR(v) + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() } default: - d.d.errorf("nextValueBytes: cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) + halt.errorf("nextValueBytes: cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) } } return } -func (d *msgpackDecDriver) decFloat4Int32() (f float32) { - fbits := bigen.Uint32(d.d.decRd.readn4()) +func (d *msgpackDecDriver[T]) decFloat4Int32() (f float32) { + fbits := bigen.Uint32(d.r.readn4()) f = math.Float32frombits(fbits) if !noFrac32(fbits) { - d.d.errorf("assigning integer value from float32 with a fraction: %v", f) + halt.errorf("assigning integer value from float32 with a fraction: %v", f) } return } -func (d *msgpackDecDriver) decFloat4Int64() (f float64) { - fbits := bigen.Uint64(d.d.decRd.readn8()) +func (d *msgpackDecDriver[T]) decFloat4Int64() (f float64) { + fbits := bigen.Uint64(d.r.readn8()) f = math.Float64frombits(fbits) if !noFrac64(fbits) { - d.d.errorf("assigning integer value from float64 with a fraction: %v", f) + halt.errorf("assigning integer value from float64 with a fraction: %v", f) } return } // int can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) DecodeInt64() (i int64) { +func (d *msgpackDecDriver[T]) DecodeInt64() (i int64) { if d.advanceNil() { return } switch d.bd { case mpUint8: - i = int64(uint64(d.d.decRd.readn1())) + i = int64(uint64(d.r.readn1())) case mpUint16: - i = int64(uint64(bigen.Uint16(d.d.decRd.readn2()))) + i = int64(uint64(bigen.Uint16(d.r.readn2()))) case mpUint32: - i = int64(uint64(bigen.Uint32(d.d.decRd.readn4()))) + i = int64(uint64(bigen.Uint32(d.r.readn4()))) case mpUint64: - i = int64(bigen.Uint64(d.d.decRd.readn8())) + i = int64(bigen.Uint64(d.r.readn8())) case mpInt8: - i = int64(int8(d.d.decRd.readn1())) + i = int64(int8(d.r.readn1())) case mpInt16: - i = int64(int16(bigen.Uint16(d.d.decRd.readn2()))) + i = int64(int16(bigen.Uint16(d.r.readn2()))) case mpInt32: - i = int64(int32(bigen.Uint32(d.d.decRd.readn4()))) + i = int64(int32(bigen.Uint32(d.r.readn4()))) case mpInt64: - i = int64(bigen.Uint64(d.d.decRd.readn8())) + i = int64(bigen.Uint64(d.r.readn8())) case mpFloat: i = int64(d.decFloat4Int32()) case mpDouble: @@ -720,7 +599,7 @@ func (d *msgpackDecDriver) DecodeInt64() (i int64) { case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: i = int64(int8(d.bd)) default: - 
d.d.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + halt.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) } } d.bdRead = false @@ -728,63 +607,63 @@ func (d *msgpackDecDriver) DecodeInt64() (i int64) { } // uint can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) DecodeUint64() (ui uint64) { +func (d *msgpackDecDriver[T]) DecodeUint64() (ui uint64) { if d.advanceNil() { return } switch d.bd { case mpUint8: - ui = uint64(d.d.decRd.readn1()) + ui = uint64(d.r.readn1()) case mpUint16: - ui = uint64(bigen.Uint16(d.d.decRd.readn2())) + ui = uint64(bigen.Uint16(d.r.readn2())) case mpUint32: - ui = uint64(bigen.Uint32(d.d.decRd.readn4())) + ui = uint64(bigen.Uint32(d.r.readn4())) case mpUint64: - ui = bigen.Uint64(d.d.decRd.readn8()) + ui = bigen.Uint64(d.r.readn8()) case mpInt8: - if i := int64(int8(d.d.decRd.readn1())); i >= 0 { + if i := int64(int8(d.r.readn1())); i >= 0 { ui = uint64(i) } else { - d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + halt.errorf("assigning negative signed value: %v, to unsigned type", i) } case mpInt16: - if i := int64(int16(bigen.Uint16(d.d.decRd.readn2()))); i >= 0 { + if i := int64(int16(bigen.Uint16(d.r.readn2()))); i >= 0 { ui = uint64(i) } else { - d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + halt.errorf("assigning negative signed value: %v, to unsigned type", i) } case mpInt32: - if i := int64(int32(bigen.Uint32(d.d.decRd.readn4()))); i >= 0 { + if i := int64(int32(bigen.Uint32(d.r.readn4()))); i >= 0 { ui = uint64(i) } else { - d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + halt.errorf("assigning negative signed value: %v, to unsigned type", i) } case mpInt64: - if i := int64(bigen.Uint64(d.d.decRd.readn8())); i >= 0 { + if i := int64(bigen.Uint64(d.r.readn8())); i >= 0 { ui = uint64(i) } else { - d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + halt.errorf("assigning negative signed value: %v, to unsigned type", i) } case mpFloat: if f := d.decFloat4Int32(); f >= 0 { ui = uint64(f) } else { - d.d.errorf("assigning negative float value: %v, to unsigned type", f) + halt.errorf("assigning negative float value: %v, to unsigned type", f) } case mpDouble: if f := d.decFloat4Int64(); f >= 0 { ui = uint64(f) } else { - d.d.errorf("assigning negative float value: %v, to unsigned type", f) + halt.errorf("assigning negative float value: %v, to unsigned type", f) } default: switch { case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: ui = uint64(d.bd) case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - d.d.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd)) + halt.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd)) default: - d.d.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + halt.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) } } d.bdRead = false @@ -792,14 +671,14 @@ func (d *msgpackDecDriver) DecodeUint64() (ui uint64) { } // float can either be decoded from msgpack type: float, double or intX -func (d *msgpackDecDriver) DecodeFloat64() (f float64) { +func (d *msgpackDecDriver[T]) DecodeFloat64() (f float64) { if d.advanceNil() { return } if d.bd == mpFloat { - f = float64(math.Float32frombits(bigen.Uint32(d.d.decRd.readn4()))) + f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4()))) } else if d.bd == mpDouble { - f = 
math.Float64frombits(bigen.Uint64(d.d.decRd.readn8())) + f = math.Float64frombits(bigen.Uint64(d.r.readn8())) } else { f = float64(d.DecodeInt64()) } @@ -808,7 +687,7 @@ func (d *msgpackDecDriver) DecodeFloat64() (f float64) { } // bool can be decoded from bool, fixnum 0 or 1. -func (d *msgpackDecDriver) DecodeBool() (b bool) { +func (d *msgpackDecDriver[T]) DecodeBool() (b bool) { if d.advanceNil() { return } @@ -817,18 +696,18 @@ func (d *msgpackDecDriver) DecodeBool() (b bool) { } else if d.bd == mpTrue || d.bd == 1 { b = true } else { - d.d.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + halt.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) } d.bdRead = false return } -func (d *msgpackDecDriver) DecodeBytes(bs []byte) (bsOut []byte) { - d.d.decByteState = decByteStateNone +func (d *msgpackDecDriver[T]) DecodeBytes() (bs []byte, state dBytesAttachState) { if d.advanceNil() { return } + var cond bool bd := d.bd var clen int if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { @@ -838,58 +717,43 @@ func (d *msgpackDecDriver) DecodeBytes(bs []byte) (bsOut []byte) { clen = d.readContainerLen(msgpackContainerStr) // string/raw } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { - // check if an "array" of uint8's - if bs == nil { - d.d.decByteState = decByteStateReuseBuf - bs = d.d.b[:] - } - // bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) slen := d.ReadArrayStart() - var changed bool - if bs, changed = usableByteSlice(bs, slen); changed { - d.d.decByteState = decByteStateNone - } + bs, cond = usableByteSlice(d.d.buf, slen) for i := 0; i < len(bs); i++ { bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8)) } for i := len(bs); i < slen; i++ { bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) } - return bs + if cond { + d.d.buf = bs + } + state = dBytesAttachBuffer + return } else { - d.d.errorf("invalid byte descriptor for decoding bytes, got: 0x%x", d.bd) + halt.errorf("invalid byte descriptor for decoding bytes, got: 0x%x", d.bd) } d.bdRead = false - if d.d.zerocopy() { - d.d.decByteState = decByteStateZerocopy - return d.d.decRd.rb.readx(uint(clen)) - } - if bs == nil { - d.d.decByteState = decByteStateReuseBuf - bs = d.d.b[:] - } - return decByteSlice(d.d.r(), clen, d.h.MaxInitLen, bs) + bs, cond = d.r.readxb(uint(clen)) + state = d.d.attachState(cond) + return } -func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) { - s = d.DecodeBytes(nil) - if d.h.ValidateUnicode && !utf8.Valid(s) { - d.d.errorf("DecodeStringAsBytes: invalid UTF-8: %s", s) +func (d *msgpackDecDriver[T]) DecodeStringAsBytes() (out []byte, state dBytesAttachState) { + out, state = d.DecodeBytes() + if d.h.ValidateUnicode && !utf8.Valid(out) { + halt.errorf("DecodeStringAsBytes: invalid UTF-8: %s", out) } return } -func (d *msgpackDecDriver) descBd() string { - return sprintf("%v (%s)", d.bd, mpdesc(d.bd)) -} - -func (d *msgpackDecDriver) readNextBd() { - d.bd = d.d.decRd.readn1() +func (d *msgpackDecDriver[T]) readNextBd() { + d.bd = d.r.readn1() d.bdRead = true } -func (d *msgpackDecDriver) advanceNil() (null bool) { +func (d *msgpackDecDriver[T]) advanceNil() (null bool) { if !d.bdRead { d.readNextBd() } @@ -900,11 +764,11 @@ func (d *msgpackDecDriver) advanceNil() (null bool) { return } -func (d *msgpackDecDriver) TryNil() (v bool) { +func (d *msgpackDecDriver[T]) TryNil() (v bool) { return d.advanceNil() } -func (d *msgpackDecDriver) ContainerType() (vt valueType) { +func (d *msgpackDecDriver[T]) 
ContainerType() (vt valueType) { if !d.bdRead { d.readNextBd() } @@ -928,38 +792,38 @@ func (d *msgpackDecDriver) ContainerType() (vt valueType) { return valueTypeUnset } -func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { +func (d *msgpackDecDriver[T]) readContainerLen(ct msgpackContainerType) (clen int) { bd := d.bd if bd == ct.b8 { - clen = int(d.d.decRd.readn1()) + clen = int(d.r.readn1()) } else if bd == ct.b16 { - clen = int(bigen.Uint16(d.d.decRd.readn2())) + clen = int(bigen.Uint16(d.r.readn2())) } else if bd == ct.b32 { - clen = int(bigen.Uint32(d.d.decRd.readn4())) + clen = int(bigen.Uint32(d.r.readn4())) } else if (ct.bFixMin & bd) == ct.bFixMin { clen = int(ct.bFixMin ^ bd) } else { - d.d.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + halt.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) } d.bdRead = false return } -func (d *msgpackDecDriver) ReadMapStart() int { +func (d *msgpackDecDriver[T]) ReadMapStart() int { if d.advanceNil() { return containerLenNil } return d.readContainerLen(msgpackContainerMap) } -func (d *msgpackDecDriver) ReadArrayStart() int { +func (d *msgpackDecDriver[T]) ReadArrayStart() int { if d.advanceNil() { return containerLenNil } return d.readContainerLen(msgpackContainerList) } -func (d *msgpackDecDriver) readExtLen() (clen int) { +func (d *msgpackDecDriver[T]) readExtLen() (clen int) { switch d.bd { case mpFixExt1: clen = 1 @@ -972,18 +836,18 @@ func (d *msgpackDecDriver) readExtLen() (clen int) { case mpFixExt16: clen = 16 case mpExt8: - clen = int(d.d.decRd.readn1()) + clen = int(d.r.readn1()) case mpExt16: - clen = int(bigen.Uint16(d.d.decRd.readn2())) + clen = int(bigen.Uint16(d.r.readn2())) case mpExt32: - clen = int(bigen.Uint32(d.d.decRd.readn4())) + clen = int(bigen.Uint32(d.r.readn4())) default: - d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + halt.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) } return } -func (d *msgpackDecDriver) DecodeTime() (t time.Time) { +func (d *msgpackDecDriver[T]) DecodeTime() (t time.Time) { // decode time from string bytes or ext if d.advanceNil() { return @@ -998,240 +862,158 @@ func (d *msgpackDecDriver) DecodeTime() (t time.Time) { } else { // expect to see mpFixExt4,-1 OR mpFixExt8,-1 OR mpExt8,12,-1 d.bdRead = false - b2 := d.d.decRd.readn1() + b2 := d.r.readn1() if d.bd == mpFixExt4 && b2 == mpTimeExtTagU { clen = 4 } else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU { clen = 8 - } else if d.bd == mpExt8 && b2 == 12 && d.d.decRd.readn1() == mpTimeExtTagU { + } else if d.bd == mpExt8 && b2 == 12 && d.r.readn1() == mpTimeExtTagU { clen = 12 } else { - d.d.errorf("invalid stream for decoding time as extension: got 0x%x, 0x%x", d.bd, b2) + halt.errorf("invalid stream for decoding time as extension: got 0x%x, 0x%x", d.bd, b2) } } return d.decodeTime(clen) } -func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) { +func (d *msgpackDecDriver[T]) decodeTime(clen int) (t time.Time) { d.bdRead = false switch clen { case 4: - t = time.Unix(int64(bigen.Uint32(d.d.decRd.readn4())), 0).UTC() + t = time.Unix(int64(bigen.Uint32(d.r.readn4())), 0).UTC() case 8: - tv := bigen.Uint64(d.d.decRd.readn8()) + tv := bigen.Uint64(d.r.readn8()) t = time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC() case 12: - nsec := bigen.Uint32(d.d.decRd.readn4()) - sec := bigen.Uint64(d.d.decRd.readn8()) + nsec := bigen.Uint32(d.r.readn4()) + sec := bigen.Uint64(d.r.readn8()) t = 
time.Unix(int64(sec), int64(nsec)).UTC() default: - d.d.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen) + halt.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen) } return } -func (d *msgpackDecDriver) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { - if xtag > 0xff { - d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) - } - if d.advanceNil() { +func (d *msgpackDecDriver[T]) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + xbs, _, _, ok := d.decodeExtV(ext != nil, xtag) + if !ok { return } - xbs, realxtag1, zerocopy := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag := uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.setData(xbs, zerocopy) - } else if ext == SelfExt { - d.d.sideDecode(rv, basetype, xbs) + if ext == SelfExt { + sideDecode(d.h, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv, xbs, basetype, true) }) } else { ext.ReadExt(rv, xbs) } } -func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xbs []byte, xtag byte, zerocopy bool) { +func (d *msgpackDecDriver[T]) DecodeRawExt(re *RawExt) { + xbs, realxtag, state, ok := d.decodeExtV(false, 0) + if !ok { + return + } + re.Tag = uint64(realxtag) + re.setData(xbs, state >= dBytesAttachViewZerocopy) +} + +func (d *msgpackDecDriver[T]) decodeExtV(verifyTag bool, xtagIn uint64) (xbs []byte, xtag byte, bstate dBytesAttachState, ok bool) { + if xtagIn > 0xff { + halt.errorf("ext: tag must be <= 0xff; got: %v", xtagIn) + } + if d.advanceNil() { + return + } + tag := uint8(xtagIn) xbd := d.bd if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { - xbs = d.DecodeBytes(nil) + xbs, bstate = d.DecodeBytes() } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { - xbs = d.DecodeStringAsBytes() + xbs, bstate = d.DecodeStringAsBytes() } else { clen := d.readExtLen() - xtag = d.d.decRd.readn1() + xtag = d.r.readn1() if verifyTag && xtag != tag { - d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag) - } - if d.d.bytes { - xbs = d.d.decRd.rb.readx(uint(clen)) - zerocopy = true - } else { - xbs = decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, d.d.b[:]) + halt.errorf("wrong extension tag - got %b, expecting %v", xtag, tag) } + xbs, ok = d.r.readxb(uint(clen)) + bstate = d.d.attachState(ok) + // zerocopy = d.d.bytes } d.bdRead = false + ok = true return } -//-------------------------------------------------- - -// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. -type MsgpackHandle struct { - binaryEncodingType - BasicHandle - - // NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum. - NoFixedNum bool - - // WriteExt controls whether the new spec is honored. - // - // With WriteExt=true, we can encode configured extensions with extension tags - // and encode string/[]byte/extensions in a way compatible with the new spec - // but incompatible with the old spec. - // - // For compatibility with the old spec, set WriteExt=false. - // - // With WriteExt=false: - // configured extensions are serialized as raw bytes (not msgpack extensions). - // reserved byte descriptors like Str8 and those enabling the new msgpack Binary type - // are not encoded. - WriteExt bool - - // PositiveIntUnsigned says to encode positive integers as unsigned. 
- PositiveIntUnsigned bool -} - -// Name returns the name of the handle: msgpack -func (h *MsgpackHandle) Name() string { return "msgpack" } - -func (h *MsgpackHandle) desc(bd byte) string { return mpdesc(bd) } - -func (h *MsgpackHandle) newEncDriver() encDriver { - var e = &msgpackEncDriver{h: h} - e.e.e = e - e.e.init(h) - e.reset() - return e -} - -func (h *MsgpackHandle) newDecDriver() decDriver { - d := &msgpackDecDriver{h: h} - d.d.d = d - d.d.init(h) - d.reset() - return d -} - -//-------------------------------------------------- - -type msgpackSpecRpcCodec struct { - rpcCodec -} - -// /////////////// Spec RPC Codec /////////////////// -func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // WriteRequest can write to both a Go service, and other services that do - // not abide by the 1 argument rule of a Go service. - // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs - var bodyArr []interface{} - if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { - bodyArr = ([]interface{})(m) - } else { - bodyArr = []interface{}{body} - } - r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} - return c.write(r2) -} - -func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - var moe interface{} - if r.Error != "" { - moe = r.Error - } - if moe != nil && body != nil { - body = nil - } - r2 := []interface{}{1, uint32(r.Seq), moe, body} - return c.write(r2) -} - -func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.parseCustomHeader(1, &r.Seq, &r.Error) -} - -func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) -} - -func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { - if body == nil { // read and discard - return c.read(nil) - } - bodyArr := []interface{}{body} - return c.read(&bodyArr) -} - -func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { - if cls := c.cls.load(); cls.closed { - return io.ErrUnexpectedEOF - } - - // We read the response header by hand - // so that the body can be decoded on its own from the stream at a later time. - - const fia byte = 0x94 //four item array descriptor value - - var ba [1]byte - var n int - for { - n, err = c.r.Read(ba[:]) - if err != nil { - return - } - if n == 1 { - break - } - } - - var b = ba[0] - if b != fia { - err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b)) - } else { - err = c.read(&b) - if err == nil { - if b != expectTypeByte { - err = fmt.Errorf("%s - expecting %v but got %x/%s", msgBadDesc, expectTypeByte, b, mpdesc(b)) - } else { - err = c.read(msgid) - if err == nil { - err = c.read(methodOrError) - } - } - } - } - return -} - -//-------------------------------------------------- - -// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol -// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md -type msgpackSpecRpc struct{} - -// MsgpackSpecRpc implements Rpc using the communication protocol defined in -// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// ---- // -// See GoRpc documentation, for information on buffering for better performance. -var MsgpackSpecRpc msgpackSpecRpc +// The following below are similar across all format files (except for the format name). 
+// +// We keep them together here, so that we can easily copy and compare. -func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +// ---- + +func (d *msgpackEncDriver[T]) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*MsgpackHandle) + d.e = shared + if shared.bytes { + fp = msgpackFpEncBytes + } else { + fp = msgpackFpEncIO + } + // d.w.init() + d.init2(enc) + return } -func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +func (e *msgpackEncDriver[T]) writeBytesAsis(b []byte) { e.w.writeb(b) } + +// func (e *msgpackEncDriver[T]) writeStringAsisDblQuoted(v string) { e.w.writeqstr(v) } + +func (e *msgpackEncDriver[T]) writerEnd() { e.w.end() } + +func (e *msgpackEncDriver[T]) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) } -var _ decDriver = (*msgpackDecDriver)(nil) -var _ encDriver = (*msgpackEncDriver)(nil) +func (e *msgpackEncDriver[T]) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +// ---- + +func (d *msgpackDecDriver[T]) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*MsgpackHandle) + d.d = shared + if shared.bytes { + fp = msgpackFpDecBytes + } else { + fp = msgpackFpDecIO + } + // d.r.init() + d.init2(dec) + return +} + +func (d *msgpackDecDriver[T]) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *msgpackDecDriver[T]) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *msgpackDecDriver[T]) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +// ---- (custom stanza) + +func (d *msgpackDecDriver[T]) descBd() string { + return sprintf("%v (%s)", d.bd, mpdesc(d.bd)) +} + +func (d *msgpackDecDriver[T]) DecodeFloat32() (f float32) { + return float32(chkOvf.Float32V(d.DecodeFloat64())) +} diff --git a/vendor/github.com/ugorji/go/codec/msgpack.mono.generated.go b/vendor/github.com/ugorji/go/codec/msgpack.mono.generated.go new file mode 100644 index 000000000..ff2fe4af9 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/msgpack.mono.generated.go @@ -0,0 +1,8046 @@ +//go:build !notmono && !codec.notmono + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "encoding" + + "io" + "math" + "reflect" + "slices" + "sort" + "strconv" + "sync" + "time" + "unicode/utf8" +) + +type helperEncDriverMsgpackBytes struct{} +type encFnMsgpackBytes struct { + i encFnInfo + fe func(*encoderMsgpackBytes, *encFnInfo, reflect.Value) +} +type encRtidFnMsgpackBytes struct { + rtid uintptr + fn *encFnMsgpackBytes +} +type encoderMsgpackBytes struct { + dh helperEncDriverMsgpackBytes + fp *fastpathEsMsgpackBytes + e msgpackEncDriverBytes + encoderBase +} +type helperDecDriverMsgpackBytes struct{} +type decFnMsgpackBytes struct { + i decFnInfo + fd func(*decoderMsgpackBytes, *decFnInfo, reflect.Value) +} +type decRtidFnMsgpackBytes struct { + rtid uintptr + fn *decFnMsgpackBytes +} +type decoderMsgpackBytes struct { + dh helperDecDriverMsgpackBytes + fp *fastpathDsMsgpackBytes + d msgpackDecDriverBytes + decoderBase +} +type msgpackEncDriverBytes struct { + noBuiltInTypes + encDriverNoopContainerWriter + encDriverNoState + encDriverContainerNoTrackerT + encInit2er + + h *MsgpackHandle + e *encoderBase + w bytesEncAppender +} +type msgpackDecDriverBytes struct { + decDriverNoopContainerReader + decDriverNoopNumberHelper + decInit2er + + h *MsgpackHandle + d *decoderBase + r bytesDecReader + + bdAndBdread + + noBuiltInTypes +} + +func (e *encoderMsgpackBytes) rawExt(_ *encFnInfo, rv reflect.Value) { + if re := rv2i(rv).(*RawExt); re == nil { + e.e.EncodeNil() + } else { + e.e.EncodeRawExt(re) + } +} + +func (e *encoderMsgpackBytes) ext(f *encFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (e *encoderMsgpackBytes) selferMarshal(_ *encFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(&Encoder{e}) +} + +func (e *encoderMsgpackBytes) binaryMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) +} + +func (e *encoderMsgpackBytes) textMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) +} + +func (e *encoderMsgpackBytes) jsonMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) +} + +func (e *encoderMsgpackBytes) raw(_ *encFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *encoderMsgpackBytes) encodeComplex64(v complex64) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat32(real(v)) +} + +func (e *encoderMsgpackBytes) encodeComplex128(v complex128) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat64(real(v)) +} + +func (e *encoderMsgpackBytes) kBool(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeBool(rvGetBool(rv)) +} + +func (e *encoderMsgpackBytes) kTime(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeTime(rvGetTime(rv)) +} + +func (e *encoderMsgpackBytes) kString(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeString(rvGetString(rv)) +} + +func (e *encoderMsgpackBytes) kFloat32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(rvGetFloat32(rv)) +} + +func (e *encoderMsgpackBytes) kFloat64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rvGetFloat64(rv)) +} + +func (e *encoderMsgpackBytes) kComplex64(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex64(rvGetComplex64(rv)) +} + +func (e *encoderMsgpackBytes) kComplex128(_ *encFnInfo, rv reflect.Value) { 
+ e.encodeComplex128(rvGetComplex128(rv)) +} + +func (e *encoderMsgpackBytes) kInt(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt(rv))) +} + +func (e *encoderMsgpackBytes) kInt8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt8(rv))) +} + +func (e *encoderMsgpackBytes) kInt16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt16(rv))) +} + +func (e *encoderMsgpackBytes) kInt32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt32(rv))) +} + +func (e *encoderMsgpackBytes) kInt64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt64(rv))) +} + +func (e *encoderMsgpackBytes) kUint(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint(rv))) +} + +func (e *encoderMsgpackBytes) kUint8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint8(rv))) +} + +func (e *encoderMsgpackBytes) kUint16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint16(rv))) +} + +func (e *encoderMsgpackBytes) kUint32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint32(rv))) +} + +func (e *encoderMsgpackBytes) kUint64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint64(rv))) +} + +func (e *encoderMsgpackBytes) kUintptr(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUintptr(rv))) +} + +func (e *encoderMsgpackBytes) kSeqFn(rt reflect.Type) (fn *encFnMsgpackBytes) { + + if rt = baseRT(rt); rt.Kind() != reflect.Interface { + fn = e.fn(rt) + } + return +} + +func (e *encoderMsgpackBytes) kArrayWMbs(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(l) + e.mapStart(l >> 1) + + var fn *encFnMsgpackBytes + builtin := ti.tielem.flagEncBuiltin + if !builtin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerMapKey + e.e.WriteMapElemKey(true) + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(false) + } else { + e.mapElemValue() + } + } + e.c = 0 + e.e.WriteMapEnd() + +} + +func (e *encoderMsgpackBytes) kArrayW(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l <= 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(l) + + var fn *encFnMsgpackBytes + if !ti.tielem.flagEncBuiltin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerArrayElem + e.e.WriteArrayElem(true) + builtin := ti.tielem.flagEncBuiltin + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + e.c = containerArrayElem + e.e.WriteArrayElem(false) + } + + e.c = 0 + e.e.WriteArrayEnd() +} + +func (e *encoderMsgpackBytes) kChan(f *encFnInfo, rv reflect.Value) { + if f.ti.chandir&uint8(reflect.RecvDir) == 0 { + halt.errorStr("send-only channel cannot be encoded") + } + if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) { + e.kSliceBytesChan(rv) + return + } + rtslice := reflect.SliceOf(f.ti.elem) + rv = chanToSlice(rv, rtslice, e.h.ChanRecvTimeout) + ti := e.h.getTypeInfo(rt2id(rtslice), rtslice) + if f.ti.mbs { + e.kArrayWMbs(rv, ti, true) + } else { + e.kArrayW(rv, ti, true) + } +} + +func (e *encoderMsgpackBytes) kSlice(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + 
e.kArrayWMbs(rv, f.ti, true) + } else if f.ti.rtid == uint8SliceTypId || uint8TypId == rt2id(f.ti.elem) { + + e.e.EncodeBytes(rvGetBytes(rv)) + } else { + e.kArrayW(rv, f.ti, true) + } +} + +func (e *encoderMsgpackBytes) kArray(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, false) + } else if handleBytesWithinKArray && uint8TypId == rt2id(f.ti.elem) { + e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, nil)) + } else { + e.kArrayW(rv, f.ti, false) + } +} + +func (e *encoderMsgpackBytes) kSliceBytesChan(rv reflect.Value) { + + bs0 := e.blist.peek(32, true) + bs := bs0 + + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + +L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + + break L1 + } + } + default: + for b := range ch { + bs = append(bs, b) + } + } + + e.e.EncodeBytes(bs) + e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.blist.put(bs0) + } +} + +func (e *encoderMsgpackBytes) kStructFieldKey(keyType valueType, encName string) { + + if keyType == valueTypeString { + e.e.EncodeString(encName) + } else if keyType == valueTypeInt { + e.e.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + e.e.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + e.e.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64))) + } else { + halt.errorStr2("invalid struct key type: ", keyType.String()) + } + +} + +func (e *encoderMsgpackBytes) kStructSimple(f *encFnInfo, rv reflect.Value) { + _ = e.e + tisfi := f.ti.sfi.source() + + chkCirRef := e.h.CheckCircularRef + var si *structFieldInfo + var j int + + if f.ti.toArray || e.h.StructToArray { + if len(tisfi) == 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } else { + if len(tisfi) == 0 { + e.e.WriteMapEmpty() + return + } + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + e.mapStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.EncodeStringNoEscape4Json(si.encName) + e.mapElemValue() + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteMapEnd() + } +} + +func (e *encoderMsgpackBytes) kStruct(f *encFnInfo, rv reflect.Value) { + _ = e.e + ti := f.ti + toMap := !(ti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if ti.flagMissingFielder { + toMap = true + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + } else if ti.flagMissingFielderPtr { + toMap = true + if rv.CanAddr() { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder).CodecMissingFields() + } else { + mf = rv2i(e.addrRV(rv, ti.rt, ti.ptr)).(MissingFielder).CodecMissingFields() + } + } + newlen := len(mf) + tisfi := ti.sfi.source() + newlen += len(tisfi) + + var fkvs = e.slist.get(newlen)[:newlen] + + recur := e.h.RecursiveEmptyCheck + chkCirRef := e.h.CheckCircularRef + + var xlen int + + var kv sfiRv + var j int + var sf encStructFieldObj + if toMap { + newlen = 0 + if 
e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + for _, si := range tisfi { + + if si.omitEmpty { + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyValue(kv.r, e.h.TypeInfos, recur) { + continue + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[newlen] = kv + newlen++ + } + + var mf2s []stringIntf + if len(mf) != 0 { + mf2s = make([]stringIntf, 0, len(mf)) + for k, v := range mf { + if k == "" { + continue + } + if ti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur) { + continue + } + mf2s = append(mf2s, stringIntf{k, v}) + } + } + + xlen = newlen + len(mf2s) + if xlen == 0 { + e.e.WriteMapEmpty() + goto END + } + + e.mapStart(xlen) + + if len(mf2s) != 0 && e.h.Canonical { + mf2w := make([]encStructFieldObj, newlen+len(mf2s)) + for j = 0; j < newlen; j++ { + kv = fkvs[j] + mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, true, + !kv.v.encNameEscape4Json, kv.v.encBuiltin} + } + for _, v := range mf2s { + mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false, false} + j++ + } + sort.Sort((encStructFieldObjSlice)(mf2w)) + for j, sf = range mf2w { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && sf.noEsc4json { + e.e.EncodeStringNoEscape4Json(sf.key) + } else { + e.kStructFieldKey(ti.keyType, sf.key) + } + e.mapElemValue() + if sf.isRv { + if sf.builtin { + e.encodeIB(rv2i(baseRVRV(sf.rv))) + } else { + e.encodeValue(sf.rv, nil) + } + } else { + if !e.encodeBuiltin(sf.intf) { + e.encodeR(reflect.ValueOf(sf.intf)) + } + + } + } + } else { + keytyp := ti.keyType + for j = 0; j < newlen; j++ { + kv = fkvs[j] + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && !kv.v.encNameEscape4Json { + e.e.EncodeStringNoEscape4Json(kv.v.encName) + } else { + e.kStructFieldKey(keytyp, kv.v.encName) + } + e.mapElemValue() + if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + for _, v := range mf2s { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.kStructFieldKey(keytyp, v.v) + e.mapElemValue() + if !e.encodeBuiltin(v.i) { + e.encodeR(reflect.ValueOf(v.i)) + } + + j++ + } + } + + e.c = 0 + e.e.WriteMapEnd() + } else { + newlen = len(tisfi) + for i, si := range tisfi { + + if si.omitEmpty { + + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyContainerValue(kv.r, e.h.TypeInfos, recur) { + kv.r = reflect.Value{} + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[i] = kv + } + + if newlen == 0 { + e.e.WriteArrayEmpty() + goto END + } + + e.arrayStart(newlen) + for j = 0; j < newlen; j++ { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + kv = fkvs[j] + if !kv.r.IsValid() { + e.e.EncodeNil() + } else if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } + +END: + + e.slist.put(fkvs) +} + +func (e *encoderMsgpackBytes) kMap(f *encFnInfo, rv reflect.Value) { + _ = e.e + l := rvLenMap(rv) + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.mapStart(l) + + var keyFn, valFn *encFnMsgpackBytes + + ktypeKind := reflect.Kind(f.ti.keykind) + vtypeKind := reflect.Kind(f.ti.elemkind) + + rtval := f.ti.elem + rtvalkind := vtypeKind + for rtvalkind == reflect.Ptr { + rtval = rtval.Elem() + rtvalkind = rtval.Kind() + } + if rtvalkind != reflect.Interface { + valFn = e.fn(rtval) + } + + var rvv = mapAddrLoopvarRV(f.ti.elem, vtypeKind) + + rtkey := f.ti.key + var 
keyTypeIsString = stringTypId == rt2id(rtkey) + if keyTypeIsString { + keyFn = e.fn(rtkey) + } else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + keyFn = e.fn(rtkey) + } + } + + if e.h.Canonical { + e.kMapCanonical(f.ti, rv, rvv, keyFn, valFn) + e.c = 0 + e.e.WriteMapEnd() + return + } + + var rvk = mapAddrLoopvarRV(f.ti.key, ktypeKind) + + var it mapIter + mapRange(&it, rv, rvk, rvv, true) + + kbuiltin := f.ti.tikey.flagEncBuiltin + vbuiltin := f.ti.tielem.flagEncBuiltin + for j := 0; it.Next(); j++ { + rv = it.Key() + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if keyTypeIsString { + e.e.EncodeString(rvGetString(rv)) + } else if kbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(rv, keyFn) + } + e.mapElemValue() + rv = it.Value() + if vbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(it.Value(), valFn) + } + } + it.Done() + + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackBytes) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valFn *encFnMsgpackBytes) { + _ = e.e + + rtkey := ti.key + rtkeydecl := rtkey.PkgPath() == "" && rtkey.Name() != "" + + mks := rv.MapKeys() + rtkeyKind := rtkey.Kind() + mparams := getMapReqParams(ti) + + switch rtkeyKind { + case reflect.Bool: + + if len(mks) == 2 && mks[0].Bool() { + mks[0], mks[1] = mks[1], mks[0] + } + for i := range mks { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeBool(mks[i].Bool()) + } else { + e.encodeValueNonNil(mks[i], keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mks[i], rvv, mparams), valFn) + } + case reflect.String: + mksv := make([]orderedRv[string], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rvGetString(k) + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeString(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]orderedRv[uint64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeUint(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]orderedRv[int64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeInt(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float32: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat32(float32(mksv[i].v)) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, 
mparams), valFn) + } + case reflect.Float64: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat64(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + default: + if rtkey == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + slices.SortFunc(mksv, cmpTimeRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeTime(mksv[i].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + break + } + + bs0 := e.blist.get(len(mks) * 16) + mksv := bs0 + mksbv := make([]bytesRv, len(mks)) + + sideEncode(e.hh, &e.h.sideEncPool, func(se encoderI) { + se.ResetBytes(&mksv) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + se.setContainerState(containerMapKey) + se.encodeR(baseRVRV(k)) + se.atEndOfEncode() + se.writerEnd() + v.r = k + v.v = mksv[l:] + } + }) + + slices.SortFunc(mksbv, cmpBytesRv) + for j := range mksbv { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.writeBytesAsis(mksbv[j].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksbv[j].r, rvv, mparams), valFn) + } + e.blist.put(mksv) + if !byteSliceSameData(bs0, mksv) { + e.blist.put(bs0) + } + } +} + +func (e *encoderMsgpackBytes) init(h Handle) { + initHandle(h) + callMake(&e.e) + e.hh = h + e.h = h.getBasicHandle() + + e.err = errEncoderNotInitialized + + e.fp = e.e.init(h, &e.encoderBase, e).(*fastpathEsMsgpackBytes) + + if e.bytes { + e.rtidFn = &e.h.rtidFnsEncBytes + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtBytes + } else { + e.rtidFn = &e.h.rtidFnsEncIO + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtIO + } + + e.reset() +} + +func (e *encoderMsgpackBytes) reset() { + e.e.reset() + if e.ci != nil { + e.ci = e.ci[:0] + } + e.c = 0 + e.calls = 0 + e.seq = 0 + e.err = nil +} + +func (e *encoderMsgpackBytes) Encode(v interface{}) (err error) { + + defer panicValToErr(e, callRecoverSentinel, &e.err, &err, debugging) + e.mustEncode(v) + return +} + +func (e *encoderMsgpackBytes) MustEncode(v interface{}) { + defer panicValToErr(e, callRecoverSentinel, &e.err, nil, true) + e.mustEncode(v) + return +} + +func (e *encoderMsgpackBytes) mustEncode(v interface{}) { + halt.onerror(e.err) + if e.hh == nil { + halt.onerror(errNoFormatHandle) + } + + e.calls++ + if !e.encodeBuiltin(v) { + e.encodeR(reflect.ValueOf(v)) + } + + e.calls-- + if e.calls == 0 { + e.e.atEndOfEncode() + e.e.writerEnd() + } +} + +func (e *encoderMsgpackBytes) encodeI(iv interface{}) { + if !e.encodeBuiltin(iv) { + e.encodeR(reflect.ValueOf(iv)) + } +} + +func (e *encoderMsgpackBytes) encodeIB(iv interface{}) { + if !e.encodeBuiltin(iv) { + + halt.errorStr("[should not happen] invalid type passed to encodeBuiltin") + } +} + +func (e *encoderMsgpackBytes) encodeR(base reflect.Value) { + e.encodeValue(base, nil) +} + +func (e *encoderMsgpackBytes) encodeBuiltin(iv interface{}) (ok bool) { + ok = true + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + + case Raw: + e.rawBytes(v) + case string: + e.e.EncodeString(v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case 
int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case complex64: + e.encodeComplex64(v) + case complex128: + e.encodeComplex128(v) + case time.Time: + e.e.EncodeTime(v) + case []byte: + e.e.EncodeBytes(v) + default: + + ok = !skipFastpathTypeSwitchInDirectCall && e.dh.fastpathEncodeTypeSwitch(iv, e) + } + return +} + +func (e *encoderMsgpackBytes) encodeValue(rv reflect.Value, fn *encFnMsgpackBytes) { + + var ciPushes int + + var rvp reflect.Value + var rvpValid bool + +RV: + switch rv.Kind() { + case reflect.Ptr: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = true + rvp = rv + rv = rv.Elem() + + if e.h.CheckCircularRef && e.ci.canPushElemKind(rv.Kind()) { + e.ci.push(rv2i(rvp)) + ciPushes++ + } + goto RV + case reflect.Interface: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = false + rvp = reflect.Value{} + rv = rv.Elem() + fn = nil + goto RV + case reflect.Map: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Slice, reflect.Chan: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + goto END + } + + if fn == nil { + fn = e.fn(rv.Type()) + } + + if !fn.i.addrE { + + } else if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + fn.fe(e, &fn.i, rv) + +END: + if ciPushes > 0 { + e.ci.pop(ciPushes) + } +} + +func (e *encoderMsgpackBytes) encodeValueNonNil(rv reflect.Value, fn *encFnMsgpackBytes) { + + if fn.i.addrE { + if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + } + fn.fe(e, &fn.i, rv) +} + +func (e *encoderMsgpackBytes) encodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + e.encodeValue(baseRV(v), e.fn(t)) + } else { + e.encodeValue(baseRV(v), e.fnNoExt(t)) + } +} + +func (e *encoderMsgpackBytes) marshalUtf8(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeString(stringView(bs)) + } +} + +func (e *encoderMsgpackBytes) marshalAsis(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.writeBytesAsis(bs) + } +} + +func (e *encoderMsgpackBytes) marshalRaw(bs []byte, fnerr error) { + halt.onerror(fnerr) + e.e.EncodeBytes(bs) +} + +func (e *encoderMsgpackBytes) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + halt.errorBytes("Raw values cannot be encoded: ", v) + } + e.e.writeBytesAsis(v) +} + +func (e *encoderMsgpackBytes) fn(t reflect.Type) *encFnMsgpackBytes { + return e.dh.encFnViaBH(t, e.rtidFn, e.h, e.fp, false) +} + +func (e *encoderMsgpackBytes) fnNoExt(t reflect.Type) *encFnMsgpackBytes { + return e.dh.encFnViaBH(t, e.rtidFnNoExt, e.h, e.fp, true) +} + +func (e *encoderMsgpackBytes) mapStart(length int) { + e.e.WriteMapStart(length) + e.c = containerMapStart +} + +func (e *encoderMsgpackBytes) mapElemValue() { + e.e.WriteMapElemValue() + e.c = containerMapValue +} + +func (e *encoderMsgpackBytes) arrayStart(length int) { + e.e.WriteArrayStart(length) + e.c = 
containerArrayStart +} + +func (e *encoderMsgpackBytes) writerEnd() { + e.e.writerEnd() +} + +func (e *encoderMsgpackBytes) atEndOfEncode() { + e.e.atEndOfEncode() +} + +func (e *encoderMsgpackBytes) Reset(w io.Writer) { + if e.bytes { + halt.onerror(errEncNoResetBytesWithWriter) + } + e.reset() + if w == nil { + w = io.Discard + } + e.e.resetOutIO(w) +} + +func (e *encoderMsgpackBytes) ResetBytes(out *[]byte) { + if !e.bytes { + halt.onerror(errEncNoResetWriterWithBytes) + } + e.resetBytes(out) +} + +func (e *encoderMsgpackBytes) resetBytes(out *[]byte) { + e.reset() + if out == nil { + out = &bytesEncAppenderDefOut + } + e.e.resetOutBytes(out) +} + +func (helperEncDriverMsgpackBytes) newEncoderBytes(out *[]byte, h Handle) *encoderMsgpackBytes { + var c1 encoderMsgpackBytes + c1.bytes = true + c1.init(h) + c1.ResetBytes(out) + return &c1 +} + +func (helperEncDriverMsgpackBytes) newEncoderIO(out io.Writer, h Handle) *encoderMsgpackBytes { + var c1 encoderMsgpackBytes + c1.bytes = false + c1.init(h) + c1.Reset(out) + return &c1 +} + +func (helperEncDriverMsgpackBytes) encFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathEsMsgpackBytes) (f *fastpathEMsgpackBytes, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperEncDriverMsgpackBytes) encFindRtidFn(s []encRtidFnMsgpackBytes, rtid uintptr) (i uint, fn *encFnMsgpackBytes) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperEncDriverMsgpackBytes) encFromRtidFnSlice(fns *atomicRtidFnSlice) (s []encRtidFnMsgpackBytes) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]encRtidFnMsgpackBytes](v)) + } + return +} + +func (dh helperEncDriverMsgpackBytes) encFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, + x *BasicHandle, fp *fastpathEsMsgpackBytes, checkExt bool) (fn *encFnMsgpackBytes) { + return dh.encFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperEncDriverMsgpackBytes) encFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsMsgpackBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnMsgpackBytes) { + rtid := rt2id(rt) + var sp []encRtidFnMsgpackBytes = dh.encFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.encFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.encFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperEncDriverMsgpackBytes) encFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsMsgpackBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnMsgpackBytes) { + + fn = dh.encFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []encRtidFnMsgpackBytes + mu.Lock() + sp = dh.encFromRtidFnSlice(fns) + + if sp == nil { + sp = []encRtidFnMsgpackBytes{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.encFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := 
make([]encRtidFnMsgpackBytes, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = encRtidFnMsgpackBytes{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperEncDriverMsgpackBytes) encFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathEsMsgpackBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnMsgpackBytes) { + fn = new(encFnMsgpackBytes) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + if rtid == timeTypId && timeBuiltin { + fn.fe = (*encoderMsgpackBytes).kTime + } else if rtid == rawTypId { + fn.fe = (*encoderMsgpackBytes).raw + } else if rtid == rawExtTypId { + fn.fe = (*encoderMsgpackBytes).rawExt + fi.addrE = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*encoderMsgpackBytes).ext + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fe = (*encoderMsgpackBytes).selferMarshal + fi.addrE = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fe = (*encoderMsgpackBytes).binaryMarshal + fi.addrE = ti.flagBinaryMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fe = (*encoderMsgpackBytes).jsonMarshal + fi.addrE = ti.flagJsonMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fe = (*encoderMsgpackBytes).textMarshal + fi.addrE = ti.flagTextMarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fe = fp[idx].encfn + } + } else { + + xfe, xrt := dh.encFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf := xfe.encfn + fn.fe = func(e *encoderMsgpackBytes, xf *encFnInfo, xrv reflect.Value) { + xfnf(e, xf, rvConvert(xrv, xrt)) + } + } + } + } + if fn.fe == nil { + switch rk { + case reflect.Bool: + fn.fe = (*encoderMsgpackBytes).kBool + case reflect.String: + + fn.fe = (*encoderMsgpackBytes).kString + case reflect.Int: + fn.fe = (*encoderMsgpackBytes).kInt + case reflect.Int8: + fn.fe = (*encoderMsgpackBytes).kInt8 + case reflect.Int16: + fn.fe = (*encoderMsgpackBytes).kInt16 + case reflect.Int32: + fn.fe = (*encoderMsgpackBytes).kInt32 + case reflect.Int64: + fn.fe = (*encoderMsgpackBytes).kInt64 + case reflect.Uint: + fn.fe = (*encoderMsgpackBytes).kUint + case reflect.Uint8: + fn.fe = (*encoderMsgpackBytes).kUint8 + case reflect.Uint16: + fn.fe = (*encoderMsgpackBytes).kUint16 + case reflect.Uint32: + fn.fe = (*encoderMsgpackBytes).kUint32 + case reflect.Uint64: + fn.fe = (*encoderMsgpackBytes).kUint64 + case reflect.Uintptr: + fn.fe = (*encoderMsgpackBytes).kUintptr + case reflect.Float32: + fn.fe = (*encoderMsgpackBytes).kFloat32 + case reflect.Float64: + fn.fe = (*encoderMsgpackBytes).kFloat64 + case reflect.Complex64: + fn.fe = (*encoderMsgpackBytes).kComplex64 + case reflect.Complex128: + fn.fe = 
(*encoderMsgpackBytes).kComplex128 + case reflect.Chan: + fn.fe = (*encoderMsgpackBytes).kChan + case reflect.Slice: + fn.fe = (*encoderMsgpackBytes).kSlice + case reflect.Array: + fn.fe = (*encoderMsgpackBytes).kArray + case reflect.Struct: + if ti.simple { + fn.fe = (*encoderMsgpackBytes).kStructSimple + } else { + fn.fe = (*encoderMsgpackBytes).kStruct + } + case reflect.Map: + fn.fe = (*encoderMsgpackBytes).kMap + case reflect.Interface: + + fn.fe = (*encoderMsgpackBytes).kErr + default: + + fn.fe = (*encoderMsgpackBytes).kErr + } + } + } + return +} +func (d *decoderMsgpackBytes) rawExt(f *decFnInfo, rv reflect.Value) { + d.d.DecodeRawExt(rv2i(rv).(*RawExt)) +} + +func (d *decoderMsgpackBytes) ext(f *decFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (d *decoderMsgpackBytes) selferUnmarshal(_ *decFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(&Decoder{d}) +} + +func (d *decoderMsgpackBytes) binaryUnmarshal(_ *decFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs, _ := d.d.DecodeBytes() + fnerr := bm.UnmarshalBinary(xbs) + halt.onerror(fnerr) +} + +func (d *decoderMsgpackBytes) textUnmarshal(_ *decFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(bytesOKs(d.d.DecodeStringAsBytes())) + halt.onerror(fnerr) +} + +func (d *decoderMsgpackBytes) jsonUnmarshal(_ *decFnInfo, rv reflect.Value) { + d.jsonUnmarshalV(rv2i(rv).(jsonUnmarshaler)) +} + +func (d *decoderMsgpackBytes) jsonUnmarshalV(tm jsonUnmarshaler) { + + halt.onerror(tm.UnmarshalJSON(d.d.nextValueBytes())) +} + +func (d *decoderMsgpackBytes) kErr(_ *decFnInfo, rv reflect.Value) { + halt.errorf("unsupported decoding kind: %s, for %#v", rv.Kind(), rv) + +} + +func (d *decoderMsgpackBytes) raw(_ *decFnInfo, rv reflect.Value) { + rvSetBytes(rv, d.rawBytes()) +} + +func (d *decoderMsgpackBytes) kString(_ *decFnInfo, rv reflect.Value) { + rvSetString(rv, d.detach2Str(d.d.DecodeStringAsBytes())) +} + +func (d *decoderMsgpackBytes) kBool(_ *decFnInfo, rv reflect.Value) { + rvSetBool(rv, d.d.DecodeBool()) +} + +func (d *decoderMsgpackBytes) kTime(_ *decFnInfo, rv reflect.Value) { + rvSetTime(rv, d.d.DecodeTime()) +} + +func (d *decoderMsgpackBytes) kFloat32(_ *decFnInfo, rv reflect.Value) { + rvSetFloat32(rv, d.d.DecodeFloat32()) +} + +func (d *decoderMsgpackBytes) kFloat64(_ *decFnInfo, rv reflect.Value) { + rvSetFloat64(rv, d.d.DecodeFloat64()) +} + +func (d *decoderMsgpackBytes) kComplex64(_ *decFnInfo, rv reflect.Value) { + rvSetComplex64(rv, complex(d.d.DecodeFloat32(), 0)) +} + +func (d *decoderMsgpackBytes) kComplex128(_ *decFnInfo, rv reflect.Value) { + rvSetComplex128(rv, complex(d.d.DecodeFloat64(), 0)) +} + +func (d *decoderMsgpackBytes) kInt(_ *decFnInfo, rv reflect.Value) { + rvSetInt(rv, int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))) +} + +func (d *decoderMsgpackBytes) kInt8(_ *decFnInfo, rv reflect.Value) { + rvSetInt8(rv, int8(chkOvf.IntV(d.d.DecodeInt64(), 8))) +} + +func (d *decoderMsgpackBytes) kInt16(_ *decFnInfo, rv reflect.Value) { + rvSetInt16(rv, int16(chkOvf.IntV(d.d.DecodeInt64(), 16))) +} + +func (d *decoderMsgpackBytes) kInt32(_ *decFnInfo, rv reflect.Value) { + rvSetInt32(rv, int32(chkOvf.IntV(d.d.DecodeInt64(), 32))) +} + +func (d *decoderMsgpackBytes) kInt64(_ *decFnInfo, rv reflect.Value) { + rvSetInt64(rv, d.d.DecodeInt64()) +} + +func (d *decoderMsgpackBytes) kUint(_ *decFnInfo, rv reflect.Value) { + rvSetUint(rv, uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} 
+ +func (d *decoderMsgpackBytes) kUintptr(_ *decFnInfo, rv reflect.Value) { + rvSetUintptr(rv, uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderMsgpackBytes) kUint8(_ *decFnInfo, rv reflect.Value) { + rvSetUint8(rv, uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))) +} + +func (d *decoderMsgpackBytes) kUint16(_ *decFnInfo, rv reflect.Value) { + rvSetUint16(rv, uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))) +} + +func (d *decoderMsgpackBytes) kUint32(_ *decFnInfo, rv reflect.Value) { + rvSetUint32(rv, uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))) +} + +func (d *decoderMsgpackBytes) kUint64(_ *decFnInfo, rv reflect.Value) { + rvSetUint64(rv, d.d.DecodeUint64()) +} + +func (d *decoderMsgpackBytes) kInterfaceNaked(f *decFnInfo) (rvn reflect.Value) { + + n := d.naked() + d.d.DecodeNaked() + + if decFailNonEmptyIntf && f.ti.numMeth > 0 { + halt.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + } + + switch n.v { + case valueTypeMap: + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapStrIntfTypId { + var v2 map[string]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + + rvn = rvZeroAddrK(d.h.MapType, reflect.Map) + d.decodeValue(rvn, nil) + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvZeroAddrK(d.h.SliceType, reflect.Slice) + d.decodeValue(rvn, nil) + } + if d.h.PreferArrayOverSlice { + rvn = rvGetArray4Slice(rvn) + } + case valueTypeExt: + tag, bytes := n.u, n.l + bfn := d.h.getExtForTag(tag) + var re = RawExt{Tag: tag} + if bytes == nil { + + if bfn == nil { + d.decode(&re.Value) + rvn = rv4iptr(&re).Elem() + } else if bfn.ext == SelfExt { + rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind()) + d.decodeValue(rvn, d.fnNoExt(bfn.rt)) + } else { + rvn = reflect.New(bfn.rt) + d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext) + rvn = rvn.Elem() + } + } else { + + if bfn == nil { + re.setData(bytes, false) + rvn = rv4iptr(&re).Elem() + } else { + rvn = reflect.New(bfn.rt) + if bfn.ext == SelfExt { + sideDecode(d.hh, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv2i(rvn), bytes, bfn.rt, true) }) + } else { + bfn.ext.ReadExt(rv2i(rvn), bytes) + } + rvn = rvn.Elem() + } + } + + if d.h.PreferPointerForStructOrArray && rvn.CanAddr() { + if rk := rvn.Kind(); rk == reflect.Array || rk == reflect.Struct { + rvn = rvn.Addr() + } + } + case valueTypeNil: + + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + halt.errorStr2("kInterfaceNaked: unexpected valueType: ", n.v.String()) + } + return +} + +func (d *decoderMsgpackBytes) kInterface(f *decFnInfo, rv reflect.Value) { + + isnilrv := rvIsNil(rv) + + var rvn reflect.Value + + if d.h.InterfaceReset { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } else if !isnilrv 
{ + decSetNonNilRV2Zero4Intf(rv) + } + return + } + } else if isnilrv { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } + return + } + } else { + + rvn = rv.Elem() + } + + canDecode, _ := isDecodeable(rvn) + + if !canDecode { + rvn2 := d.oneShotAddrRV(rvn.Type(), rvn.Kind()) + rvSetDirect(rvn2, rvn) + rvn = rvn2 + } + + d.decodeValue(rvn, nil) + rvSetIntf(rv, rvn) +} + +func (d *decoderMsgpackBytes) kStructField(si *structFieldInfo, rv reflect.Value) { + if d.d.TryNil() { + rv = si.fieldNoAlloc(rv, true) + if rv.IsValid() { + decSetNonNilRV2Zero(rv) + } + } else if si.decBuiltin { + rv = rvAddr(si.fieldAlloc(rv), si.ptrTyp) + d.decode(rv2i(rv)) + } else { + fn := d.fn(si.baseTyp) + rv = si.fieldAlloc(rv) + if fn.i.addrD { + rv = rvAddr(rv, si.ptrTyp) + } + fn.fd(d, &fn.i, rv) + } +} + +func (d *decoderMsgpackBytes) kStructSimple(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var rvkencname []byte + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderMsgpackBytes) kStruct(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + var mf MissingFielder + if ti.flagMissingFielder { + mf = rv2i(rv).(MissingFielder) + } else if ti.flagMissingFielderPtr { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var name2 []byte + var rvkencname []byte + tkt := ti.keyType + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + + if tkt == valueTypeString { + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + } else if tkt == valueTypeInt { + rvkencname = strconv.AppendInt(d.b[:0], d.d.DecodeInt64(), 10) + } else if tkt == valueTypeUint { + rvkencname = strconv.AppendUint(d.b[:0], d.d.DecodeUint64(), 10) + } else if tkt == valueTypeFloat { + rvkencname = strconv.AppendFloat(d.b[:0], d.d.DecodeFloat64(), 'f', -1, 64) + } else { + halt.errorStr2("invalid struct key type: ", ti.keyType.String()) + } + + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else if mf != nil { + + name2 = append(name2[:0], rvkencname...) 
+ var f interface{} + d.decode(&f) + if !mf.CodecMissingField(name2, f) && d.h.ErrorIfNoField { + halt.errorStr2("no matching struct field when decoding stream map with key: ", stringView(name2)) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderMsgpackBytes) kSlice(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + rvCanset := rv.CanSet() + + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + rvbs := rvGetBytes(rv) + if rvCanset { + bs2, bst := d.decodeBytesInto(rvbs, false) + if bst != dBytesIntoParamOut { + rvSetBytes(rv, bs2) + } + } else { + + d.decodeBytesInto(rvbs[:len(rvbs):len(rvbs)], true) + } + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset { + if rvIsNil(rv) { + rvSetDirect(rv, rvSliceZeroCap(ti.rt)) + } else { + rvSetSliceLen(rv, 0) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem0Mut := !scalarBitset.isset(ti.elemkind) + rtelem := ti.elem + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnMsgpackBytes + + var rvChanged bool + + var rv0 = rv + var rv9 reflect.Value + + rvlen := rvLenSlice(rv) + rvcap := rvCapSlice(rv) + maxInitLen := d.maxInitLen() + hasLen := containerLenS >= 0 + if hasLen { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen1 := int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + if rvlen1 == rvlen { + } else if rvlen1 <= rvcap { + if rvCanset { + rvlen = rvlen1 + rvSetSliceLen(rv, rvlen) + } + } else if rvCanset { + rvlen = rvlen1 + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && rtelem0Mut { + rvCopySlice(rv, rv0, rtelem) + } + } else if containerLenS != rvlen { + if rvCanset { + rvlen = containerLenS + rvSetSliceLen(rv, rvlen) + } + } + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } + + var j int + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if rvCanset { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode 
into non-settable slice") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + + if rvlen < rvcap { + rvlen = rvcap + if rvCanset { + rvSetSliceLen(rv, rvlen) + } else if rvChanged { + rv = rvSlice(rv, rvlen) + } else { + halt.onerror(errExpandSliceCannotChange) + } + } else { + if !(rvCanset || rvChanged) { + halt.onerror(errExpandSliceCannotChange) + } + rv, rvcap, rvCanset = rvGrowSlice(rv, f.ti, rvcap, 1) + + rvlen = rvcap + rvChanged = !rvCanset + } + } + + rv9 = rvArrayIndex(rv, j, f.ti, true) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if j < rvlen { + if rvCanset { + rvSetSliceLen(rv, j) + } else if rvChanged { + rv = rvSlice(rv, j) + } + + } else if j == 0 && rvIsNil(rv) { + if rvCanset { + rv = rvSliceZeroCap(ti.rt) + rvCanset = false + rvChanged = true + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } +} + +func (d *decoderMsgpackBytes) kArray(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + ctyp := d.d.ContainerType() + if handleBytesWithinKArray && (ctyp == valueTypeBytes || ctyp == valueTypeString) { + + if ti.elemkind != uint8(reflect.Uint8) { + halt.errorf("bytes/string in stream can decode into array of bytes, but not %v", ti.rt) + } + rvbs := rvGetArrayBytes(rv, nil) + d.decodeBytesInto(rvbs, true) + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var rv9 reflect.Value + + rvlen := rv.Len() + hasLen := containerLenS >= 0 + if hasLen && containerLenS > rvlen { + halt.errorf("cannot decode into array with length: %v, less than container length: %v", any(rvlen), any(containerLenS)) + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + var fn *decFnMsgpackBytes + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } else { + fn = d.fn(rtelem) + } + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + d.arrayCannotExpand(rvlen, j+1) + d.swallow() + continue + } + + rv9 = rvArrayIndex(rv, j, f.ti, false) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() 
+ } +} + +func (d *decoderMsgpackBytes) kChan(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + if ti.chandir&uint8(reflect.SendDir) == 0 { + halt.errorStr("receive-only channel cannot be decoded") + } + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + bs2, _ := d.d.DecodeBytes() + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + return + } + + var rvCanset = rv.CanSet() + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset && rvIsNil(rv) { + rvSetDirect(rv, reflect.MakeChan(ti.rt, 0)) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + useTransient := decUseTransient && ti.elemkind != byte(reflect.Ptr) && ti.tielem.flagCanTransient + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnMsgpackBytes + + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + var rvlen int + hasLen := containerLenS >= 0 + maxInitLen := d.maxInitLen() + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if hasLen { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + } else { + rvlen = decDefChanCap + } + if rvCanset { + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } else { + halt.errorStr("cannot decode into non-settable chan") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if rv9.IsValid() { + rvSetZero(rv9) + } else if useTransient { + rv9 = d.perType.TransientAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } else { + rv9 = rvZeroAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } + if !d.d.TryNil() { + d.decodeValueNoCheckNil(rv9, fn) + } + rv.Send(rv9) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } + +} + +func (d *decoderMsgpackBytes) kMap(f *decFnInfo, rv reflect.Value) { + _ = d.d + containerLen := d.mapStart(d.d.ReadMapStart()) + ti := f.ti + if rvIsNil(rv) { + rvlen := int(decInferLen(containerLen, d.maxInitLen(), uint(ti.keysize+ti.elemsize))) + rvSetDirect(rv, makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + d.mapEnd() + return + } + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := reflect.Kind(ti.elemkind) + ktypeKind := reflect.Kind(ti.keykind) + mparams := getMapReqParams(ti) + + vtypePtr := vtypeKind == reflect.Ptr + ktypePtr := ktypeKind == reflect.Ptr + + vTransient := decUseTransient && !vtypePtr && ti.tielem.flagCanTransient + + kTransient := vTransient && !ktypePtr && ti.tikey.flagCanTransient + + var vtypeElem reflect.Type + + var keyFn, valFn *decFnMsgpackBytes + var ktypeLo, vtypeLo = ktype, vtype + + if ktypeKind == reflect.Ptr { + for ktypeLo = ktype.Elem(); ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + } + + if vtypePtr { + vtypeElem = 
vtype.Elem() + for vtypeLo = vtypeElem; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + } + + rvkMut := !scalarBitset.isset(ti.keykind) + rvvMut := !scalarBitset.isset(ti.elemkind) + rvvCanNil := isnilBitset.isset(ti.elemkind) + + var rvk, rvkn, rvv, rvvn, rvva, rvvz reflect.Value + + var doMapGet, doMapSet bool + + if !d.h.MapValueReset { + if rvvMut && (vtypeKind != reflect.Interface || !d.h.InterfaceReset) { + doMapGet = true + rvva = mapAddrLoopvarRV(vtype, vtypeKind) + } + } + + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen >= 0 + + var kstr2bs []byte + var kstr string + + var mapKeyStringSharesBytesBuf bool + var att dBytesAttachState + + var vElem, kElem reflect.Type + kbuiltin := ti.tikey.flagDecBuiltin && ti.keykind != uint8(reflect.Slice) + vbuiltin := ti.tielem.flagDecBuiltin + if kbuiltin && ktypePtr { + kElem = ti.key.Elem() + } + if vbuiltin && vtypePtr { + vElem = ti.elem.Elem() + } + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + mapKeyStringSharesBytesBuf = false + kstr = "" + if j == 0 { + + if kTransient { + rvk = d.perType.TransientAddr2K(ktype, ktypeKind) + } else { + rvk = rvZeroAddrK(ktype, ktypeKind) + } + if !rvkMut { + rvkn = rvk + } + if !rvvMut { + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + } + if !ktypeIsString && keyFn == nil { + keyFn = d.fn(ktypeLo) + } + if valFn == nil { + valFn = d.fn(vtypeLo) + } + } else if rvkMut { + rvSetZero(rvk) + } else { + rvk = rvkn + } + + d.mapElemKey(j == 0) + + if d.d.TryNil() { + rvSetZero(rvk) + } else if ktypeIsString { + kstr2bs, att = d.d.DecodeStringAsBytes() + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, att) + rvSetString(rvk, kstr) + } else { + if kbuiltin { + if ktypePtr { + if rvIsNil(rvk) { + rvSetDirect(rvk, reflect.New(kElem)) + } + d.decode(rv2i(rvk)) + } else { + d.decode(rv2i(rvAddr(rvk, ti.tikey.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvk, keyFn) + } + + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() && rvk2.Type() == uint8SliceTyp { + kstr2bs = rvGetBytes(rvk2) + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, dBytesAttachView) + rvSetIntf(rvk, rv4istr(kstr)) + } + + } + } + + if mapKeyStringSharesBytesBuf && d.bufio { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + mapKeyStringSharesBytesBuf = false + } + + d.mapElemValue() + + if d.d.TryNil() { + if mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + + if !rvvz.IsValid() { + rvvz = rvZeroK(vtype, vtypeKind) + } + mapSet(rv, rvk, rvvz, mparams) + continue + } + + doMapSet = true + + if !rvvMut { + rvv = rvvn + } else if !doMapGet { + goto NEW_RVV + } else { + rvv = mapGet(rv, rvk, rvva, mparams) + if !rvv.IsValid() || (rvvCanNil && rvIsNil(rvv)) { + goto NEW_RVV + } + switch vtypeKind { + case reflect.Ptr, reflect.Map: + doMapSet = false + case reflect.Interface: + + rvvn = rvv.Elem() + if k := rvvn.Kind(); (k == reflect.Ptr || k == reflect.Map) && !rvIsNil(rvvn) { + d.decodeValueNoCheckNil(rvvn, nil) + continue + } + + rvvn = rvZeroAddrK(vtype, vtypeKind) + rvSetIntf(rvvn, rvv) + rvv = rvvn + default: + + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + rvSetDirect(rvvn, 
rvv) + rvv = rvvn + } + } + goto DECODE_VALUE_NO_CHECK_NIL + + NEW_RVV: + if vtypePtr { + rvv = reflect.New(vtypeElem) + } else if vTransient { + rvv = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvv = rvZeroAddrK(vtype, vtypeKind) + } + + DECODE_VALUE_NO_CHECK_NIL: + if doMapSet && mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + if vbuiltin { + if vtypePtr { + if rvIsNil(rvv) { + rvSetDirect(rvv, reflect.New(vElem)) + } + d.decode(rv2i(rvv)) + } else { + d.decode(rv2i(rvAddr(rvv, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvv, valFn) + } + if doMapSet { + mapSet(rv, rvk, rvv, mparams) + } + } + + d.mapEnd() +} + +func (d *decoderMsgpackBytes) init(h Handle) { + initHandle(h) + callMake(&d.d) + d.hh = h + d.h = h.getBasicHandle() + + d.err = errDecoderNotInitialized + + if d.h.InternString && d.is == nil { + d.is.init() + } + + d.fp = d.d.init(h, &d.decoderBase, d).(*fastpathDsMsgpackBytes) + + if d.bytes { + d.rtidFn = &d.h.rtidFnsDecBytes + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtBytes + } else { + d.bufio = d.h.ReaderBufferSize > 0 + d.rtidFn = &d.h.rtidFnsDecIO + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtIO + } + + d.reset() + +} + +func (d *decoderMsgpackBytes) reset() { + d.d.reset() + d.err = nil + d.c = 0 + d.depth = 0 + d.calls = 0 + + d.maxdepth = decDefMaxDepth + if d.h.MaxDepth > 0 { + d.maxdepth = d.h.MaxDepth + } + d.mtid = 0 + d.stid = 0 + d.mtr = false + d.str = false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + _, d.mtr = fastpathAvIndex(d.mtid) + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + _, d.str = fastpathAvIndex(d.stid) + } +} + +func (d *decoderMsgpackBytes) Reset(r io.Reader) { + if d.bytes { + halt.onerror(errDecNoResetBytesWithReader) + } + d.reset() + if r == nil { + r = &eofReader + } + d.d.resetInIO(r) +} + +func (d *decoderMsgpackBytes) ResetBytes(in []byte) { + if !d.bytes { + halt.onerror(errDecNoResetReaderWithBytes) + } + d.resetBytes(in) +} + +func (d *decoderMsgpackBytes) resetBytes(in []byte) { + d.reset() + if in == nil { + in = zeroByteSlice + } + d.d.resetInBytes(in) +} + +func (d *decoderMsgpackBytes) ResetString(s string) { + d.ResetBytes(bytesView(s)) +} + +func (d *decoderMsgpackBytes) Decode(v interface{}) (err error) { + + defer panicValToErr(d, callRecoverSentinel, &d.err, &err, debugging) + d.mustDecode(v) + return +} + +func (d *decoderMsgpackBytes) MustDecode(v interface{}) { + defer panicValToErr(d, callRecoverSentinel, &d.err, nil, true) + d.mustDecode(v) + return +} + +func (d *decoderMsgpackBytes) mustDecode(v interface{}) { + halt.onerror(d.err) + if d.hh == nil { + halt.onerror(errNoFormatHandle) + } + + d.calls++ + d.decode(v) + d.calls-- +} + +func (d *decoderMsgpackBytes) Release() {} + +func (d *decoderMsgpackBytes) swallow() { + d.d.nextValueBytes() +} + +func (d *decoderMsgpackBytes) nextValueBytes() []byte { + return d.d.nextValueBytes() +} + +func (d *decoderMsgpackBytes) decode(iv interface{}) { + _ = d.d + + rv, ok := isNil(iv, true) + if ok { + halt.onerror(errCannotDecodeIntoNil) + } + + switch v := iv.(type) { + + case *string: + *v = d.detach2Str(d.d.DecodeStringAsBytes()) + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = 
int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + *v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *uintptr: + *v = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *float32: + *v = d.d.DecodeFloat32() + case *float64: + *v = d.d.DecodeFloat64() + case *complex64: + *v = complex(d.d.DecodeFloat32(), 0) + case *complex128: + *v = complex(d.d.DecodeFloat64(), 0) + case *[]byte: + *v, _ = d.decodeBytesInto(*v, false) + case []byte: + + d.decodeBytesInto(v[:len(v):len(v)], true) + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(rv4iptr(v), nil) + + case reflect.Value: + if ok, _ = isDecodeable(v); !ok { + d.haltAsNotDecodeable(v) + } + d.decodeValue(v, nil) + + default: + + if skipFastpathTypeSwitchInDirectCall || !d.dh.fastpathDecodeTypeSwitch(iv, d) { + if !rv.IsValid() { + rv = reflect.ValueOf(iv) + } + if ok, _ = isDecodeable(rv); !ok { + d.haltAsNotDecodeable(rv) + } + d.decodeValue(rv, nil) + } + } +} + +func (d *decoderMsgpackBytes) decodeValue(rv reflect.Value, fn *decFnMsgpackBytes) { + if d.d.TryNil() { + decSetNonNilRV2Zero(rv) + } else { + d.decodeValueNoCheckNil(rv, fn) + } +} + +func (d *decoderMsgpackBytes) decodeValueNoCheckNil(rv reflect.Value, fn *decFnMsgpackBytes) { + + var rvp reflect.Value + var rvpValid bool +PTR: + if rv.Kind() == reflect.Ptr { + rvpValid = true + if rvIsNil(rv) { + rvSetDirect(rv, reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + goto PTR + } + + if fn == nil { + fn = d.fn(rv.Type()) + } + if fn.i.addrD { + if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else if fn.i.addrDf { + halt.errorStr("cannot decode into a non-pointer value") + } + } + fn.fd(d, &fn.i, rv) +} + +func (d *decoderMsgpackBytes) decodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + d.decodeValue(baseRV(v), d.fn(t)) + } else { + d.decodeValue(baseRV(v), d.fnNoExt(t)) + } +} + +func (d *decoderMsgpackBytes) structFieldNotFound(index int, rvkencname string) { + + if d.h.ErrorIfNoField { + if index >= 0 { + halt.errorInt("no matching struct field found when decoding stream array at index ", int64(index)) + } else if rvkencname != "" { + halt.errorStr2("no matching struct field found when decoding stream map with key ", rvkencname) + } + } + d.swallow() +} + +func (d *decoderMsgpackBytes) decodeBytesInto(out []byte, mustFit bool) (v []byte, state dBytesIntoState) { + v, att := d.d.DecodeBytes() + if cap(v) == 0 || (att >= dBytesAttachViewZerocopy && !mustFit) { + + return + } + if len(v) == 0 { + v = zeroByteSlice + return + } + if len(out) == len(v) { + state = dBytesIntoParamOut + } else if cap(out) >= len(v) { + out = out[:len(v)] + state = dBytesIntoParamOutSlice + } else if mustFit { + halt.errorf("bytes capacity insufficient for decoded bytes: got/expected: %d/%d", len(v), len(out)) + } else { + out = make([]byte, len(v)) + state = dBytesIntoNew + } + copy(out, v) + v = out + return +} + +func (d *decoderMsgpackBytes) rawBytes() (v []byte) { + + v = d.d.nextValueBytes() + if d.bytes && !d.h.ZeroCopy { + vv := make([]byte, len(v)) + copy(vv, v) + v = vv + } + return +} + +func (d *decoderMsgpackBytes) wrapErr(v error, err *error) { + *err = 
wrapCodecErr(v, d.hh.Name(), d.d.NumBytesRead(), false) +} + +func (d *decoderMsgpackBytes) NumBytesRead() int { + return d.d.NumBytesRead() +} + +func (d *decoderMsgpackBytes) containerNext(j, containerLen int, hasLen bool) bool { + + if hasLen { + return j < containerLen + } + return !d.d.CheckBreak() +} + +func (d *decoderMsgpackBytes) mapElemKey(firstTime bool) { + d.d.ReadMapElemKey(firstTime) + d.c = containerMapKey +} + +func (d *decoderMsgpackBytes) mapElemValue() { + d.d.ReadMapElemValue() + d.c = containerMapValue +} + +func (d *decoderMsgpackBytes) mapEnd() { + d.d.ReadMapEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderMsgpackBytes) arrayElem(firstTime bool) { + d.d.ReadArrayElem(firstTime) + d.c = containerArrayElem +} + +func (d *decoderMsgpackBytes) arrayEnd() { + d.d.ReadArrayEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderMsgpackBytes) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) { + + var vv interface{} + d.decode(&vv) + ext.UpdateExt(v, vv) + +} + +func (d *decoderMsgpackBytes) fn(t reflect.Type) *decFnMsgpackBytes { + return d.dh.decFnViaBH(t, d.rtidFn, d.h, d.fp, false) +} + +func (d *decoderMsgpackBytes) fnNoExt(t reflect.Type) *decFnMsgpackBytes { + return d.dh.decFnViaBH(t, d.rtidFnNoExt, d.h, d.fp, true) +} + +func (helperDecDriverMsgpackBytes) newDecoderBytes(in []byte, h Handle) *decoderMsgpackBytes { + var c1 decoderMsgpackBytes + c1.bytes = true + c1.init(h) + c1.ResetBytes(in) + return &c1 +} + +func (helperDecDriverMsgpackBytes) newDecoderIO(in io.Reader, h Handle) *decoderMsgpackBytes { + var c1 decoderMsgpackBytes + c1.init(h) + c1.Reset(in) + return &c1 +} + +func (helperDecDriverMsgpackBytes) decFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathDsMsgpackBytes) (f *fastpathDMsgpackBytes, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperDecDriverMsgpackBytes) decFindRtidFn(s []decRtidFnMsgpackBytes, rtid uintptr) (i uint, fn *decFnMsgpackBytes) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperDecDriverMsgpackBytes) decFromRtidFnSlice(fns *atomicRtidFnSlice) (s []decRtidFnMsgpackBytes) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]decRtidFnMsgpackBytes](v)) + } + return +} + +func (dh helperDecDriverMsgpackBytes) decFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, x *BasicHandle, fp *fastpathDsMsgpackBytes, + checkExt bool) (fn *decFnMsgpackBytes) { + return dh.decFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperDecDriverMsgpackBytes) decFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsMsgpackBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnMsgpackBytes) { + rtid := rt2id(rt) + var sp []decRtidFnMsgpackBytes = dh.decFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.decFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.decFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperDecDriverMsgpackBytes) decFnViaLoader(rt 
reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsMsgpackBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnMsgpackBytes) { + + fn = dh.decFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []decRtidFnMsgpackBytes + mu.Lock() + sp = dh.decFromRtidFnSlice(fns) + + if sp == nil { + sp = []decRtidFnMsgpackBytes{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.decFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]decRtidFnMsgpackBytes, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = decRtidFnMsgpackBytes{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperDecDriverMsgpackBytes) decFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathDsMsgpackBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnMsgpackBytes) { + fn = new(decFnMsgpackBytes) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + fi.addrDf = true + + if rtid == timeTypId && timeBuiltin { + fn.fd = (*decoderMsgpackBytes).kTime + } else if rtid == rawTypId { + fn.fd = (*decoderMsgpackBytes).raw + } else if rtid == rawExtTypId { + fn.fd = (*decoderMsgpackBytes).rawExt + fi.addrD = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fd = (*decoderMsgpackBytes).ext + fi.addrD = true + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fd = (*decoderMsgpackBytes).selferUnmarshal + fi.addrD = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fd = (*decoderMsgpackBytes).binaryUnmarshal + fi.addrD = ti.flagBinaryUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fd = (*decoderMsgpackBytes).jsonUnmarshal + fi.addrD = ti.flagJsonUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fd = (*decoderMsgpackBytes).textUnmarshal + fi.addrD = ti.flagTextUnmarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fd = fp[idx].decfn + fi.addrD = true + fi.addrDf = false + if rk == reflect.Array { + fi.addrD = false + } + } + } else { + + xfe, xrt := dh.decFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf2 := xfe.decfn + if rk == reflect.Array { + fi.addrD = false + fn.fd = func(d *decoderMsgpackBytes, xf *decFnInfo, xrv reflect.Value) { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } else { + fi.addrD = true + fi.addrDf = false + xptr2rt := reflect.PointerTo(xrt) + fn.fd = func(d *decoderMsgpackBytes, xf *decFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, rvConvert(xrv, xptr2rt)) + } else { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } + } + } + } + } + if fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fd = (*decoderMsgpackBytes).kBool + case 
reflect.String: + fn.fd = (*decoderMsgpackBytes).kString + case reflect.Int: + fn.fd = (*decoderMsgpackBytes).kInt + case reflect.Int8: + fn.fd = (*decoderMsgpackBytes).kInt8 + case reflect.Int16: + fn.fd = (*decoderMsgpackBytes).kInt16 + case reflect.Int32: + fn.fd = (*decoderMsgpackBytes).kInt32 + case reflect.Int64: + fn.fd = (*decoderMsgpackBytes).kInt64 + case reflect.Uint: + fn.fd = (*decoderMsgpackBytes).kUint + case reflect.Uint8: + fn.fd = (*decoderMsgpackBytes).kUint8 + case reflect.Uint16: + fn.fd = (*decoderMsgpackBytes).kUint16 + case reflect.Uint32: + fn.fd = (*decoderMsgpackBytes).kUint32 + case reflect.Uint64: + fn.fd = (*decoderMsgpackBytes).kUint64 + case reflect.Uintptr: + fn.fd = (*decoderMsgpackBytes).kUintptr + case reflect.Float32: + fn.fd = (*decoderMsgpackBytes).kFloat32 + case reflect.Float64: + fn.fd = (*decoderMsgpackBytes).kFloat64 + case reflect.Complex64: + fn.fd = (*decoderMsgpackBytes).kComplex64 + case reflect.Complex128: + fn.fd = (*decoderMsgpackBytes).kComplex128 + case reflect.Chan: + fn.fd = (*decoderMsgpackBytes).kChan + case reflect.Slice: + fn.fd = (*decoderMsgpackBytes).kSlice + case reflect.Array: + fi.addrD = false + fn.fd = (*decoderMsgpackBytes).kArray + case reflect.Struct: + if ti.simple { + fn.fd = (*decoderMsgpackBytes).kStructSimple + } else { + fn.fd = (*decoderMsgpackBytes).kStruct + } + case reflect.Map: + fn.fd = (*decoderMsgpackBytes).kMap + case reflect.Interface: + + fn.fd = (*decoderMsgpackBytes).kInterface + default: + + fn.fd = (*decoderMsgpackBytes).kErr + } + } + } + return +} +func (e *msgpackEncDriverBytes) EncodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriverBytes) EncodeInt(i int64) { + if e.h.PositiveIntUnsigned && i >= 0 { + e.EncodeUint(uint64(i)) + } else if i > math.MaxInt8 { + if i <= math.MaxInt16 { + e.w.writen1(mpInt16) + e.w.writen2(bigen.PutUint16(uint16(i))) + } else if i <= math.MaxInt32 { + e.w.writen1(mpInt32) + e.w.writen4(bigen.PutUint32(uint32(i))) + } else { + e.w.writen1(mpInt64) + e.w.writen8(bigen.PutUint64(uint64(i))) + } + } else if i >= -32 { + if e.h.NoFixedNum { + e.w.writen2(mpInt8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i >= math.MinInt8 { + e.w.writen2(mpInt8, byte(i)) + } else if i >= math.MinInt16 { + e.w.writen1(mpInt16) + e.w.writen2(bigen.PutUint16(uint16(i))) + } else if i >= math.MinInt32 { + e.w.writen1(mpInt32) + e.w.writen4(bigen.PutUint32(uint32(i))) + } else { + e.w.writen1(mpInt64) + e.w.writen8(bigen.PutUint64(uint64(i))) + } +} + +func (e *msgpackEncDriverBytes) EncodeUint(i uint64) { + if i <= math.MaxInt8 { + if e.h.NoFixedNum { + e.w.writen2(mpUint8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i <= math.MaxUint8 { + e.w.writen2(mpUint8, byte(i)) + } else if i <= math.MaxUint16 { + e.w.writen1(mpUint16) + e.w.writen2(bigen.PutUint16(uint16(i))) + } else if i <= math.MaxUint32 { + e.w.writen1(mpUint32) + e.w.writen4(bigen.PutUint32(uint32(i))) + } else { + e.w.writen1(mpUint64) + e.w.writen8(bigen.PutUint64(uint64(i))) + } +} + +func (e *msgpackEncDriverBytes) EncodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriverBytes) EncodeFloat32(f float32) { + e.w.writen1(mpFloat) + e.w.writen4(bigen.PutUint32(math.Float32bits(f))) +} + +func (e *msgpackEncDriverBytes) EncodeFloat64(f float64) { + e.w.writen1(mpDouble) + e.w.writen8(bigen.PutUint64(math.Float64bits(f))) +} + +func (e *msgpackEncDriverBytes) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + return + 
} + t = t.UTC() + sec, nsec := t.Unix(), uint64(t.Nanosecond()) + var data64 uint64 + var l = 4 + if sec >= 0 && sec>>34 == 0 { + data64 = (nsec << 34) | uint64(sec) + if data64&0xffffffff00000000 != 0 { + l = 8 + } + } else { + l = 12 + } + if e.h.WriteExt { + e.encodeExtPreamble(mpTimeExtTagU, l) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, l) + } + switch l { + case 4: + e.w.writen4(bigen.PutUint32(uint32(data64))) + case 8: + e.w.writen8(bigen.PutUint64(data64)) + case 12: + e.w.writen4(bigen.PutUint32(uint32(nsec))) + e.w.writen8(bigen.PutUint64(uint64(sec))) + } +} + +func (e *msgpackEncDriverBytes) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + var bs0, bs []byte + if ext == SelfExt { + bs0 = e.e.blist.get(1024) + bs = bs0 + sideEncode(e.h, &e.h.sideEncPool, func(se encoderI) { oneOffEncode(se, v, &bs, basetype, true) }) + } else { + bs = ext.WriteExt(v) + } + if bs == nil { + e.writeNilBytes() + goto END + } + if e.h.WriteExt { + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) + } else { + e.EncodeBytes(bs) + } +END: + if ext == SelfExt { + e.e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.e.blist.put(bs0) + } + } +} + +func (e *msgpackEncDriverBytes) EncodeRawExt(re *RawExt) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *msgpackEncDriverBytes) encodeExtPreamble(xtag byte, l int) { + if l == 1 { + e.w.writen2(mpFixExt1, xtag) + } else if l == 2 { + e.w.writen2(mpFixExt2, xtag) + } else if l == 4 { + e.w.writen2(mpFixExt4, xtag) + } else if l == 8 { + e.w.writen2(mpFixExt8, xtag) + } else if l == 16 { + e.w.writen2(mpFixExt16, xtag) + } else if l < 256 { + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + } else if l < 65536 { + e.w.writen1(mpExt16) + e.w.writen2(bigen.PutUint16(uint16(l))) + e.w.writen1(xtag) + } else { + e.w.writen1(mpExt32) + e.w.writen4(bigen.PutUint32(uint32(l))) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriverBytes) WriteArrayStart(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriverBytes) WriteMapStart(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriverBytes) WriteArrayEmpty() { + + e.w.writen1(mpFixArrayMin) +} + +func (e *msgpackEncDriverBytes) WriteMapEmpty() { + + e.w.writen1(mpFixMapMin) +} + +func (e *msgpackEncDriverBytes) EncodeString(s string) { + var ct msgpackContainerType + if e.h.WriteExt { + if e.h.StringToRaw { + ct = msgpackContainerBin + } else { + ct = msgpackContainerStr + } + } else { + ct = msgpackContainerRawLegacy + } + e.writeContainerLen(ct, len(s)) + if len(s) > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriverBytes) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) } + +func (e *msgpackEncDriverBytes) EncodeStringBytesRaw(bs []byte) { + if e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(bs)) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, len(bs)) + } + if len(bs) > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriverBytes) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *msgpackEncDriverBytes) writeNilOr(v byte) { + if !e.h.NilCollectionToZeroLength { + v = mpNil + } + e.w.writen1(v) +} + +func (e *msgpackEncDriverBytes) writeNilArray() { + e.writeNilOr(mpFixArrayMin) +} + +func (e *msgpackEncDriverBytes) writeNilMap() { + e.writeNilOr(mpFixMapMin) +} + +func (e *msgpackEncDriverBytes) writeNilBytes() { + 
e.writeNilOr(mpFixStrMin) +} + +func (e *msgpackEncDriverBytes) writeContainerLen(ct msgpackContainerType, l int) { + if ct.fixCutoff > 0 && l < int(ct.fixCutoff) { + e.w.writen1(ct.bFixMin | byte(l)) + } else if ct.b8 > 0 && l < 256 { + e.w.writen2(ct.b8, uint8(l)) + } else if l < 65536 { + e.w.writen1(ct.b16) + e.w.writen2(bigen.PutUint16(uint16(l))) + } else { + e.w.writen1(ct.b32) + e.w.writen4(bigen.PutUint32(uint32(l))) + } +} + +func (d *msgpackDecDriverBytes) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + n := d.d.naked() + var decodeFurther bool + + switch bd { + case mpNil: + n.v = valueTypeNil + d.bdRead = false + case mpFalse: + n.v = valueTypeBool + n.b = false + case mpTrue: + n.v = valueTypeBool + n.b = true + + case mpFloat: + n.v = valueTypeFloat + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4()))) + case mpDouble: + n.v = valueTypeFloat + n.f = math.Float64frombits(bigen.Uint64(d.r.readn8())) + + case mpUint8: + n.v = valueTypeUint + n.u = uint64(d.r.readn1()) + case mpUint16: + n.v = valueTypeUint + n.u = uint64(bigen.Uint16(d.r.readn2())) + case mpUint32: + n.v = valueTypeUint + n.u = uint64(bigen.Uint32(d.r.readn4())) + case mpUint64: + n.v = valueTypeUint + n.u = uint64(bigen.Uint64(d.r.readn8())) + + case mpInt8: + n.v = valueTypeInt + n.i = int64(int8(d.r.readn1())) + case mpInt16: + n.v = valueTypeInt + n.i = int64(int16(bigen.Uint16(d.r.readn2()))) + case mpInt32: + n.v = valueTypeInt + n.i = int64(int32(bigen.Uint32(d.r.readn4()))) + case mpInt64: + n.v = valueTypeInt + n.i = int64(int64(bigen.Uint64(d.r.readn8()))) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + d.d.fauxUnionReadRawBytes(d, d.h.WriteExt, d.h.RawToString) + + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + n.v = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + n.v = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + n.v = valueTypeExt + clen := d.readExtLen() + n.u = uint64(d.r.readn1()) + if n.u == uint64(mpTimeExtTagU) { + n.v = valueTypeTime + n.t = d.decodeTime(clen) + } else { + n.l = d.r.readx(uint(clen)) + } + default: + halt.errorf("cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) + } + } + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } +} + +func (d *msgpackDecDriverBytes) nextValueBytes() (v []byte) { + if !d.bdRead { + d.readNextBd() + } + d.r.startRecording() + d.nextValueBytesBdReadR() + v = d.r.stopRecording() + d.bdRead = false + return +} + +func (d *msgpackDecDriverBytes) nextValueBytesBdReadR() { + bd := d.bd + + var clen uint + + switch bd { + case mpNil, mpFalse, mpTrue: + case mpUint8, mpInt8: + d.r.readn1() + case mpUint16, mpInt16: + d.r.skip(2) + case mpFloat, mpUint32, mpInt32: + d.r.skip(4) + case mpDouble, mpUint64, mpInt64: + d.r.skip(8) + case mpStr8, mpBin8: + clen = uint(d.r.readn1()) + d.r.skip(clen) + case mpStr16, mpBin16: + x := d.r.readn2() + clen = uint(bigen.Uint16(x)) + d.r.skip(clen) 
+ case mpStr32, mpBin32: + x := d.r.readn4() + clen = uint(bigen.Uint32(x)) + d.r.skip(clen) + case mpFixExt1: + d.r.readn1() + d.r.readn1() + case mpFixExt2: + d.r.readn1() + d.r.skip(2) + case mpFixExt4: + d.r.readn1() + d.r.skip(4) + case mpFixExt8: + d.r.readn1() + d.r.skip(8) + case mpFixExt16: + d.r.readn1() + d.r.skip(16) + case mpExt8: + clen = uint(d.r.readn1()) + d.r.readn1() + d.r.skip(clen) + case mpExt16: + x := d.r.readn2() + clen = uint(bigen.Uint16(x)) + d.r.readn1() + d.r.skip(clen) + case mpExt32: + x := d.r.readn4() + clen = uint(bigen.Uint32(x)) + d.r.readn1() + d.r.skip(clen) + case mpArray16: + x := d.r.readn2() + clen = uint(bigen.Uint16(x)) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + } + case mpArray32: + x := d.r.readn4() + clen = uint(bigen.Uint32(x)) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + } + case mpMap16: + x := d.r.readn2() + clen = uint(bigen.Uint16(x)) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + case mpMap32: + x := d.r.readn4() + clen = uint(bigen.Uint32(x)) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + case bd >= mpFixStrMin && bd <= mpFixStrMax: + clen = uint(mpFixStrMin ^ bd) + d.r.skip(clen) + case bd >= mpFixArrayMin && bd <= mpFixArrayMax: + clen = uint(mpFixArrayMin ^ bd) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + } + case bd >= mpFixMapMin && bd <= mpFixMapMax: + clen = uint(mpFixMapMin ^ bd) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + default: + halt.errorf("nextValueBytes: cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) + } + } + return +} + +func (d *msgpackDecDriverBytes) decFloat4Int32() (f float32) { + fbits := bigen.Uint32(d.r.readn4()) + f = math.Float32frombits(fbits) + if !noFrac32(fbits) { + halt.errorf("assigning integer value from float32 with a fraction: %v", f) + } + return +} + +func (d *msgpackDecDriverBytes) decFloat4Int64() (f float64) { + fbits := bigen.Uint64(d.r.readn8()) + f = math.Float64frombits(fbits) + if !noFrac64(fbits) { + halt.errorf("assigning integer value from float64 with a fraction: %v", f) + } + return +} + +func (d *msgpackDecDriverBytes) DecodeInt64() (i int64) { + if d.advanceNil() { + return + } + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(bigen.Uint16(d.r.readn2()))) + case mpUint32: + i = int64(uint64(bigen.Uint32(d.r.readn4()))) + case mpUint64: + i = int64(bigen.Uint64(d.r.readn8())) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(bigen.Uint16(d.r.readn2()))) + case mpInt32: + i = int64(int32(bigen.Uint32(d.r.readn4()))) + case mpInt64: + i = int64(bigen.Uint64(d.r.readn8())) + case mpFloat: + i = int64(d.decFloat4Int32()) + case mpDouble: + i = int64(d.decFloat4Int64()) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + halt.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + } + } + d.bdRead = false + return +} + 
+func (d *msgpackDecDriverBytes) DecodeUint64() (ui uint64) { + if d.advanceNil() { + return + } + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(bigen.Uint16(d.r.readn2())) + case mpUint32: + ui = uint64(bigen.Uint32(d.r.readn4())) + case mpUint64: + ui = bigen.Uint64(d.r.readn8()) + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + halt.errorf("assigning negative signed value: %v, to unsigned type", i) + } + case mpInt16: + if i := int64(int16(bigen.Uint16(d.r.readn2()))); i >= 0 { + ui = uint64(i) + } else { + halt.errorf("assigning negative signed value: %v, to unsigned type", i) + } + case mpInt32: + if i := int64(int32(bigen.Uint32(d.r.readn4()))); i >= 0 { + ui = uint64(i) + } else { + halt.errorf("assigning negative signed value: %v, to unsigned type", i) + } + case mpInt64: + if i := int64(bigen.Uint64(d.r.readn8())); i >= 0 { + ui = uint64(i) + } else { + halt.errorf("assigning negative signed value: %v, to unsigned type", i) + } + case mpFloat: + if f := d.decFloat4Int32(); f >= 0 { + ui = uint64(f) + } else { + halt.errorf("assigning negative float value: %v, to unsigned type", f) + } + case mpDouble: + if f := d.decFloat4Int64(); f >= 0 { + ui = uint64(f) + } else { + halt.errorf("assigning negative float value: %v, to unsigned type", f) + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + halt.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd)) + default: + halt.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + } + } + d.bdRead = false + return +} + +func (d *msgpackDecDriverBytes) DecodeFloat64() (f float64) { + if d.advanceNil() { + return + } + if d.bd == mpFloat { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4()))) + } else if d.bd == mpDouble { + f = math.Float64frombits(bigen.Uint64(d.r.readn8())) + } else { + f = float64(d.DecodeInt64()) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriverBytes) DecodeBool() (b bool) { + if d.advanceNil() { + return + } + if d.bd == mpFalse || d.bd == 0 { + + } else if d.bd == mpTrue || d.bd == 1 { + b = true + } else { + halt.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriverBytes) DecodeBytes() (bs []byte, state dBytesAttachState) { + if d.advanceNil() { + return + } + + var cond bool + bd := d.bd + var clen int + if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || + (bd >= mpFixStrMin && bd <= mpFixStrMax) { + clen = d.readContainerLen(msgpackContainerStr) + } else if bd == mpArray16 || bd == mpArray32 || + (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + slen := d.ReadArrayStart() + bs, cond = usableByteSlice(d.d.buf, slen) + for i := 0; i < len(bs); i++ { + bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8)) + } + for i := len(bs); i < slen; i++ { + bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) + } + if cond { + d.d.buf = bs + } + state = dBytesAttachBuffer + return + } else { + halt.errorf("invalid byte descriptor for decoding bytes, got: 0x%x", d.bd) + } + + d.bdRead = false + bs, cond = d.r.readxb(uint(clen)) + state = d.d.attachState(cond) + return +} + +func (d *msgpackDecDriverBytes) DecodeStringAsBytes() (out []byte, state dBytesAttachState) { + out, state 
= d.DecodeBytes() + if d.h.ValidateUnicode && !utf8.Valid(out) { + halt.errorf("DecodeStringAsBytes: invalid UTF-8: %s", out) + } + return +} + +func (d *msgpackDecDriverBytes) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *msgpackDecDriverBytes) advanceNil() (null bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpNil { + d.bdRead = false + return true + } + return +} + +func (d *msgpackDecDriverBytes) TryNil() (v bool) { + return d.advanceNil() +} + +func (d *msgpackDecDriverBytes) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + if bd == mpNil { + d.bdRead = false + return valueTypeNil + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + return valueTypeBytes + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || + (bd >= mpFixStrMin && bd <= mpFixStrMax) { + if d.h.WriteExt || d.h.RawToString { + return valueTypeString + } + return valueTypeBytes + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + return valueTypeArray + } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + return valueTypeMap + } + return valueTypeUnset +} + +func (d *msgpackDecDriverBytes) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + if bd == ct.b8 { + clen = int(d.r.readn1()) + } else if bd == ct.b16 { + clen = int(bigen.Uint16(d.r.readn2())) + } else if bd == ct.b32 { + clen = int(bigen.Uint32(d.r.readn4())) + } else if (ct.bFixMin & bd) == ct.bFixMin { + clen = int(ct.bFixMin ^ bd) + } else { + halt.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriverBytes) ReadMapStart() int { + if d.advanceNil() { + return containerLenNil + } + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriverBytes) ReadArrayStart() int { + if d.advanceNil() { + return containerLenNil + } + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriverBytes) readExtLen() (clen int) { + switch d.bd { + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(bigen.Uint16(d.r.readn2())) + case mpExt32: + clen = int(bigen.Uint32(d.r.readn4())) + default: + halt.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + } + return +} + +func (d *msgpackDecDriverBytes) DecodeTime() (t time.Time) { + + if d.advanceNil() { + return + } + bd := d.bd + var clen int + if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || + (bd >= mpFixStrMin && bd <= mpFixStrMax) { + clen = d.readContainerLen(msgpackContainerStr) + } else { + + d.bdRead = false + b2 := d.r.readn1() + if d.bd == mpFixExt4 && b2 == mpTimeExtTagU { + clen = 4 + } else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU { + clen = 8 + } else if d.bd == mpExt8 && b2 == 12 && d.r.readn1() == mpTimeExtTagU { + clen = 12 + } else { + halt.errorf("invalid stream for decoding time as extension: got 0x%x, 0x%x", d.bd, b2) + } + } + return d.decodeTime(clen) +} + +func (d *msgpackDecDriverBytes) decodeTime(clen int) (t time.Time) { + d.bdRead = false + switch clen { + case 4: + t = time.Unix(int64(bigen.Uint32(d.r.readn4())), 0).UTC() + case 8: + tv := bigen.Uint64(d.r.readn8()) + t = 
time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC() + case 12: + nsec := bigen.Uint32(d.r.readn4()) + sec := bigen.Uint64(d.r.readn8()) + t = time.Unix(int64(sec), int64(nsec)).UTC() + default: + halt.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen) + } + return +} + +func (d *msgpackDecDriverBytes) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + xbs, _, _, ok := d.decodeExtV(ext != nil, xtag) + if !ok { + return + } + if ext == SelfExt { + sideDecode(d.h, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv, xbs, basetype, true) }) + } else { + ext.ReadExt(rv, xbs) + } +} + +func (d *msgpackDecDriverBytes) DecodeRawExt(re *RawExt) { + xbs, realxtag, state, ok := d.decodeExtV(false, 0) + if !ok { + return + } + re.Tag = uint64(realxtag) + re.setData(xbs, state >= dBytesAttachViewZerocopy) +} + +func (d *msgpackDecDriverBytes) decodeExtV(verifyTag bool, xtagIn uint64) (xbs []byte, xtag byte, bstate dBytesAttachState, ok bool) { + if xtagIn > 0xff { + halt.errorf("ext: tag must be <= 0xff; got: %v", xtagIn) + } + if d.advanceNil() { + return + } + tag := uint8(xtagIn) + xbd := d.bd + if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { + xbs, bstate = d.DecodeBytes() + } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || + (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { + xbs, bstate = d.DecodeStringAsBytes() + } else { + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + halt.errorf("wrong extension tag - got %b, expecting %v", xtag, tag) + } + xbs, ok = d.r.readxb(uint(clen)) + bstate = d.d.attachState(ok) + + } + d.bdRead = false + ok = true + return +} + +func (d *msgpackEncDriverBytes) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*MsgpackHandle) + d.e = shared + if shared.bytes { + fp = msgpackFpEncBytes + } else { + fp = msgpackFpEncIO + } + + d.init2(enc) + return +} + +func (e *msgpackEncDriverBytes) writeBytesAsis(b []byte) { e.w.writeb(b) } + +func (e *msgpackEncDriverBytes) writerEnd() { e.w.end() } + +func (e *msgpackEncDriverBytes) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *msgpackEncDriverBytes) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +func (d *msgpackDecDriverBytes) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*MsgpackHandle) + d.d = shared + if shared.bytes { + fp = msgpackFpDecBytes + } else { + fp = msgpackFpDecIO + } + + d.init2(dec) + return +} + +func (d *msgpackDecDriverBytes) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *msgpackDecDriverBytes) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *msgpackDecDriverBytes) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +func (d *msgpackDecDriverBytes) descBd() string { + return sprintf("%v (%s)", d.bd, mpdesc(d.bd)) +} + +func (d *msgpackDecDriverBytes) DecodeFloat32() (f float32) { + return float32(chkOvf.Float32V(d.DecodeFloat64())) +} + +type helperEncDriverMsgpackIO struct{} +type encFnMsgpackIO struct { + i encFnInfo + fe func(*encoderMsgpackIO, *encFnInfo, reflect.Value) +} +type encRtidFnMsgpackIO struct { + rtid uintptr + fn *encFnMsgpackIO +} +type encoderMsgpackIO struct { + dh helperEncDriverMsgpackIO + fp *fastpathEsMsgpackIO + e msgpackEncDriverIO + encoderBase +} +type helperDecDriverMsgpackIO struct{} +type decFnMsgpackIO struct { + i 
decFnInfo + fd func(*decoderMsgpackIO, *decFnInfo, reflect.Value) +} +type decRtidFnMsgpackIO struct { + rtid uintptr + fn *decFnMsgpackIO +} +type decoderMsgpackIO struct { + dh helperDecDriverMsgpackIO + fp *fastpathDsMsgpackIO + d msgpackDecDriverIO + decoderBase +} +type msgpackEncDriverIO struct { + noBuiltInTypes + encDriverNoopContainerWriter + encDriverNoState + encDriverContainerNoTrackerT + encInit2er + + h *MsgpackHandle + e *encoderBase + w bufioEncWriter +} +type msgpackDecDriverIO struct { + decDriverNoopContainerReader + decDriverNoopNumberHelper + decInit2er + + h *MsgpackHandle + d *decoderBase + r ioDecReader + + bdAndBdread + + noBuiltInTypes +} + +func (e *encoderMsgpackIO) rawExt(_ *encFnInfo, rv reflect.Value) { + if re := rv2i(rv).(*RawExt); re == nil { + e.e.EncodeNil() + } else { + e.e.EncodeRawExt(re) + } +} + +func (e *encoderMsgpackIO) ext(f *encFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (e *encoderMsgpackIO) selferMarshal(_ *encFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(&Encoder{e}) +} + +func (e *encoderMsgpackIO) binaryMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) +} + +func (e *encoderMsgpackIO) textMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) +} + +func (e *encoderMsgpackIO) jsonMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) +} + +func (e *encoderMsgpackIO) raw(_ *encFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *encoderMsgpackIO) encodeComplex64(v complex64) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat32(real(v)) +} + +func (e *encoderMsgpackIO) encodeComplex128(v complex128) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat64(real(v)) +} + +func (e *encoderMsgpackIO) kBool(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeBool(rvGetBool(rv)) +} + +func (e *encoderMsgpackIO) kTime(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeTime(rvGetTime(rv)) +} + +func (e *encoderMsgpackIO) kString(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeString(rvGetString(rv)) +} + +func (e *encoderMsgpackIO) kFloat32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(rvGetFloat32(rv)) +} + +func (e *encoderMsgpackIO) kFloat64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rvGetFloat64(rv)) +} + +func (e *encoderMsgpackIO) kComplex64(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex64(rvGetComplex64(rv)) +} + +func (e *encoderMsgpackIO) kComplex128(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex128(rvGetComplex128(rv)) +} + +func (e *encoderMsgpackIO) kInt(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt(rv))) +} + +func (e *encoderMsgpackIO) kInt8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt8(rv))) +} + +func (e *encoderMsgpackIO) kInt16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt16(rv))) +} + +func (e *encoderMsgpackIO) kInt32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt32(rv))) +} + +func (e *encoderMsgpackIO) kInt64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt64(rv))) +} + +func (e *encoderMsgpackIO) kUint(_ *encFnInfo, rv reflect.Value) { + 
e.e.EncodeUint(uint64(rvGetUint(rv))) +} + +func (e *encoderMsgpackIO) kUint8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint8(rv))) +} + +func (e *encoderMsgpackIO) kUint16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint16(rv))) +} + +func (e *encoderMsgpackIO) kUint32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint32(rv))) +} + +func (e *encoderMsgpackIO) kUint64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint64(rv))) +} + +func (e *encoderMsgpackIO) kUintptr(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUintptr(rv))) +} + +func (e *encoderMsgpackIO) kSeqFn(rt reflect.Type) (fn *encFnMsgpackIO) { + + if rt = baseRT(rt); rt.Kind() != reflect.Interface { + fn = e.fn(rt) + } + return +} + +func (e *encoderMsgpackIO) kArrayWMbs(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(l) + e.mapStart(l >> 1) + + var fn *encFnMsgpackIO + builtin := ti.tielem.flagEncBuiltin + if !builtin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerMapKey + e.e.WriteMapElemKey(true) + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(false) + } else { + e.mapElemValue() + } + } + e.c = 0 + e.e.WriteMapEnd() + +} + +func (e *encoderMsgpackIO) kArrayW(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l <= 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(l) + + var fn *encFnMsgpackIO + if !ti.tielem.flagEncBuiltin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerArrayElem + e.e.WriteArrayElem(true) + builtin := ti.tielem.flagEncBuiltin + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + e.c = containerArrayElem + e.e.WriteArrayElem(false) + } + + e.c = 0 + e.e.WriteArrayEnd() +} + +func (e *encoderMsgpackIO) kChan(f *encFnInfo, rv reflect.Value) { + if f.ti.chandir&uint8(reflect.RecvDir) == 0 { + halt.errorStr("send-only channel cannot be encoded") + } + if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) { + e.kSliceBytesChan(rv) + return + } + rtslice := reflect.SliceOf(f.ti.elem) + rv = chanToSlice(rv, rtslice, e.h.ChanRecvTimeout) + ti := e.h.getTypeInfo(rt2id(rtslice), rtslice) + if f.ti.mbs { + e.kArrayWMbs(rv, ti, true) + } else { + e.kArrayW(rv, ti, true) + } +} + +func (e *encoderMsgpackIO) kSlice(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, true) + } else if f.ti.rtid == uint8SliceTypId || uint8TypId == rt2id(f.ti.elem) { + + e.e.EncodeBytes(rvGetBytes(rv)) + } else { + e.kArrayW(rv, f.ti, true) + } +} + +func (e *encoderMsgpackIO) kArray(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, false) + } else if handleBytesWithinKArray && uint8TypId == rt2id(f.ti.elem) { + e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, nil)) + } else { + e.kArrayW(rv, f.ti, false) + } +} + +func (e *encoderMsgpackIO) kSliceBytesChan(rv reflect.Value) { + + bs0 := e.blist.peek(32, true) + bs := bs0 + + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + +L1: + switch timeout := e.h.ChanRecvTimeout; { + case 
timeout == 0: + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + + break L1 + } + } + default: + for b := range ch { + bs = append(bs, b) + } + } + + e.e.EncodeBytes(bs) + e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.blist.put(bs0) + } +} + +func (e *encoderMsgpackIO) kStructFieldKey(keyType valueType, encName string) { + + if keyType == valueTypeString { + e.e.EncodeString(encName) + } else if keyType == valueTypeInt { + e.e.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + e.e.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + e.e.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64))) + } else { + halt.errorStr2("invalid struct key type: ", keyType.String()) + } + +} + +func (e *encoderMsgpackIO) kStructSimple(f *encFnInfo, rv reflect.Value) { + _ = e.e + tisfi := f.ti.sfi.source() + + chkCirRef := e.h.CheckCircularRef + var si *structFieldInfo + var j int + + if f.ti.toArray || e.h.StructToArray { + if len(tisfi) == 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } else { + if len(tisfi) == 0 { + e.e.WriteMapEmpty() + return + } + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + e.mapStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.EncodeStringNoEscape4Json(si.encName) + e.mapElemValue() + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteMapEnd() + } +} + +func (e *encoderMsgpackIO) kStruct(f *encFnInfo, rv reflect.Value) { + _ = e.e + ti := f.ti + toMap := !(ti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if ti.flagMissingFielder { + toMap = true + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + } else if ti.flagMissingFielderPtr { + toMap = true + if rv.CanAddr() { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder).CodecMissingFields() + } else { + mf = rv2i(e.addrRV(rv, ti.rt, ti.ptr)).(MissingFielder).CodecMissingFields() + } + } + newlen := len(mf) + tisfi := ti.sfi.source() + newlen += len(tisfi) + + var fkvs = e.slist.get(newlen)[:newlen] + + recur := e.h.RecursiveEmptyCheck + chkCirRef := e.h.CheckCircularRef + + var xlen int + + var kv sfiRv + var j int + var sf encStructFieldObj + if toMap { + newlen = 0 + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + for _, si := range tisfi { + + if si.omitEmpty { + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyValue(kv.r, e.h.TypeInfos, recur) { + continue + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[newlen] = kv + newlen++ + } + + var mf2s []stringIntf + if len(mf) != 0 { + mf2s = make([]stringIntf, 0, len(mf)) + for k, v := range mf { + if k == "" { + continue + } + if ti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur) { + continue + } + mf2s = append(mf2s, stringIntf{k, v}) + } + } + + xlen = newlen + len(mf2s) + if xlen == 0 { + e.e.WriteMapEmpty() + goto END + } + + e.mapStart(xlen) + + if len(mf2s) != 0 && e.h.Canonical 
{ + mf2w := make([]encStructFieldObj, newlen+len(mf2s)) + for j = 0; j < newlen; j++ { + kv = fkvs[j] + mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, true, + !kv.v.encNameEscape4Json, kv.v.encBuiltin} + } + for _, v := range mf2s { + mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false, false} + j++ + } + sort.Sort((encStructFieldObjSlice)(mf2w)) + for j, sf = range mf2w { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && sf.noEsc4json { + e.e.EncodeStringNoEscape4Json(sf.key) + } else { + e.kStructFieldKey(ti.keyType, sf.key) + } + e.mapElemValue() + if sf.isRv { + if sf.builtin { + e.encodeIB(rv2i(baseRVRV(sf.rv))) + } else { + e.encodeValue(sf.rv, nil) + } + } else { + if !e.encodeBuiltin(sf.intf) { + e.encodeR(reflect.ValueOf(sf.intf)) + } + + } + } + } else { + keytyp := ti.keyType + for j = 0; j < newlen; j++ { + kv = fkvs[j] + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && !kv.v.encNameEscape4Json { + e.e.EncodeStringNoEscape4Json(kv.v.encName) + } else { + e.kStructFieldKey(keytyp, kv.v.encName) + } + e.mapElemValue() + if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + for _, v := range mf2s { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.kStructFieldKey(keytyp, v.v) + e.mapElemValue() + if !e.encodeBuiltin(v.i) { + e.encodeR(reflect.ValueOf(v.i)) + } + + j++ + } + } + + e.c = 0 + e.e.WriteMapEnd() + } else { + newlen = len(tisfi) + for i, si := range tisfi { + + if si.omitEmpty { + + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyContainerValue(kv.r, e.h.TypeInfos, recur) { + kv.r = reflect.Value{} + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[i] = kv + } + + if newlen == 0 { + e.e.WriteArrayEmpty() + goto END + } + + e.arrayStart(newlen) + for j = 0; j < newlen; j++ { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + kv = fkvs[j] + if !kv.r.IsValid() { + e.e.EncodeNil() + } else if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } + +END: + + e.slist.put(fkvs) +} + +func (e *encoderMsgpackIO) kMap(f *encFnInfo, rv reflect.Value) { + _ = e.e + l := rvLenMap(rv) + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.mapStart(l) + + var keyFn, valFn *encFnMsgpackIO + + ktypeKind := reflect.Kind(f.ti.keykind) + vtypeKind := reflect.Kind(f.ti.elemkind) + + rtval := f.ti.elem + rtvalkind := vtypeKind + for rtvalkind == reflect.Ptr { + rtval = rtval.Elem() + rtvalkind = rtval.Kind() + } + if rtvalkind != reflect.Interface { + valFn = e.fn(rtval) + } + + var rvv = mapAddrLoopvarRV(f.ti.elem, vtypeKind) + + rtkey := f.ti.key + var keyTypeIsString = stringTypId == rt2id(rtkey) + if keyTypeIsString { + keyFn = e.fn(rtkey) + } else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + keyFn = e.fn(rtkey) + } + } + + if e.h.Canonical { + e.kMapCanonical(f.ti, rv, rvv, keyFn, valFn) + e.c = 0 + e.e.WriteMapEnd() + return + } + + var rvk = mapAddrLoopvarRV(f.ti.key, ktypeKind) + + var it mapIter + mapRange(&it, rv, rvk, rvv, true) + + kbuiltin := f.ti.tikey.flagEncBuiltin + vbuiltin := f.ti.tielem.flagEncBuiltin + for j := 0; it.Next(); j++ { + rv = it.Key() + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if keyTypeIsString { + e.e.EncodeString(rvGetString(rv)) + } else if kbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { 
+ e.encodeValue(rv, keyFn) + } + e.mapElemValue() + rv = it.Value() + if vbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(it.Value(), valFn) + } + } + it.Done() + + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderMsgpackIO) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valFn *encFnMsgpackIO) { + _ = e.e + + rtkey := ti.key + rtkeydecl := rtkey.PkgPath() == "" && rtkey.Name() != "" + + mks := rv.MapKeys() + rtkeyKind := rtkey.Kind() + mparams := getMapReqParams(ti) + + switch rtkeyKind { + case reflect.Bool: + + if len(mks) == 2 && mks[0].Bool() { + mks[0], mks[1] = mks[1], mks[0] + } + for i := range mks { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeBool(mks[i].Bool()) + } else { + e.encodeValueNonNil(mks[i], keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mks[i], rvv, mparams), valFn) + } + case reflect.String: + mksv := make([]orderedRv[string], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rvGetString(k) + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeString(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]orderedRv[uint64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeUint(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]orderedRv[int64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeInt(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float32: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat32(float32(mksv[i].v)) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float64: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat64(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + default: + if rtkey == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + slices.SortFunc(mksv, cmpTimeRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeTime(mksv[i].v) + 
e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + break + } + + bs0 := e.blist.get(len(mks) * 16) + mksv := bs0 + mksbv := make([]bytesRv, len(mks)) + + sideEncode(e.hh, &e.h.sideEncPool, func(se encoderI) { + se.ResetBytes(&mksv) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + se.setContainerState(containerMapKey) + se.encodeR(baseRVRV(k)) + se.atEndOfEncode() + se.writerEnd() + v.r = k + v.v = mksv[l:] + } + }) + + slices.SortFunc(mksbv, cmpBytesRv) + for j := range mksbv { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.writeBytesAsis(mksbv[j].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksbv[j].r, rvv, mparams), valFn) + } + e.blist.put(mksv) + if !byteSliceSameData(bs0, mksv) { + e.blist.put(bs0) + } + } +} + +func (e *encoderMsgpackIO) init(h Handle) { + initHandle(h) + callMake(&e.e) + e.hh = h + e.h = h.getBasicHandle() + + e.err = errEncoderNotInitialized + + e.fp = e.e.init(h, &e.encoderBase, e).(*fastpathEsMsgpackIO) + + if e.bytes { + e.rtidFn = &e.h.rtidFnsEncBytes + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtBytes + } else { + e.rtidFn = &e.h.rtidFnsEncIO + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtIO + } + + e.reset() +} + +func (e *encoderMsgpackIO) reset() { + e.e.reset() + if e.ci != nil { + e.ci = e.ci[:0] + } + e.c = 0 + e.calls = 0 + e.seq = 0 + e.err = nil +} + +func (e *encoderMsgpackIO) Encode(v interface{}) (err error) { + + defer panicValToErr(e, callRecoverSentinel, &e.err, &err, debugging) + e.mustEncode(v) + return +} + +func (e *encoderMsgpackIO) MustEncode(v interface{}) { + defer panicValToErr(e, callRecoverSentinel, &e.err, nil, true) + e.mustEncode(v) + return +} + +func (e *encoderMsgpackIO) mustEncode(v interface{}) { + halt.onerror(e.err) + if e.hh == nil { + halt.onerror(errNoFormatHandle) + } + + e.calls++ + if !e.encodeBuiltin(v) { + e.encodeR(reflect.ValueOf(v)) + } + + e.calls-- + if e.calls == 0 { + e.e.atEndOfEncode() + e.e.writerEnd() + } +} + +func (e *encoderMsgpackIO) encodeI(iv interface{}) { + if !e.encodeBuiltin(iv) { + e.encodeR(reflect.ValueOf(iv)) + } +} + +func (e *encoderMsgpackIO) encodeIB(iv interface{}) { + if !e.encodeBuiltin(iv) { + + halt.errorStr("[should not happen] invalid type passed to encodeBuiltin") + } +} + +func (e *encoderMsgpackIO) encodeR(base reflect.Value) { + e.encodeValue(base, nil) +} + +func (e *encoderMsgpackIO) encodeBuiltin(iv interface{}) (ok bool) { + ok = true + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + + case Raw: + e.rawBytes(v) + case string: + e.e.EncodeString(v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case complex64: + e.encodeComplex64(v) + case complex128: + e.encodeComplex128(v) + case time.Time: + e.e.EncodeTime(v) + case []byte: + e.e.EncodeBytes(v) + default: + + ok = !skipFastpathTypeSwitchInDirectCall && e.dh.fastpathEncodeTypeSwitch(iv, e) + } + return +} + +func (e *encoderMsgpackIO) encodeValue(rv reflect.Value, fn *encFnMsgpackIO) { + + var ciPushes int + + var rvp reflect.Value + var rvpValid bool + +RV: + switch rv.Kind() 
{ + case reflect.Ptr: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = true + rvp = rv + rv = rv.Elem() + + if e.h.CheckCircularRef && e.ci.canPushElemKind(rv.Kind()) { + e.ci.push(rv2i(rvp)) + ciPushes++ + } + goto RV + case reflect.Interface: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = false + rvp = reflect.Value{} + rv = rv.Elem() + fn = nil + goto RV + case reflect.Map: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Slice, reflect.Chan: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + goto END + } + + if fn == nil { + fn = e.fn(rv.Type()) + } + + if !fn.i.addrE { + + } else if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + fn.fe(e, &fn.i, rv) + +END: + if ciPushes > 0 { + e.ci.pop(ciPushes) + } +} + +func (e *encoderMsgpackIO) encodeValueNonNil(rv reflect.Value, fn *encFnMsgpackIO) { + + if fn.i.addrE { + if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + } + fn.fe(e, &fn.i, rv) +} + +func (e *encoderMsgpackIO) encodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + e.encodeValue(baseRV(v), e.fn(t)) + } else { + e.encodeValue(baseRV(v), e.fnNoExt(t)) + } +} + +func (e *encoderMsgpackIO) marshalUtf8(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeString(stringView(bs)) + } +} + +func (e *encoderMsgpackIO) marshalAsis(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.writeBytesAsis(bs) + } +} + +func (e *encoderMsgpackIO) marshalRaw(bs []byte, fnerr error) { + halt.onerror(fnerr) + e.e.EncodeBytes(bs) +} + +func (e *encoderMsgpackIO) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + halt.errorBytes("Raw values cannot be encoded: ", v) + } + e.e.writeBytesAsis(v) +} + +func (e *encoderMsgpackIO) fn(t reflect.Type) *encFnMsgpackIO { + return e.dh.encFnViaBH(t, e.rtidFn, e.h, e.fp, false) +} + +func (e *encoderMsgpackIO) fnNoExt(t reflect.Type) *encFnMsgpackIO { + return e.dh.encFnViaBH(t, e.rtidFnNoExt, e.h, e.fp, true) +} + +func (e *encoderMsgpackIO) mapStart(length int) { + e.e.WriteMapStart(length) + e.c = containerMapStart +} + +func (e *encoderMsgpackIO) mapElemValue() { + e.e.WriteMapElemValue() + e.c = containerMapValue +} + +func (e *encoderMsgpackIO) arrayStart(length int) { + e.e.WriteArrayStart(length) + e.c = containerArrayStart +} + +func (e *encoderMsgpackIO) writerEnd() { + e.e.writerEnd() +} + +func (e *encoderMsgpackIO) atEndOfEncode() { + e.e.atEndOfEncode() +} + +func (e *encoderMsgpackIO) Reset(w io.Writer) { + if e.bytes { + halt.onerror(errEncNoResetBytesWithWriter) + } + e.reset() + if w == nil { + w = io.Discard + } + e.e.resetOutIO(w) +} + +func (e *encoderMsgpackIO) ResetBytes(out *[]byte) { + if !e.bytes { + halt.onerror(errEncNoResetWriterWithBytes) + } + e.resetBytes(out) +} + +func (e *encoderMsgpackIO) resetBytes(out *[]byte) { + e.reset() + if out == nil { + out = &bytesEncAppenderDefOut + } + e.e.resetOutBytes(out) +} + +func (helperEncDriverMsgpackIO) newEncoderBytes(out *[]byte, h Handle) *encoderMsgpackIO { + var c1 encoderMsgpackIO + c1.bytes = true + c1.init(h) + c1.ResetBytes(out) + return &c1 +} + +func 
(helperEncDriverMsgpackIO) newEncoderIO(out io.Writer, h Handle) *encoderMsgpackIO { + var c1 encoderMsgpackIO + c1.bytes = false + c1.init(h) + c1.Reset(out) + return &c1 +} + +func (helperEncDriverMsgpackIO) encFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathEsMsgpackIO) (f *fastpathEMsgpackIO, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperEncDriverMsgpackIO) encFindRtidFn(s []encRtidFnMsgpackIO, rtid uintptr) (i uint, fn *encFnMsgpackIO) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperEncDriverMsgpackIO) encFromRtidFnSlice(fns *atomicRtidFnSlice) (s []encRtidFnMsgpackIO) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]encRtidFnMsgpackIO](v)) + } + return +} + +func (dh helperEncDriverMsgpackIO) encFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, + x *BasicHandle, fp *fastpathEsMsgpackIO, checkExt bool) (fn *encFnMsgpackIO) { + return dh.encFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperEncDriverMsgpackIO) encFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsMsgpackIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnMsgpackIO) { + rtid := rt2id(rt) + var sp []encRtidFnMsgpackIO = dh.encFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.encFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.encFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperEncDriverMsgpackIO) encFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsMsgpackIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnMsgpackIO) { + + fn = dh.encFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []encRtidFnMsgpackIO + mu.Lock() + sp = dh.encFromRtidFnSlice(fns) + + if sp == nil { + sp = []encRtidFnMsgpackIO{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.encFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]encRtidFnMsgpackIO, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = encRtidFnMsgpackIO{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperEncDriverMsgpackIO) encFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathEsMsgpackIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnMsgpackIO) { + fn = new(encFnMsgpackIO) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + if rtid == timeTypId && timeBuiltin { + fn.fe = (*encoderMsgpackIO).kTime + } else if rtid == rawTypId { + fn.fe = (*encoderMsgpackIO).raw + } else if rtid == rawExtTypId { + fn.fe = (*encoderMsgpackIO).rawExt + fi.addrE = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*encoderMsgpackIO).ext + if rk == 
reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fe = (*encoderMsgpackIO).selferMarshal + fi.addrE = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fe = (*encoderMsgpackIO).binaryMarshal + fi.addrE = ti.flagBinaryMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fe = (*encoderMsgpackIO).jsonMarshal + fi.addrE = ti.flagJsonMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fe = (*encoderMsgpackIO).textMarshal + fi.addrE = ti.flagTextMarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fe = fp[idx].encfn + } + } else { + + xfe, xrt := dh.encFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf := xfe.encfn + fn.fe = func(e *encoderMsgpackIO, xf *encFnInfo, xrv reflect.Value) { + xfnf(e, xf, rvConvert(xrv, xrt)) + } + } + } + } + if fn.fe == nil { + switch rk { + case reflect.Bool: + fn.fe = (*encoderMsgpackIO).kBool + case reflect.String: + + fn.fe = (*encoderMsgpackIO).kString + case reflect.Int: + fn.fe = (*encoderMsgpackIO).kInt + case reflect.Int8: + fn.fe = (*encoderMsgpackIO).kInt8 + case reflect.Int16: + fn.fe = (*encoderMsgpackIO).kInt16 + case reflect.Int32: + fn.fe = (*encoderMsgpackIO).kInt32 + case reflect.Int64: + fn.fe = (*encoderMsgpackIO).kInt64 + case reflect.Uint: + fn.fe = (*encoderMsgpackIO).kUint + case reflect.Uint8: + fn.fe = (*encoderMsgpackIO).kUint8 + case reflect.Uint16: + fn.fe = (*encoderMsgpackIO).kUint16 + case reflect.Uint32: + fn.fe = (*encoderMsgpackIO).kUint32 + case reflect.Uint64: + fn.fe = (*encoderMsgpackIO).kUint64 + case reflect.Uintptr: + fn.fe = (*encoderMsgpackIO).kUintptr + case reflect.Float32: + fn.fe = (*encoderMsgpackIO).kFloat32 + case reflect.Float64: + fn.fe = (*encoderMsgpackIO).kFloat64 + case reflect.Complex64: + fn.fe = (*encoderMsgpackIO).kComplex64 + case reflect.Complex128: + fn.fe = (*encoderMsgpackIO).kComplex128 + case reflect.Chan: + fn.fe = (*encoderMsgpackIO).kChan + case reflect.Slice: + fn.fe = (*encoderMsgpackIO).kSlice + case reflect.Array: + fn.fe = (*encoderMsgpackIO).kArray + case reflect.Struct: + if ti.simple { + fn.fe = (*encoderMsgpackIO).kStructSimple + } else { + fn.fe = (*encoderMsgpackIO).kStruct + } + case reflect.Map: + fn.fe = (*encoderMsgpackIO).kMap + case reflect.Interface: + + fn.fe = (*encoderMsgpackIO).kErr + default: + + fn.fe = (*encoderMsgpackIO).kErr + } + } + } + return +} +func (d *decoderMsgpackIO) rawExt(f *decFnInfo, rv reflect.Value) { + d.d.DecodeRawExt(rv2i(rv).(*RawExt)) +} + +func (d *decoderMsgpackIO) ext(f *decFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (d *decoderMsgpackIO) selferUnmarshal(_ *decFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(&Decoder{d}) +} + +func (d *decoderMsgpackIO) binaryUnmarshal(_ *decFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs, _ := 
d.d.DecodeBytes() + fnerr := bm.UnmarshalBinary(xbs) + halt.onerror(fnerr) +} + +func (d *decoderMsgpackIO) textUnmarshal(_ *decFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(bytesOKs(d.d.DecodeStringAsBytes())) + halt.onerror(fnerr) +} + +func (d *decoderMsgpackIO) jsonUnmarshal(_ *decFnInfo, rv reflect.Value) { + d.jsonUnmarshalV(rv2i(rv).(jsonUnmarshaler)) +} + +func (d *decoderMsgpackIO) jsonUnmarshalV(tm jsonUnmarshaler) { + + halt.onerror(tm.UnmarshalJSON(d.d.nextValueBytes())) +} + +func (d *decoderMsgpackIO) kErr(_ *decFnInfo, rv reflect.Value) { + halt.errorf("unsupported decoding kind: %s, for %#v", rv.Kind(), rv) + +} + +func (d *decoderMsgpackIO) raw(_ *decFnInfo, rv reflect.Value) { + rvSetBytes(rv, d.rawBytes()) +} + +func (d *decoderMsgpackIO) kString(_ *decFnInfo, rv reflect.Value) { + rvSetString(rv, d.detach2Str(d.d.DecodeStringAsBytes())) +} + +func (d *decoderMsgpackIO) kBool(_ *decFnInfo, rv reflect.Value) { + rvSetBool(rv, d.d.DecodeBool()) +} + +func (d *decoderMsgpackIO) kTime(_ *decFnInfo, rv reflect.Value) { + rvSetTime(rv, d.d.DecodeTime()) +} + +func (d *decoderMsgpackIO) kFloat32(_ *decFnInfo, rv reflect.Value) { + rvSetFloat32(rv, d.d.DecodeFloat32()) +} + +func (d *decoderMsgpackIO) kFloat64(_ *decFnInfo, rv reflect.Value) { + rvSetFloat64(rv, d.d.DecodeFloat64()) +} + +func (d *decoderMsgpackIO) kComplex64(_ *decFnInfo, rv reflect.Value) { + rvSetComplex64(rv, complex(d.d.DecodeFloat32(), 0)) +} + +func (d *decoderMsgpackIO) kComplex128(_ *decFnInfo, rv reflect.Value) { + rvSetComplex128(rv, complex(d.d.DecodeFloat64(), 0)) +} + +func (d *decoderMsgpackIO) kInt(_ *decFnInfo, rv reflect.Value) { + rvSetInt(rv, int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))) +} + +func (d *decoderMsgpackIO) kInt8(_ *decFnInfo, rv reflect.Value) { + rvSetInt8(rv, int8(chkOvf.IntV(d.d.DecodeInt64(), 8))) +} + +func (d *decoderMsgpackIO) kInt16(_ *decFnInfo, rv reflect.Value) { + rvSetInt16(rv, int16(chkOvf.IntV(d.d.DecodeInt64(), 16))) +} + +func (d *decoderMsgpackIO) kInt32(_ *decFnInfo, rv reflect.Value) { + rvSetInt32(rv, int32(chkOvf.IntV(d.d.DecodeInt64(), 32))) +} + +func (d *decoderMsgpackIO) kInt64(_ *decFnInfo, rv reflect.Value) { + rvSetInt64(rv, d.d.DecodeInt64()) +} + +func (d *decoderMsgpackIO) kUint(_ *decFnInfo, rv reflect.Value) { + rvSetUint(rv, uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderMsgpackIO) kUintptr(_ *decFnInfo, rv reflect.Value) { + rvSetUintptr(rv, uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderMsgpackIO) kUint8(_ *decFnInfo, rv reflect.Value) { + rvSetUint8(rv, uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))) +} + +func (d *decoderMsgpackIO) kUint16(_ *decFnInfo, rv reflect.Value) { + rvSetUint16(rv, uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))) +} + +func (d *decoderMsgpackIO) kUint32(_ *decFnInfo, rv reflect.Value) { + rvSetUint32(rv, uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))) +} + +func (d *decoderMsgpackIO) kUint64(_ *decFnInfo, rv reflect.Value) { + rvSetUint64(rv, d.d.DecodeUint64()) +} + +func (d *decoderMsgpackIO) kInterfaceNaked(f *decFnInfo) (rvn reflect.Value) { + + n := d.naked() + d.d.DecodeNaked() + + if decFailNonEmptyIntf && f.ti.numMeth > 0 { + halt.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + } + + switch n.v { + case valueTypeMap: + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == 
mapStrIntfTypId { + var v2 map[string]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + + rvn = rvZeroAddrK(d.h.MapType, reflect.Map) + d.decodeValue(rvn, nil) + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvZeroAddrK(d.h.SliceType, reflect.Slice) + d.decodeValue(rvn, nil) + } + if d.h.PreferArrayOverSlice { + rvn = rvGetArray4Slice(rvn) + } + case valueTypeExt: + tag, bytes := n.u, n.l + bfn := d.h.getExtForTag(tag) + var re = RawExt{Tag: tag} + if bytes == nil { + + if bfn == nil { + d.decode(&re.Value) + rvn = rv4iptr(&re).Elem() + } else if bfn.ext == SelfExt { + rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind()) + d.decodeValue(rvn, d.fnNoExt(bfn.rt)) + } else { + rvn = reflect.New(bfn.rt) + d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext) + rvn = rvn.Elem() + } + } else { + + if bfn == nil { + re.setData(bytes, false) + rvn = rv4iptr(&re).Elem() + } else { + rvn = reflect.New(bfn.rt) + if bfn.ext == SelfExt { + sideDecode(d.hh, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv2i(rvn), bytes, bfn.rt, true) }) + } else { + bfn.ext.ReadExt(rv2i(rvn), bytes) + } + rvn = rvn.Elem() + } + } + + if d.h.PreferPointerForStructOrArray && rvn.CanAddr() { + if rk := rvn.Kind(); rk == reflect.Array || rk == reflect.Struct { + rvn = rvn.Addr() + } + } + case valueTypeNil: + + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + halt.errorStr2("kInterfaceNaked: unexpected valueType: ", n.v.String()) + } + return +} + +func (d *decoderMsgpackIO) kInterface(f *decFnInfo, rv reflect.Value) { + + isnilrv := rvIsNil(rv) + + var rvn reflect.Value + + if d.h.InterfaceReset { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } else if !isnilrv { + decSetNonNilRV2Zero4Intf(rv) + } + return + } + } else if isnilrv { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } + return + } + } else { + + rvn = rv.Elem() + } + + canDecode, _ := isDecodeable(rvn) + + if !canDecode { + rvn2 := d.oneShotAddrRV(rvn.Type(), rvn.Kind()) + rvSetDirect(rvn2, rvn) + rvn = rvn2 + } + + d.decodeValue(rvn, nil) + rvSetIntf(rv, rvn) +} + +func (d *decoderMsgpackIO) kStructField(si *structFieldInfo, rv reflect.Value) { + if d.d.TryNil() { + rv = si.fieldNoAlloc(rv, true) + if rv.IsValid() { + decSetNonNilRV2Zero(rv) + } + } else if si.decBuiltin { + rv = rvAddr(si.fieldAlloc(rv), si.ptrTyp) + d.decode(rv2i(rv)) + } else { + fn := d.fn(si.baseTyp) + rv = si.fieldAlloc(rv) + if fn.i.addrD { + rv = rvAddr(rv, si.ptrTyp) + } + fn.fd(d, &fn.i, rv) + } +} + +func (d *decoderMsgpackIO) kStructSimple(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + 
hasLen := containerLen >= 0 + var rvkencname []byte + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderMsgpackIO) kStruct(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + var mf MissingFielder + if ti.flagMissingFielder { + mf = rv2i(rv).(MissingFielder) + } else if ti.flagMissingFielderPtr { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var name2 []byte + var rvkencname []byte + tkt := ti.keyType + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + + if tkt == valueTypeString { + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + } else if tkt == valueTypeInt { + rvkencname = strconv.AppendInt(d.b[:0], d.d.DecodeInt64(), 10) + } else if tkt == valueTypeUint { + rvkencname = strconv.AppendUint(d.b[:0], d.d.DecodeUint64(), 10) + } else if tkt == valueTypeFloat { + rvkencname = strconv.AppendFloat(d.b[:0], d.d.DecodeFloat64(), 'f', -1, 64) + } else { + halt.errorStr2("invalid struct key type: ", ti.keyType.String()) + } + + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else if mf != nil { + + name2 = append(name2[:0], rvkencname...) 
+ var f interface{} + d.decode(&f) + if !mf.CodecMissingField(name2, f) && d.h.ErrorIfNoField { + halt.errorStr2("no matching struct field when decoding stream map with key: ", stringView(name2)) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderMsgpackIO) kSlice(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + rvCanset := rv.CanSet() + + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + rvbs := rvGetBytes(rv) + if rvCanset { + bs2, bst := d.decodeBytesInto(rvbs, false) + if bst != dBytesIntoParamOut { + rvSetBytes(rv, bs2) + } + } else { + + d.decodeBytesInto(rvbs[:len(rvbs):len(rvbs)], true) + } + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset { + if rvIsNil(rv) { + rvSetDirect(rv, rvSliceZeroCap(ti.rt)) + } else { + rvSetSliceLen(rv, 0) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem0Mut := !scalarBitset.isset(ti.elemkind) + rtelem := ti.elem + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnMsgpackIO + + var rvChanged bool + + var rv0 = rv + var rv9 reflect.Value + + rvlen := rvLenSlice(rv) + rvcap := rvCapSlice(rv) + maxInitLen := d.maxInitLen() + hasLen := containerLenS >= 0 + if hasLen { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen1 := int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + if rvlen1 == rvlen { + } else if rvlen1 <= rvcap { + if rvCanset { + rvlen = rvlen1 + rvSetSliceLen(rv, rvlen) + } + } else if rvCanset { + rvlen = rvlen1 + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && rtelem0Mut { + rvCopySlice(rv, rv0, rtelem) + } + } else if containerLenS != rvlen { + if rvCanset { + rvlen = containerLenS + rvSetSliceLen(rv, rvlen) + } + } + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } + + var j int + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if rvCanset { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into 
non-settable slice") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + + if rvlen < rvcap { + rvlen = rvcap + if rvCanset { + rvSetSliceLen(rv, rvlen) + } else if rvChanged { + rv = rvSlice(rv, rvlen) + } else { + halt.onerror(errExpandSliceCannotChange) + } + } else { + if !(rvCanset || rvChanged) { + halt.onerror(errExpandSliceCannotChange) + } + rv, rvcap, rvCanset = rvGrowSlice(rv, f.ti, rvcap, 1) + + rvlen = rvcap + rvChanged = !rvCanset + } + } + + rv9 = rvArrayIndex(rv, j, f.ti, true) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if j < rvlen { + if rvCanset { + rvSetSliceLen(rv, j) + } else if rvChanged { + rv = rvSlice(rv, j) + } + + } else if j == 0 && rvIsNil(rv) { + if rvCanset { + rv = rvSliceZeroCap(ti.rt) + rvCanset = false + rvChanged = true + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } +} + +func (d *decoderMsgpackIO) kArray(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + ctyp := d.d.ContainerType() + if handleBytesWithinKArray && (ctyp == valueTypeBytes || ctyp == valueTypeString) { + + if ti.elemkind != uint8(reflect.Uint8) { + halt.errorf("bytes/string in stream can decode into array of bytes, but not %v", ti.rt) + } + rvbs := rvGetArrayBytes(rv, nil) + d.decodeBytesInto(rvbs, true) + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var rv9 reflect.Value + + rvlen := rv.Len() + hasLen := containerLenS >= 0 + if hasLen && containerLenS > rvlen { + halt.errorf("cannot decode into array with length: %v, less than container length: %v", any(rvlen), any(containerLenS)) + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + var fn *decFnMsgpackIO + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } else { + fn = d.fn(rtelem) + } + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + d.arrayCannotExpand(rvlen, j+1) + d.swallow() + continue + } + + rv9 = rvArrayIndex(rv, j, f.ti, false) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + 
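// Usage sketch (illustration, not part of the vendored file): the generated
// kSlice/kArray/kMap decode paths above are only reached through the exported
// Encoder/Decoder API. A minimal msgpack round-trip, assuming nothing beyond the
// public codec.MsgpackHandle, codec.NewEncoderBytes and codec.NewDecoderBytes:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.MsgpackHandle

	in := []int{1, 2, 3}
	var buf []byte
	codec.NewEncoderBytes(&buf, &h).MustEncode(in) // writes a msgpack array

	var out []int
	codec.NewDecoderBytes(buf, &h).MustDecode(&out) // decodes via the slice path above
	fmt.Println(out)                                // [1 2 3]
}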
+func (d *decoderMsgpackIO) kChan(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + if ti.chandir&uint8(reflect.SendDir) == 0 { + halt.errorStr("receive-only channel cannot be decoded") + } + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + bs2, _ := d.d.DecodeBytes() + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + return + } + + var rvCanset = rv.CanSet() + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset && rvIsNil(rv) { + rvSetDirect(rv, reflect.MakeChan(ti.rt, 0)) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + useTransient := decUseTransient && ti.elemkind != byte(reflect.Ptr) && ti.tielem.flagCanTransient + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnMsgpackIO + + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + var rvlen int + hasLen := containerLenS >= 0 + maxInitLen := d.maxInitLen() + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if hasLen { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + } else { + rvlen = decDefChanCap + } + if rvCanset { + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } else { + halt.errorStr("cannot decode into non-settable chan") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if rv9.IsValid() { + rvSetZero(rv9) + } else if useTransient { + rv9 = d.perType.TransientAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } else { + rv9 = rvZeroAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } + if !d.d.TryNil() { + d.decodeValueNoCheckNil(rv9, fn) + } + rv.Send(rv9) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } + +} + +func (d *decoderMsgpackIO) kMap(f *decFnInfo, rv reflect.Value) { + _ = d.d + containerLen := d.mapStart(d.d.ReadMapStart()) + ti := f.ti + if rvIsNil(rv) { + rvlen := int(decInferLen(containerLen, d.maxInitLen(), uint(ti.keysize+ti.elemsize))) + rvSetDirect(rv, makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + d.mapEnd() + return + } + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := reflect.Kind(ti.elemkind) + ktypeKind := reflect.Kind(ti.keykind) + mparams := getMapReqParams(ti) + + vtypePtr := vtypeKind == reflect.Ptr + ktypePtr := ktypeKind == reflect.Ptr + + vTransient := decUseTransient && !vtypePtr && ti.tielem.flagCanTransient + + kTransient := vTransient && !ktypePtr && ti.tikey.flagCanTransient + + var vtypeElem reflect.Type + + var keyFn, valFn *decFnMsgpackIO + var ktypeLo, vtypeLo = ktype, vtype + + if ktypeKind == reflect.Ptr { + for ktypeLo = ktype.Elem(); ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + } + + if vtypePtr { + vtypeElem = vtype.Elem() + for vtypeLo 
= vtypeElem; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + } + + rvkMut := !scalarBitset.isset(ti.keykind) + rvvMut := !scalarBitset.isset(ti.elemkind) + rvvCanNil := isnilBitset.isset(ti.elemkind) + + var rvk, rvkn, rvv, rvvn, rvva, rvvz reflect.Value + + var doMapGet, doMapSet bool + + if !d.h.MapValueReset { + if rvvMut && (vtypeKind != reflect.Interface || !d.h.InterfaceReset) { + doMapGet = true + rvva = mapAddrLoopvarRV(vtype, vtypeKind) + } + } + + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen >= 0 + + var kstr2bs []byte + var kstr string + + var mapKeyStringSharesBytesBuf bool + var att dBytesAttachState + + var vElem, kElem reflect.Type + kbuiltin := ti.tikey.flagDecBuiltin && ti.keykind != uint8(reflect.Slice) + vbuiltin := ti.tielem.flagDecBuiltin + if kbuiltin && ktypePtr { + kElem = ti.key.Elem() + } + if vbuiltin && vtypePtr { + vElem = ti.elem.Elem() + } + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + mapKeyStringSharesBytesBuf = false + kstr = "" + if j == 0 { + + if kTransient { + rvk = d.perType.TransientAddr2K(ktype, ktypeKind) + } else { + rvk = rvZeroAddrK(ktype, ktypeKind) + } + if !rvkMut { + rvkn = rvk + } + if !rvvMut { + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + } + if !ktypeIsString && keyFn == nil { + keyFn = d.fn(ktypeLo) + } + if valFn == nil { + valFn = d.fn(vtypeLo) + } + } else if rvkMut { + rvSetZero(rvk) + } else { + rvk = rvkn + } + + d.mapElemKey(j == 0) + + if d.d.TryNil() { + rvSetZero(rvk) + } else if ktypeIsString { + kstr2bs, att = d.d.DecodeStringAsBytes() + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, att) + rvSetString(rvk, kstr) + } else { + if kbuiltin { + if ktypePtr { + if rvIsNil(rvk) { + rvSetDirect(rvk, reflect.New(kElem)) + } + d.decode(rv2i(rvk)) + } else { + d.decode(rv2i(rvAddr(rvk, ti.tikey.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvk, keyFn) + } + + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() && rvk2.Type() == uint8SliceTyp { + kstr2bs = rvGetBytes(rvk2) + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, dBytesAttachView) + rvSetIntf(rvk, rv4istr(kstr)) + } + + } + } + + if mapKeyStringSharesBytesBuf && d.bufio { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + mapKeyStringSharesBytesBuf = false + } + + d.mapElemValue() + + if d.d.TryNil() { + if mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + + if !rvvz.IsValid() { + rvvz = rvZeroK(vtype, vtypeKind) + } + mapSet(rv, rvk, rvvz, mparams) + continue + } + + doMapSet = true + + if !rvvMut { + rvv = rvvn + } else if !doMapGet { + goto NEW_RVV + } else { + rvv = mapGet(rv, rvk, rvva, mparams) + if !rvv.IsValid() || (rvvCanNil && rvIsNil(rvv)) { + goto NEW_RVV + } + switch vtypeKind { + case reflect.Ptr, reflect.Map: + doMapSet = false + case reflect.Interface: + + rvvn = rvv.Elem() + if k := rvvn.Kind(); (k == reflect.Ptr || k == reflect.Map) && !rvIsNil(rvvn) { + d.decodeValueNoCheckNil(rvvn, nil) + continue + } + + rvvn = rvZeroAddrK(vtype, vtypeKind) + rvSetIntf(rvvn, rvv) + rvv = rvvn + default: + + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + rvSetDirect(rvvn, rvv) + rvv = rvvn + } + } 
+ goto DECODE_VALUE_NO_CHECK_NIL + + NEW_RVV: + if vtypePtr { + rvv = reflect.New(vtypeElem) + } else if vTransient { + rvv = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvv = rvZeroAddrK(vtype, vtypeKind) + } + + DECODE_VALUE_NO_CHECK_NIL: + if doMapSet && mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + if vbuiltin { + if vtypePtr { + if rvIsNil(rvv) { + rvSetDirect(rvv, reflect.New(vElem)) + } + d.decode(rv2i(rvv)) + } else { + d.decode(rv2i(rvAddr(rvv, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvv, valFn) + } + if doMapSet { + mapSet(rv, rvk, rvv, mparams) + } + } + + d.mapEnd() +} + +func (d *decoderMsgpackIO) init(h Handle) { + initHandle(h) + callMake(&d.d) + d.hh = h + d.h = h.getBasicHandle() + + d.err = errDecoderNotInitialized + + if d.h.InternString && d.is == nil { + d.is.init() + } + + d.fp = d.d.init(h, &d.decoderBase, d).(*fastpathDsMsgpackIO) + + if d.bytes { + d.rtidFn = &d.h.rtidFnsDecBytes + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtBytes + } else { + d.bufio = d.h.ReaderBufferSize > 0 + d.rtidFn = &d.h.rtidFnsDecIO + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtIO + } + + d.reset() + +} + +func (d *decoderMsgpackIO) reset() { + d.d.reset() + d.err = nil + d.c = 0 + d.depth = 0 + d.calls = 0 + + d.maxdepth = decDefMaxDepth + if d.h.MaxDepth > 0 { + d.maxdepth = d.h.MaxDepth + } + d.mtid = 0 + d.stid = 0 + d.mtr = false + d.str = false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + _, d.mtr = fastpathAvIndex(d.mtid) + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + _, d.str = fastpathAvIndex(d.stid) + } +} + +func (d *decoderMsgpackIO) Reset(r io.Reader) { + if d.bytes { + halt.onerror(errDecNoResetBytesWithReader) + } + d.reset() + if r == nil { + r = &eofReader + } + d.d.resetInIO(r) +} + +func (d *decoderMsgpackIO) ResetBytes(in []byte) { + if !d.bytes { + halt.onerror(errDecNoResetReaderWithBytes) + } + d.resetBytes(in) +} + +func (d *decoderMsgpackIO) resetBytes(in []byte) { + d.reset() + if in == nil { + in = zeroByteSlice + } + d.d.resetInBytes(in) +} + +func (d *decoderMsgpackIO) ResetString(s string) { + d.ResetBytes(bytesView(s)) +} + +func (d *decoderMsgpackIO) Decode(v interface{}) (err error) { + + defer panicValToErr(d, callRecoverSentinel, &d.err, &err, debugging) + d.mustDecode(v) + return +} + +func (d *decoderMsgpackIO) MustDecode(v interface{}) { + defer panicValToErr(d, callRecoverSentinel, &d.err, nil, true) + d.mustDecode(v) + return +} + +func (d *decoderMsgpackIO) mustDecode(v interface{}) { + halt.onerror(d.err) + if d.hh == nil { + halt.onerror(errNoFormatHandle) + } + + d.calls++ + d.decode(v) + d.calls-- +} + +func (d *decoderMsgpackIO) Release() {} + +func (d *decoderMsgpackIO) swallow() { + d.d.nextValueBytes() +} + +func (d *decoderMsgpackIO) nextValueBytes() []byte { + return d.d.nextValueBytes() +} + +func (d *decoderMsgpackIO) decode(iv interface{}) { + _ = d.d + + rv, ok := isNil(iv, true) + if ok { + halt.onerror(errCannotDecodeIntoNil) + } + + switch v := iv.(type) { + + case *string: + *v = d.detach2Str(d.d.DecodeStringAsBytes()) + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + 
*v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *uintptr: + *v = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *float32: + *v = d.d.DecodeFloat32() + case *float64: + *v = d.d.DecodeFloat64() + case *complex64: + *v = complex(d.d.DecodeFloat32(), 0) + case *complex128: + *v = complex(d.d.DecodeFloat64(), 0) + case *[]byte: + *v, _ = d.decodeBytesInto(*v, false) + case []byte: + + d.decodeBytesInto(v[:len(v):len(v)], true) + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(rv4iptr(v), nil) + + case reflect.Value: + if ok, _ = isDecodeable(v); !ok { + d.haltAsNotDecodeable(v) + } + d.decodeValue(v, nil) + + default: + + if skipFastpathTypeSwitchInDirectCall || !d.dh.fastpathDecodeTypeSwitch(iv, d) { + if !rv.IsValid() { + rv = reflect.ValueOf(iv) + } + if ok, _ = isDecodeable(rv); !ok { + d.haltAsNotDecodeable(rv) + } + d.decodeValue(rv, nil) + } + } +} + +func (d *decoderMsgpackIO) decodeValue(rv reflect.Value, fn *decFnMsgpackIO) { + if d.d.TryNil() { + decSetNonNilRV2Zero(rv) + } else { + d.decodeValueNoCheckNil(rv, fn) + } +} + +func (d *decoderMsgpackIO) decodeValueNoCheckNil(rv reflect.Value, fn *decFnMsgpackIO) { + + var rvp reflect.Value + var rvpValid bool +PTR: + if rv.Kind() == reflect.Ptr { + rvpValid = true + if rvIsNil(rv) { + rvSetDirect(rv, reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + goto PTR + } + + if fn == nil { + fn = d.fn(rv.Type()) + } + if fn.i.addrD { + if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else if fn.i.addrDf { + halt.errorStr("cannot decode into a non-pointer value") + } + } + fn.fd(d, &fn.i, rv) +} + +func (d *decoderMsgpackIO) decodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + d.decodeValue(baseRV(v), d.fn(t)) + } else { + d.decodeValue(baseRV(v), d.fnNoExt(t)) + } +} + +func (d *decoderMsgpackIO) structFieldNotFound(index int, rvkencname string) { + + if d.h.ErrorIfNoField { + if index >= 0 { + halt.errorInt("no matching struct field found when decoding stream array at index ", int64(index)) + } else if rvkencname != "" { + halt.errorStr2("no matching struct field found when decoding stream map with key ", rvkencname) + } + } + d.swallow() +} + +func (d *decoderMsgpackIO) decodeBytesInto(out []byte, mustFit bool) (v []byte, state dBytesIntoState) { + v, att := d.d.DecodeBytes() + if cap(v) == 0 || (att >= dBytesAttachViewZerocopy && !mustFit) { + + return + } + if len(v) == 0 { + v = zeroByteSlice + return + } + if len(out) == len(v) { + state = dBytesIntoParamOut + } else if cap(out) >= len(v) { + out = out[:len(v)] + state = dBytesIntoParamOutSlice + } else if mustFit { + halt.errorf("bytes capacity insufficient for decoded bytes: got/expected: %d/%d", len(v), len(out)) + } else { + out = make([]byte, len(v)) + state = dBytesIntoNew + } + copy(out, v) + v = out + return +} + +func (d *decoderMsgpackIO) rawBytes() (v []byte) { + + v = d.d.nextValueBytes() + if d.bytes && !d.h.ZeroCopy { + vv := make([]byte, len(v)) + copy(vv, v) + v = vv + } + return +} + +func (d *decoderMsgpackIO) wrapErr(v error, err *error) { + *err = wrapCodecErr(v, d.hh.Name(), d.d.NumBytesRead(), false) +} + +func (d *decoderMsgpackIO) NumBytesRead() int { + return 
d.d.NumBytesRead() +} + +func (d *decoderMsgpackIO) containerNext(j, containerLen int, hasLen bool) bool { + + if hasLen { + return j < containerLen + } + return !d.d.CheckBreak() +} + +func (d *decoderMsgpackIO) mapElemKey(firstTime bool) { + d.d.ReadMapElemKey(firstTime) + d.c = containerMapKey +} + +func (d *decoderMsgpackIO) mapElemValue() { + d.d.ReadMapElemValue() + d.c = containerMapValue +} + +func (d *decoderMsgpackIO) mapEnd() { + d.d.ReadMapEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderMsgpackIO) arrayElem(firstTime bool) { + d.d.ReadArrayElem(firstTime) + d.c = containerArrayElem +} + +func (d *decoderMsgpackIO) arrayEnd() { + d.d.ReadArrayEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderMsgpackIO) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) { + + var vv interface{} + d.decode(&vv) + ext.UpdateExt(v, vv) + +} + +func (d *decoderMsgpackIO) fn(t reflect.Type) *decFnMsgpackIO { + return d.dh.decFnViaBH(t, d.rtidFn, d.h, d.fp, false) +} + +func (d *decoderMsgpackIO) fnNoExt(t reflect.Type) *decFnMsgpackIO { + return d.dh.decFnViaBH(t, d.rtidFnNoExt, d.h, d.fp, true) +} + +func (helperDecDriverMsgpackIO) newDecoderBytes(in []byte, h Handle) *decoderMsgpackIO { + var c1 decoderMsgpackIO + c1.bytes = true + c1.init(h) + c1.ResetBytes(in) + return &c1 +} + +func (helperDecDriverMsgpackIO) newDecoderIO(in io.Reader, h Handle) *decoderMsgpackIO { + var c1 decoderMsgpackIO + c1.init(h) + c1.Reset(in) + return &c1 +} + +func (helperDecDriverMsgpackIO) decFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathDsMsgpackIO) (f *fastpathDMsgpackIO, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperDecDriverMsgpackIO) decFindRtidFn(s []decRtidFnMsgpackIO, rtid uintptr) (i uint, fn *decFnMsgpackIO) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperDecDriverMsgpackIO) decFromRtidFnSlice(fns *atomicRtidFnSlice) (s []decRtidFnMsgpackIO) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]decRtidFnMsgpackIO](v)) + } + return +} + +func (dh helperDecDriverMsgpackIO) decFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, x *BasicHandle, fp *fastpathDsMsgpackIO, + checkExt bool) (fn *decFnMsgpackIO) { + return dh.decFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperDecDriverMsgpackIO) decFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsMsgpackIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnMsgpackIO) { + rtid := rt2id(rt) + var sp []decRtidFnMsgpackIO = dh.decFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.decFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.decFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperDecDriverMsgpackIO) decFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsMsgpackIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnMsgpackIO) { + + 
fn = dh.decFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []decRtidFnMsgpackIO + mu.Lock() + sp = dh.decFromRtidFnSlice(fns) + + if sp == nil { + sp = []decRtidFnMsgpackIO{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.decFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]decRtidFnMsgpackIO, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = decRtidFnMsgpackIO{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperDecDriverMsgpackIO) decFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathDsMsgpackIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnMsgpackIO) { + fn = new(decFnMsgpackIO) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + fi.addrDf = true + + if rtid == timeTypId && timeBuiltin { + fn.fd = (*decoderMsgpackIO).kTime + } else if rtid == rawTypId { + fn.fd = (*decoderMsgpackIO).raw + } else if rtid == rawExtTypId { + fn.fd = (*decoderMsgpackIO).rawExt + fi.addrD = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fd = (*decoderMsgpackIO).ext + fi.addrD = true + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fd = (*decoderMsgpackIO).selferUnmarshal + fi.addrD = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fd = (*decoderMsgpackIO).binaryUnmarshal + fi.addrD = ti.flagBinaryUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fd = (*decoderMsgpackIO).jsonUnmarshal + fi.addrD = ti.flagJsonUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fd = (*decoderMsgpackIO).textUnmarshal + fi.addrD = ti.flagTextUnmarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fd = fp[idx].decfn + fi.addrD = true + fi.addrDf = false + if rk == reflect.Array { + fi.addrD = false + } + } + } else { + + xfe, xrt := dh.decFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf2 := xfe.decfn + if rk == reflect.Array { + fi.addrD = false + fn.fd = func(d *decoderMsgpackIO, xf *decFnInfo, xrv reflect.Value) { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } else { + fi.addrD = true + fi.addrDf = false + xptr2rt := reflect.PointerTo(xrt) + fn.fd = func(d *decoderMsgpackIO, xf *decFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, rvConvert(xrv, xptr2rt)) + } else { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } + } + } + } + } + if fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fd = (*decoderMsgpackIO).kBool + case reflect.String: + fn.fd = (*decoderMsgpackIO).kString + case reflect.Int: + fn.fd = (*decoderMsgpackIO).kInt + case reflect.Int8: + fn.fd = (*decoderMsgpackIO).kInt8 + case reflect.Int16: + fn.fd = (*decoderMsgpackIO).kInt16 + case reflect.Int32: + fn.fd = (*decoderMsgpackIO).kInt32 + case 
reflect.Int64: + fn.fd = (*decoderMsgpackIO).kInt64 + case reflect.Uint: + fn.fd = (*decoderMsgpackIO).kUint + case reflect.Uint8: + fn.fd = (*decoderMsgpackIO).kUint8 + case reflect.Uint16: + fn.fd = (*decoderMsgpackIO).kUint16 + case reflect.Uint32: + fn.fd = (*decoderMsgpackIO).kUint32 + case reflect.Uint64: + fn.fd = (*decoderMsgpackIO).kUint64 + case reflect.Uintptr: + fn.fd = (*decoderMsgpackIO).kUintptr + case reflect.Float32: + fn.fd = (*decoderMsgpackIO).kFloat32 + case reflect.Float64: + fn.fd = (*decoderMsgpackIO).kFloat64 + case reflect.Complex64: + fn.fd = (*decoderMsgpackIO).kComplex64 + case reflect.Complex128: + fn.fd = (*decoderMsgpackIO).kComplex128 + case reflect.Chan: + fn.fd = (*decoderMsgpackIO).kChan + case reflect.Slice: + fn.fd = (*decoderMsgpackIO).kSlice + case reflect.Array: + fi.addrD = false + fn.fd = (*decoderMsgpackIO).kArray + case reflect.Struct: + if ti.simple { + fn.fd = (*decoderMsgpackIO).kStructSimple + } else { + fn.fd = (*decoderMsgpackIO).kStruct + } + case reflect.Map: + fn.fd = (*decoderMsgpackIO).kMap + case reflect.Interface: + + fn.fd = (*decoderMsgpackIO).kInterface + default: + + fn.fd = (*decoderMsgpackIO).kErr + } + } + } + return +} +func (e *msgpackEncDriverIO) EncodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriverIO) EncodeInt(i int64) { + if e.h.PositiveIntUnsigned && i >= 0 { + e.EncodeUint(uint64(i)) + } else if i > math.MaxInt8 { + if i <= math.MaxInt16 { + e.w.writen1(mpInt16) + e.w.writen2(bigen.PutUint16(uint16(i))) + } else if i <= math.MaxInt32 { + e.w.writen1(mpInt32) + e.w.writen4(bigen.PutUint32(uint32(i))) + } else { + e.w.writen1(mpInt64) + e.w.writen8(bigen.PutUint64(uint64(i))) + } + } else if i >= -32 { + if e.h.NoFixedNum { + e.w.writen2(mpInt8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i >= math.MinInt8 { + e.w.writen2(mpInt8, byte(i)) + } else if i >= math.MinInt16 { + e.w.writen1(mpInt16) + e.w.writen2(bigen.PutUint16(uint16(i))) + } else if i >= math.MinInt32 { + e.w.writen1(mpInt32) + e.w.writen4(bigen.PutUint32(uint32(i))) + } else { + e.w.writen1(mpInt64) + e.w.writen8(bigen.PutUint64(uint64(i))) + } +} + +func (e *msgpackEncDriverIO) EncodeUint(i uint64) { + if i <= math.MaxInt8 { + if e.h.NoFixedNum { + e.w.writen2(mpUint8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i <= math.MaxUint8 { + e.w.writen2(mpUint8, byte(i)) + } else if i <= math.MaxUint16 { + e.w.writen1(mpUint16) + e.w.writen2(bigen.PutUint16(uint16(i))) + } else if i <= math.MaxUint32 { + e.w.writen1(mpUint32) + e.w.writen4(bigen.PutUint32(uint32(i))) + } else { + e.w.writen1(mpUint64) + e.w.writen8(bigen.PutUint64(uint64(i))) + } +} + +func (e *msgpackEncDriverIO) EncodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriverIO) EncodeFloat32(f float32) { + e.w.writen1(mpFloat) + e.w.writen4(bigen.PutUint32(math.Float32bits(f))) +} + +func (e *msgpackEncDriverIO) EncodeFloat64(f float64) { + e.w.writen1(mpDouble) + e.w.writen8(bigen.PutUint64(math.Float64bits(f))) +} + +func (e *msgpackEncDriverIO) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + return + } + t = t.UTC() + sec, nsec := t.Unix(), uint64(t.Nanosecond()) + var data64 uint64 + var l = 4 + if sec >= 0 && sec>>34 == 0 { + data64 = (nsec << 34) | uint64(sec) + if data64&0xffffffff00000000 != 0 { + l = 8 + } + } else { + l = 12 + } + if e.h.WriteExt { + e.encodeExtPreamble(mpTimeExtTagU, l) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, l) + } + switch l { + 
case 4: + e.w.writen4(bigen.PutUint32(uint32(data64))) + case 8: + e.w.writen8(bigen.PutUint64(data64)) + case 12: + e.w.writen4(bigen.PutUint32(uint32(nsec))) + e.w.writen8(bigen.PutUint64(uint64(sec))) + } +} + +func (e *msgpackEncDriverIO) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + var bs0, bs []byte + if ext == SelfExt { + bs0 = e.e.blist.get(1024) + bs = bs0 + sideEncode(e.h, &e.h.sideEncPool, func(se encoderI) { oneOffEncode(se, v, &bs, basetype, true) }) + } else { + bs = ext.WriteExt(v) + } + if bs == nil { + e.writeNilBytes() + goto END + } + if e.h.WriteExt { + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) + } else { + e.EncodeBytes(bs) + } +END: + if ext == SelfExt { + e.e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.e.blist.put(bs0) + } + } +} + +func (e *msgpackEncDriverIO) EncodeRawExt(re *RawExt) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *msgpackEncDriverIO) encodeExtPreamble(xtag byte, l int) { + if l == 1 { + e.w.writen2(mpFixExt1, xtag) + } else if l == 2 { + e.w.writen2(mpFixExt2, xtag) + } else if l == 4 { + e.w.writen2(mpFixExt4, xtag) + } else if l == 8 { + e.w.writen2(mpFixExt8, xtag) + } else if l == 16 { + e.w.writen2(mpFixExt16, xtag) + } else if l < 256 { + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + } else if l < 65536 { + e.w.writen1(mpExt16) + e.w.writen2(bigen.PutUint16(uint16(l))) + e.w.writen1(xtag) + } else { + e.w.writen1(mpExt32) + e.w.writen4(bigen.PutUint32(uint32(l))) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriverIO) WriteArrayStart(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriverIO) WriteMapStart(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriverIO) WriteArrayEmpty() { + + e.w.writen1(mpFixArrayMin) +} + +func (e *msgpackEncDriverIO) WriteMapEmpty() { + + e.w.writen1(mpFixMapMin) +} + +func (e *msgpackEncDriverIO) EncodeString(s string) { + var ct msgpackContainerType + if e.h.WriteExt { + if e.h.StringToRaw { + ct = msgpackContainerBin + } else { + ct = msgpackContainerStr + } + } else { + ct = msgpackContainerRawLegacy + } + e.writeContainerLen(ct, len(s)) + if len(s) > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriverIO) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) } + +func (e *msgpackEncDriverIO) EncodeStringBytesRaw(bs []byte) { + if e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(bs)) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, len(bs)) + } + if len(bs) > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriverIO) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *msgpackEncDriverIO) writeNilOr(v byte) { + if !e.h.NilCollectionToZeroLength { + v = mpNil + } + e.w.writen1(v) +} + +func (e *msgpackEncDriverIO) writeNilArray() { + e.writeNilOr(mpFixArrayMin) +} + +func (e *msgpackEncDriverIO) writeNilMap() { + e.writeNilOr(mpFixMapMin) +} + +func (e *msgpackEncDriverIO) writeNilBytes() { + e.writeNilOr(mpFixStrMin) +} + +func (e *msgpackEncDriverIO) writeContainerLen(ct msgpackContainerType, l int) { + if ct.fixCutoff > 0 && l < int(ct.fixCutoff) { + e.w.writen1(ct.bFixMin | byte(l)) + } else if ct.b8 > 0 && l < 256 { + e.w.writen2(ct.b8, uint8(l)) + } else if l < 65536 { + e.w.writen1(ct.b16) + e.w.writen2(bigen.PutUint16(uint16(l))) + } else { + e.w.writen1(ct.b32) + e.w.writen4(bigen.PutUint32(uint32(l))) + } +} + +func (d 
*msgpackDecDriverIO) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + n := d.d.naked() + var decodeFurther bool + + switch bd { + case mpNil: + n.v = valueTypeNil + d.bdRead = false + case mpFalse: + n.v = valueTypeBool + n.b = false + case mpTrue: + n.v = valueTypeBool + n.b = true + + case mpFloat: + n.v = valueTypeFloat + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4()))) + case mpDouble: + n.v = valueTypeFloat + n.f = math.Float64frombits(bigen.Uint64(d.r.readn8())) + + case mpUint8: + n.v = valueTypeUint + n.u = uint64(d.r.readn1()) + case mpUint16: + n.v = valueTypeUint + n.u = uint64(bigen.Uint16(d.r.readn2())) + case mpUint32: + n.v = valueTypeUint + n.u = uint64(bigen.Uint32(d.r.readn4())) + case mpUint64: + n.v = valueTypeUint + n.u = uint64(bigen.Uint64(d.r.readn8())) + + case mpInt8: + n.v = valueTypeInt + n.i = int64(int8(d.r.readn1())) + case mpInt16: + n.v = valueTypeInt + n.i = int64(int16(bigen.Uint16(d.r.readn2()))) + case mpInt32: + n.v = valueTypeInt + n.i = int64(int32(bigen.Uint32(d.r.readn4()))) + case mpInt64: + n.v = valueTypeInt + n.i = int64(int64(bigen.Uint64(d.r.readn8()))) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + d.d.fauxUnionReadRawBytes(d, d.h.WriteExt, d.h.RawToString) + + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + n.v = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + n.v = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + n.v = valueTypeExt + clen := d.readExtLen() + n.u = uint64(d.r.readn1()) + if n.u == uint64(mpTimeExtTagU) { + n.v = valueTypeTime + n.t = d.decodeTime(clen) + } else { + n.l = d.r.readx(uint(clen)) + } + default: + halt.errorf("cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) + } + } + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } +} + +func (d *msgpackDecDriverIO) nextValueBytes() (v []byte) { + if !d.bdRead { + d.readNextBd() + } + d.r.startRecording() + d.nextValueBytesBdReadR() + v = d.r.stopRecording() + d.bdRead = false + return +} + +func (d *msgpackDecDriverIO) nextValueBytesBdReadR() { + bd := d.bd + + var clen uint + + switch bd { + case mpNil, mpFalse, mpTrue: + case mpUint8, mpInt8: + d.r.readn1() + case mpUint16, mpInt16: + d.r.skip(2) + case mpFloat, mpUint32, mpInt32: + d.r.skip(4) + case mpDouble, mpUint64, mpInt64: + d.r.skip(8) + case mpStr8, mpBin8: + clen = uint(d.r.readn1()) + d.r.skip(clen) + case mpStr16, mpBin16: + x := d.r.readn2() + clen = uint(bigen.Uint16(x)) + d.r.skip(clen) + case mpStr32, mpBin32: + x := d.r.readn4() + clen = uint(bigen.Uint32(x)) + d.r.skip(clen) + case mpFixExt1: + d.r.readn1() + d.r.readn1() + case mpFixExt2: + d.r.readn1() + d.r.skip(2) + case mpFixExt4: + d.r.readn1() + d.r.skip(4) + case mpFixExt8: + d.r.readn1() + d.r.skip(8) + case mpFixExt16: + d.r.readn1() + d.r.skip(16) + case mpExt8: + clen = uint(d.r.readn1()) + d.r.readn1() + d.r.skip(clen) + case mpExt16: + x := d.r.readn2() + clen = 
uint(bigen.Uint16(x)) + d.r.readn1() + d.r.skip(clen) + case mpExt32: + x := d.r.readn4() + clen = uint(bigen.Uint32(x)) + d.r.readn1() + d.r.skip(clen) + case mpArray16: + x := d.r.readn2() + clen = uint(bigen.Uint16(x)) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + } + case mpArray32: + x := d.r.readn4() + clen = uint(bigen.Uint32(x)) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + } + case mpMap16: + x := d.r.readn2() + clen = uint(bigen.Uint16(x)) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + case mpMap32: + x := d.r.readn4() + clen = uint(bigen.Uint32(x)) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + case bd >= mpFixStrMin && bd <= mpFixStrMax: + clen = uint(mpFixStrMin ^ bd) + d.r.skip(clen) + case bd >= mpFixArrayMin && bd <= mpFixArrayMax: + clen = uint(mpFixArrayMin ^ bd) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + } + case bd >= mpFixMapMin && bd <= mpFixMapMax: + clen = uint(mpFixMapMin ^ bd) + for i := uint(0); i < clen; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + default: + halt.errorf("nextValueBytes: cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) + } + } + return +} + +func (d *msgpackDecDriverIO) decFloat4Int32() (f float32) { + fbits := bigen.Uint32(d.r.readn4()) + f = math.Float32frombits(fbits) + if !noFrac32(fbits) { + halt.errorf("assigning integer value from float32 with a fraction: %v", f) + } + return +} + +func (d *msgpackDecDriverIO) decFloat4Int64() (f float64) { + fbits := bigen.Uint64(d.r.readn8()) + f = math.Float64frombits(fbits) + if !noFrac64(fbits) { + halt.errorf("assigning integer value from float64 with a fraction: %v", f) + } + return +} + +func (d *msgpackDecDriverIO) DecodeInt64() (i int64) { + if d.advanceNil() { + return + } + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(bigen.Uint16(d.r.readn2()))) + case mpUint32: + i = int64(uint64(bigen.Uint32(d.r.readn4()))) + case mpUint64: + i = int64(bigen.Uint64(d.r.readn8())) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(bigen.Uint16(d.r.readn2()))) + case mpInt32: + i = int64(int32(bigen.Uint32(d.r.readn4()))) + case mpInt64: + i = int64(bigen.Uint64(d.r.readn8())) + case mpFloat: + i = int64(d.decFloat4Int32()) + case mpDouble: + i = int64(d.decFloat4Int64()) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + halt.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + } + } + d.bdRead = false + return +} + +func (d *msgpackDecDriverIO) DecodeUint64() (ui uint64) { + if d.advanceNil() { + return + } + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(bigen.Uint16(d.r.readn2())) + case mpUint32: + ui = uint64(bigen.Uint32(d.r.readn4())) + case mpUint64: + ui = bigen.Uint64(d.r.readn8()) + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + halt.errorf("assigning negative signed value: %v, 
to unsigned type", i) + } + case mpInt16: + if i := int64(int16(bigen.Uint16(d.r.readn2()))); i >= 0 { + ui = uint64(i) + } else { + halt.errorf("assigning negative signed value: %v, to unsigned type", i) + } + case mpInt32: + if i := int64(int32(bigen.Uint32(d.r.readn4()))); i >= 0 { + ui = uint64(i) + } else { + halt.errorf("assigning negative signed value: %v, to unsigned type", i) + } + case mpInt64: + if i := int64(bigen.Uint64(d.r.readn8())); i >= 0 { + ui = uint64(i) + } else { + halt.errorf("assigning negative signed value: %v, to unsigned type", i) + } + case mpFloat: + if f := d.decFloat4Int32(); f >= 0 { + ui = uint64(f) + } else { + halt.errorf("assigning negative float value: %v, to unsigned type", f) + } + case mpDouble: + if f := d.decFloat4Int64(); f >= 0 { + ui = uint64(f) + } else { + halt.errorf("assigning negative float value: %v, to unsigned type", f) + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + halt.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd)) + default: + halt.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + } + } + d.bdRead = false + return +} + +func (d *msgpackDecDriverIO) DecodeFloat64() (f float64) { + if d.advanceNil() { + return + } + if d.bd == mpFloat { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4()))) + } else if d.bd == mpDouble { + f = math.Float64frombits(bigen.Uint64(d.r.readn8())) + } else { + f = float64(d.DecodeInt64()) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriverIO) DecodeBool() (b bool) { + if d.advanceNil() { + return + } + if d.bd == mpFalse || d.bd == 0 { + + } else if d.bd == mpTrue || d.bd == 1 { + b = true + } else { + halt.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriverIO) DecodeBytes() (bs []byte, state dBytesAttachState) { + if d.advanceNil() { + return + } + + var cond bool + bd := d.bd + var clen int + if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || + (bd >= mpFixStrMin && bd <= mpFixStrMax) { + clen = d.readContainerLen(msgpackContainerStr) + } else if bd == mpArray16 || bd == mpArray32 || + (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + slen := d.ReadArrayStart() + bs, cond = usableByteSlice(d.d.buf, slen) + for i := 0; i < len(bs); i++ { + bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8)) + } + for i := len(bs); i < slen; i++ { + bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) + } + if cond { + d.d.buf = bs + } + state = dBytesAttachBuffer + return + } else { + halt.errorf("invalid byte descriptor for decoding bytes, got: 0x%x", d.bd) + } + + d.bdRead = false + bs, cond = d.r.readxb(uint(clen)) + state = d.d.attachState(cond) + return +} + +func (d *msgpackDecDriverIO) DecodeStringAsBytes() (out []byte, state dBytesAttachState) { + out, state = d.DecodeBytes() + if d.h.ValidateUnicode && !utf8.Valid(out) { + halt.errorf("DecodeStringAsBytes: invalid UTF-8: %s", out) + } + return +} + +func (d *msgpackDecDriverIO) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *msgpackDecDriverIO) advanceNil() (null bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpNil { + d.bdRead = false + return true + } + return +} + +func (d *msgpackDecDriverIO) TryNil() (v bool) { + return d.advanceNil() +} 
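// Illustration (sketch, not part of the vendored file): EncodeTime above and
// decodeTime below implement the MessagePack timestamp extension. Its 8-byte
// form packs a 30-bit nanosecond count and a 34-bit seconds value into one
// big-endian uint64, which is exactly the shift/mask arithmetic in the code:

// packTimestamp64 is valid only when 0 <= sec < 1<<34; otherwise the
// 12-byte form (separate nsec uint32 + sec int64) is used instead.
func packTimestamp64(sec int64, nsec uint64) uint64 {
	return nsec<<34 | uint64(sec)
}

func unpackTimestamp64(v uint64) (sec, nsec int64) {
	sec = int64(v & 0x00000003ffffffff) // low 34 bits: seconds
	nsec = int64(v >> 34)               // high 30 bits: nanoseconds
	return
}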
+ +func (d *msgpackDecDriverIO) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + if bd == mpNil { + d.bdRead = false + return valueTypeNil + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + return valueTypeBytes + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || + (bd >= mpFixStrMin && bd <= mpFixStrMax) { + if d.h.WriteExt || d.h.RawToString { + return valueTypeString + } + return valueTypeBytes + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + return valueTypeArray + } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + return valueTypeMap + } + return valueTypeUnset +} + +func (d *msgpackDecDriverIO) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + if bd == ct.b8 { + clen = int(d.r.readn1()) + } else if bd == ct.b16 { + clen = int(bigen.Uint16(d.r.readn2())) + } else if bd == ct.b32 { + clen = int(bigen.Uint32(d.r.readn4())) + } else if (ct.bFixMin & bd) == ct.bFixMin { + clen = int(ct.bFixMin ^ bd) + } else { + halt.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriverIO) ReadMapStart() int { + if d.advanceNil() { + return containerLenNil + } + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriverIO) ReadArrayStart() int { + if d.advanceNil() { + return containerLenNil + } + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriverIO) readExtLen() (clen int) { + switch d.bd { + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(bigen.Uint16(d.r.readn2())) + case mpExt32: + clen = int(bigen.Uint32(d.r.readn4())) + default: + halt.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + } + return +} + +func (d *msgpackDecDriverIO) DecodeTime() (t time.Time) { + + if d.advanceNil() { + return + } + bd := d.bd + var clen int + if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || + (bd >= mpFixStrMin && bd <= mpFixStrMax) { + clen = d.readContainerLen(msgpackContainerStr) + } else { + + d.bdRead = false + b2 := d.r.readn1() + if d.bd == mpFixExt4 && b2 == mpTimeExtTagU { + clen = 4 + } else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU { + clen = 8 + } else if d.bd == mpExt8 && b2 == 12 && d.r.readn1() == mpTimeExtTagU { + clen = 12 + } else { + halt.errorf("invalid stream for decoding time as extension: got 0x%x, 0x%x", d.bd, b2) + } + } + return d.decodeTime(clen) +} + +func (d *msgpackDecDriverIO) decodeTime(clen int) (t time.Time) { + d.bdRead = false + switch clen { + case 4: + t = time.Unix(int64(bigen.Uint32(d.r.readn4())), 0).UTC() + case 8: + tv := bigen.Uint64(d.r.readn8()) + t = time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC() + case 12: + nsec := bigen.Uint32(d.r.readn4()) + sec := bigen.Uint64(d.r.readn8()) + t = time.Unix(int64(sec), int64(nsec)).UTC() + default: + halt.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen) + } + return +} + +func (d *msgpackDecDriverIO) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + xbs, _, _, ok := d.decodeExtV(ext != nil, xtag) + if !ok { + return + } + if ext == SelfExt { + 
sideDecode(d.h, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv, xbs, basetype, true) }) + } else { + ext.ReadExt(rv, xbs) + } +} + +func (d *msgpackDecDriverIO) DecodeRawExt(re *RawExt) { + xbs, realxtag, state, ok := d.decodeExtV(false, 0) + if !ok { + return + } + re.Tag = uint64(realxtag) + re.setData(xbs, state >= dBytesAttachViewZerocopy) +} + +func (d *msgpackDecDriverIO) decodeExtV(verifyTag bool, xtagIn uint64) (xbs []byte, xtag byte, bstate dBytesAttachState, ok bool) { + if xtagIn > 0xff { + halt.errorf("ext: tag must be <= 0xff; got: %v", xtagIn) + } + if d.advanceNil() { + return + } + tag := uint8(xtagIn) + xbd := d.bd + if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { + xbs, bstate = d.DecodeBytes() + } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || + (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { + xbs, bstate = d.DecodeStringAsBytes() + } else { + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + halt.errorf("wrong extension tag - got %b, expecting %v", xtag, tag) + } + xbs, ok = d.r.readxb(uint(clen)) + bstate = d.d.attachState(ok) + + } + d.bdRead = false + ok = true + return +} + +func (d *msgpackEncDriverIO) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*MsgpackHandle) + d.e = shared + if shared.bytes { + fp = msgpackFpEncBytes + } else { + fp = msgpackFpEncIO + } + + d.init2(enc) + return +} + +func (e *msgpackEncDriverIO) writeBytesAsis(b []byte) { e.w.writeb(b) } + +func (e *msgpackEncDriverIO) writerEnd() { e.w.end() } + +func (e *msgpackEncDriverIO) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *msgpackEncDriverIO) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +func (d *msgpackDecDriverIO) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*MsgpackHandle) + d.d = shared + if shared.bytes { + fp = msgpackFpDecBytes + } else { + fp = msgpackFpDecIO + } + + d.init2(dec) + return +} + +func (d *msgpackDecDriverIO) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *msgpackDecDriverIO) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *msgpackDecDriverIO) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +func (d *msgpackDecDriverIO) descBd() string { + return sprintf("%v (%s)", d.bd, mpdesc(d.bd)) +} + +func (d *msgpackDecDriverIO) DecodeFloat32() (f float32) { + return float32(chkOvf.Float32V(d.DecodeFloat64())) +} diff --git a/vendor/github.com/ugorji/go/codec/msgpack.notfastpath.mono.generated.go b/vendor/github.com/ugorji/go/codec/msgpack.notfastpath.mono.generated.go new file mode 100644 index 000000000..b4f7d2465 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/msgpack.notfastpath.mono.generated.go @@ -0,0 +1,52 @@ +//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath) + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "reflect" +) + +type fastpathEMsgpackBytes struct { + rt reflect.Type + encfn func(*encoderMsgpackBytes, *encFnInfo, reflect.Value) +} +type fastpathDMsgpackBytes struct { + rt reflect.Type + decfn func(*decoderMsgpackBytes, *decFnInfo, reflect.Value) +} +type fastpathEsMsgpackBytes [0]fastpathEMsgpackBytes +type fastpathDsMsgpackBytes [0]fastpathDMsgpackBytes + +func (helperEncDriverMsgpackBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderMsgpackBytes) bool { + return false +} +func (helperDecDriverMsgpackBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderMsgpackBytes) bool { + return false +} + +func (helperEncDriverMsgpackBytes) fastpathEList() (v *fastpathEsMsgpackBytes) { return } +func (helperDecDriverMsgpackBytes) fastpathDList() (v *fastpathDsMsgpackBytes) { return } + +type fastpathEMsgpackIO struct { + rt reflect.Type + encfn func(*encoderMsgpackIO, *encFnInfo, reflect.Value) +} +type fastpathDMsgpackIO struct { + rt reflect.Type + decfn func(*decoderMsgpackIO, *decFnInfo, reflect.Value) +} +type fastpathEsMsgpackIO [0]fastpathEMsgpackIO +type fastpathDsMsgpackIO [0]fastpathDMsgpackIO + +func (helperEncDriverMsgpackIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderMsgpackIO) bool { + return false +} +func (helperDecDriverMsgpackIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderMsgpackIO) bool { + return false +} + +func (helperEncDriverMsgpackIO) fastpathEList() (v *fastpathEsMsgpackIO) { return } +func (helperDecDriverMsgpackIO) fastpathDList() (v *fastpathDsMsgpackIO) { return } diff --git a/vendor/github.com/ugorji/go/codec/reader.go b/vendor/github.com/ugorji/go/codec/reader.go index ec5dac0e9..86c403ab5 100644 --- a/vendor/github.com/ugorji/go/codec/reader.go +++ b/vendor/github.com/ugorji/go/codec/reader.go @@ -4,22 +4,30 @@ package codec import ( - "bufio" - "bytes" + "errors" "io" - "strings" + "os" ) // decReader abstracts the reading source, allowing implementations that can // read from an io.Reader or directly off a byte slice with zero-copying. -type decReader interface { - // readx will return a view of the []byte if decoding from a []byte, OR - // read into the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR - // create a new []byte and read into that +type decReaderI interface { + // readx will return a view of the []byte in one of 2 ways: + // - direct view into []byte which decoding is happening from (if bytes) + // - view into a mutable []byte which the ioReader is using (if IO) + // + // Users should directly consume the contents read, and not store for future use. readx(n uint) []byte + // skip n bytes + skip(n uint) + readb([]byte) + // readxb will read n bytes, returning as out, and a flag stating whether + // an internal buffer (not the view) was used. + readxb(n uint) (out []byte, usingBuf bool) + readn1() byte readn2() [2]byte readn3() [3]byte @@ -35,14 +43,22 @@ type decReader interface { // skip any whitespace characters, and return the first non-matching byte skipWhitespace() (token byte) - // jsonReadNum will include last read byte in first element of slice, - // and continue numeric characters until it sees a non-numeric char - // or EOF. If it sees a non-numeric character, it will unread that. - jsonReadNum() []byte + // jsonReadNum will read a sequence of numeric characters, checking from the last + // read byte. 
It will return a sequence of numeric characters (v), + // and the next token character (tok - returned separately), + // + // if an EOF is found before the next token is seen, it returns a token value of 0. + jsonReadNum() (v []byte, token byte) - // jsonReadAsisChars will read json plain characters (anything but " or \) - // and return a slice terminated by a non-json asis character. - jsonReadAsisChars() []byte + // jsonReadAsisChars recognizes 2 terminal characters (" or \). + // jsonReadAsisChars will read json plain characters until it reaches a terminal char, + // and returns a slice up to the terminal char (excluded), + // and also returns the terminal char separately (" or \). + jsonReadAsisChars() (v []byte, terminal byte) + + // readUntil will read characters until it reaches a ", + // return a slice up to " (excluded) + jsonReadUntilDblQuote() (v []byte) // skip will skip any byte that matches, and return the first non-matching byte // skip(accept *bitset256) (token byte) @@ -50,21 +66,28 @@ type decReader interface { // readTo will read any byte that matches, stopping once no-longer matching. // readTo(accept *bitset256) (out []byte) - // readUntil will read, only stopping once it matches the 'stop' byte (which it excludes). - readUntil(stop byte) (out []byte) + // // readUntil will read characters until it reaches a stop char, + // // return a slice up to the terminal byte (excluded) + // readUntil(stop byte) (out []byte) + + // only supported when reading from bytes + // bytesReadFrom(startpos uint) []byte + + // isBytes() bool + resetIO(r io.Reader, bufsize int, maxInitLen int, blist *bytesFreeList) + + resetBytes(in []byte) + + // nextValueBytes() captures bytes read between a call to startRecording and stopRecording. + // startRecording will always includes the last byte read. + startRecording() + // stopRecording will include all bytes read between the point of startRecording and now. + stopRecording() []byte } -// ------------------------------------------------ +// // ------------------------------------------------ -type unreadByteStatus uint8 - -// unreadByteStatus goes from -// undefined (when initialized) -- (read) --> canUnread -- (unread) --> canRead ... 
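The jsonReadNum contract documented above (return the numeric run plus the following token byte, with a zero token at EOF) can be seen in a tiny stand-alone sketch over a byte slice; readNum and isDigitLike are illustrative names, not the library's internals.

package main

import "fmt"

// isDigitLike is a stand-in for the library's isNumberChar.
func isDigitLike(b byte) bool {
	return (b >= '0' && b <= '9') || b == '-' || b == '+' || b == '.' || b == 'e' || b == 'E'
}

// readNum starts at pos (the byte treated as "already read"), consumes the
// numeric run, and returns it plus the next token byte (0 if input ends first).
func readNum(in []byte, pos int) (v []byte, tok byte) {
	i := pos
	for i < len(in) && isDigitLike(in[i]) {
		i++
	}
	if i < len(in) {
		tok = in[i]
	}
	return in[pos:i], tok
}

func main() {
	v, tok := readNum([]byte(`-12.5e3,`), 0)
	fmt.Printf("num=%q next=%q\n", v, tok) // num="-12.5e3" next=','
	v, tok = readNum([]byte(`42`), 0)
	fmt.Printf("num=%q next=%d\n", v, tok) // num="42" next=0 (EOF)
}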
-const ( - unreadByteUndefined unreadByteStatus = iota - unreadByteCanRead - unreadByteCanUnread -) +const maxConsecutiveEmptyReads = 16 // 2 is sufficient, 16 is enough, 64 is optimal // const defBufReaderSize = 4096 @@ -74,185 +97,189 @@ const ( type ioReaderByteScanner interface { io.Reader io.ByteScanner - // ReadByte() (byte, error) - // UnreadByte() error - // Read(p []byte) (n int, err error) } -// ioReaderByteScannerT does a simple wrapper of a io.ByteScanner -// over a io.Reader -type ioReaderByteScannerT struct { - r io.Reader - - l byte // last byte - ls unreadByteStatus // last byte status - - _ [2]byte // padding - b [4]byte // tiny buffer for reading single bytes -} - -func (z *ioReaderByteScannerT) ReadByte() (c byte, err error) { - if z.ls == unreadByteCanRead { - z.ls = unreadByteCanUnread - c = z.l - } else { - _, err = z.Read(z.b[:1]) - c = z.b[0] - } - return -} - -func (z *ioReaderByteScannerT) UnreadByte() (err error) { - switch z.ls { - case unreadByteCanUnread: - z.ls = unreadByteCanRead - case unreadByteCanRead: - err = errDecUnreadByteLastByteNotRead - case unreadByteUndefined: - err = errDecUnreadByteNothingToRead - default: - err = errDecUnreadByteUnknown - } - return -} - -func (z *ioReaderByteScannerT) Read(p []byte) (n int, err error) { - if len(p) == 0 { - return - } - var firstByte bool - if z.ls == unreadByteCanRead { - z.ls = unreadByteCanUnread - p[0] = z.l - if len(p) == 1 { - n = 1 - return - } - firstByte = true - p = p[1:] - } - n, err = z.r.Read(p) - if n > 0 { - if err == io.EOF && n == len(p) { - err = nil // read was successful, so postpone EOF (till next time) - } - z.l = p[n-1] - z.ls = unreadByteCanUnread - } - if firstByte { - n++ - } - return -} - -func (z *ioReaderByteScannerT) reset(r io.Reader) { - z.r = r - z.ls = unreadByteUndefined - z.l = 0 -} +// MARKER: why not separate bufioDecReader from ioDecReader? +// +// We tried, but only readn1 of bufioDecReader came close to being +// inlined (at inline cost 82). All other methods were at inline cost >= 90. +// +// Consequently, there's no performance impact from having both together +// (except a single if z.bufio branch, which is likely well predicted and happens +// only once per call (right at the top). // ioDecReader is a decReader that reads off an io.Reader. type ioDecReader struct { - rr ioReaderByteScannerT // the reader passed in, wrapped into a reader+bytescanner + r io.Reader + + blist *bytesFreeList + + maxInitLen uint n uint // num read - blist *bytesFreelist + bufsize uint - bufr []byte // buffer for readTo/readUntil - br ioReaderByteScanner // main reader used for Read|ReadByte|UnreadByte - bb *bufio.Reader // created internally, and reused on reset if needed + bufio bool // are we buffering (rc and wc are valid) + rbr bool // r is a byte reader + recording bool // are we recording (src and erc are valid) + done bool // did we reach EOF and are we done? 
- x [64 + 40]byte // for: get struct field name, swallow valueTypeBytes, etc + // valid when: bufio=false + b [1]byte // tiny buffer for reading single byte (if z.br == nil) + l byte // last byte read + br io.ByteReader // main reader used for ReadByte + + // valid when: bufio=true + wc uint // read cursor + rc uint // write cursor + err error + + // valid when: recording=true + recc uint // start-recording cursor (valid: recording=true) + + buf []byte // buffer for bufio OR recording (if !bufio) } -func (z *ioDecReader) reset(r io.Reader, bufsize int, blist *bytesFreelist) { +func (z *ioDecReader) resetBytes(in []byte) { + halt.errorStr("resetBytes unsupported by ioDecReader") +} + +func (z *ioDecReader) resetIO(r io.Reader, bufsize int, maxInitLen int, blist *bytesFreeList) { + buf := z.buf + *z = ioDecReader{} + z.maxInitLen = max(1024, uint(maxInitLen)) z.blist = blist - z.n = 0 - z.bufr = z.blist.check(z.bufr, 256) - z.br = nil - - var ok bool - - if bufsize <= 0 { - z.br, ok = r.(ioReaderByteScanner) - if !ok { - z.rr.reset(r) - z.br = &z.rr - } - return + z.buf = blist.check(buf, max(256, bufsize)) + z.bufsize = uint(max(0, bufsize)) + z.bufio = z.bufsize > 0 + if z.bufio { + z.buf = z.buf[:cap(z.buf)] + } else { + z.buf = z.buf[:0] } - - // bufsize > 0 ... - - // if bytes.[Buffer|Reader], no value in adding extra buffer - // if bufio.Reader, no value in extra buffer unless size changes - switch bb := r.(type) { - case *strings.Reader: - z.br = bb - case *bytes.Buffer: - z.br = bb - case *bytes.Reader: - z.br = bb - case *bufio.Reader: - if bb.Size() == bufsize { - z.br = bb - } - } - - if z.br == nil { - if z.bb != nil && z.bb.Size() == bufsize { - z.bb.Reset(r) - } else { - z.bb = bufio.NewReaderSize(r, bufsize) - } - z.br = z.bb + if r == nil { + z.r = &eofReader + } else { + z.r = r } + z.br, z.rbr = z.r.(io.ByteReader) } func (z *ioDecReader) numread() uint { return z.n } -func (z *ioDecReader) readn1() (b uint8) { - b, err := z.br.ReadByte() - halt.onerror(err) - z.n++ - return +func (z *ioDecReader) readn2() [2]byte { + return ([2]byte)(z.readx(2)) + // using readb forced return bs onto heap, unnecessarily + // z.readb(bs[:]) + // return } -func (z *ioDecReader) readn2() (bs [2]byte) { - z.readb(bs[:]) - return +func (z *ioDecReader) readn3() [3]byte { + return ([3]byte)(z.readx(3)) } -func (z *ioDecReader) readn3() (bs [3]byte) { - z.readb(bs[:]) - return +func (z *ioDecReader) readn4() [4]byte { + return ([4]byte)(z.readx(4)) } -func (z *ioDecReader) readn4() (bs [4]byte) { - z.readb(bs[:]) - return -} - -func (z *ioDecReader) readn8() (bs [8]byte) { - z.readb(bs[:]) - return +func (z *ioDecReader) readn8() [8]byte { + return ([8]byte)(z.readx(8)) } func (z *ioDecReader) readx(n uint) (bs []byte) { - if n == 0 { - return zeroByteSlice + return bytesOK(z.readxb(n)) +} + +func (z *ioDecReader) readErr() (err error) { + err, z.err = z.err, nil + return +} + +func (z *ioDecReader) checkErr() { + halt.onerror(z.readErr()) +} + +func (z *ioDecReader) readOne() (b byte, err error) { + n, err := z.r.Read(z.b[:]) + if n == 1 { + err = nil + b = z.b[0] } - if n < uint(len(z.x)) { - bs = z.x[:n] + return +} + +// fillbuf reads a new chunk into the buffer. +func (z *ioDecReader) fillbuf(bufsize uint) (numShift, numRead uint) { + z.checkErr() + bufsize = max(bufsize, z.bufsize) + + // Slide existing data to beginning. 
+ if z.recording { + numShift = z.recc // recc is always <= rc } else { - bs = make([]byte, n) + numShift = z.rc } - nn, err := readFull(z.br, bs) - z.n += nn - halt.onerror(err) + if numShift > 0 { + numShift-- // never shift last byte read out + } + copy(z.buf, z.buf[numShift:z.wc]) + z.wc -= numShift + z.rc -= numShift + if z.recording { + z.recc -= numShift + } + // add enough to allow u to read up to bufsize again iff + // - buf is fully written + // - NOTE: don't pre-allocate more until needed + if uint(len(z.buf)) == z.wc { + if bufsize+z.wc < uint(cap(z.buf)) { + z.buf = z.buf[:uint(cap(z.buf))] + } else { + bufsize = max(uint(cap(z.buf)*3/2), bufsize+z.wc) + buf := z.blist.get(int(bufsize)) + buf = buf[:cap(buf)] + copy(buf, z.buf[:z.wc]) + z.blist.put(z.buf) + z.buf = buf + } + } + // Read new data: try a limited number of times. + // if n == 0: try up to maxConsecutiveEmptyReads + // if n > 0 and err == nil: try one more time (to see if we get n == 0 and EOF) + for i := maxConsecutiveEmptyReads; i > 0; i-- { + n, err := z.r.Read(z.buf[z.wc:]) + numRead += uint(n) + z.wc += uint(n) + if err != nil { + // if os read dealine, and we have read something, return + z.err = err + if err == io.EOF { + z.done = true + } else if errors.Is(err, os.ErrDeadlineExceeded) { + // os read deadline, but some bytes read: return (don't store err) + z.err = nil + } + return + } + + // if z.wc == uint(len(z.buf)) { + // return + // } + // only read one time if results returned + // if n > 0 && i > 2 { + // i = 2 // try max one more time (to see about getting EOF) + // } + + // Once you have some data from this read call, move on. + // Consequently, a blocked Read has less chance of happening. + if n > 0 { + return + } + } + z.err = io.ErrNoProgress // either no data read OR not enough data read, without an EOF return } @@ -260,90 +287,378 @@ func (z *ioDecReader) readb(bs []byte) { if len(bs) == 0 { return } - nn, err := readFull(z.br, bs) + var err error + var n int + if z.bufio { + BUFIO: + for z.rc == z.wc { + z.fillbuf(0) + } + n = copy(bs, z.buf[z.rc:z.wc]) + z.rc += uint(n) + z.n += uint(n) + if n == len(bs) { + return + } + bs = bs[n:] + goto BUFIO + } + + // -------- NOT BUFIO ------ + + var nn uint + bs0 := bs +READER: + n, err = z.r.Read(bs) + if n > 0 { + z.l = bs[n-1] + nn += uint(n) + bs = bs[n:] + } + if len(bs) != 0 && err == nil { + goto READER + } + if z.recording { + z.buf = append(z.buf, bs0[:nn]...) 
+ } z.n += nn - halt.onerror(err) -} - -// func (z *ioDecReader) readn1eof() (b uint8, eof bool) { -// b, err := z.br.ReadByte() -// if err == nil { -// z.n++ -// } else if err == io.EOF { -// eof = true -// } else { -// halt.onerror(err) -// } -// return -// } - -func (z *ioDecReader) jsonReadNum() (bs []byte) { - z.unreadn1() - z.bufr = z.bufr[:0] -LOOP: - // i, eof := z.readn1eof() - i, err := z.br.ReadByte() - if err == io.EOF { - return z.bufr - } - if err != nil { + if len(bs) != 0 { halt.onerror(err) - } - z.n++ - if isNumberChar(i) { - z.bufr = append(z.bufr, i) - goto LOOP - } - z.unreadn1() - return z.bufr -} - -func (z *ioDecReader) jsonReadAsisChars() (bs []byte) { - z.bufr = z.bufr[:0] -LOOP: - i := z.readn1() - z.bufr = append(z.bufr, i) - if i == '"' || i == '\\' { - return z.bufr - } - goto LOOP -} - -func (z *ioDecReader) skipWhitespace() (token byte) { -LOOP: - token = z.readn1() - if isWhitespaceChar(token) { - goto LOOP + halt.errorf("ioDecReader.readb read %d out of %d bytes requested", nn, len(bs0)) } return } -// func (z *ioDecReader) readUntil(stop byte) []byte { -// z.bufr = z.bufr[:0] -// LOOP: -// token := z.readn1() -// z.bufr = append(z.bufr, token) -// if token == stop { -// return z.bufr[:len(z.bufr)-1] -// } -// goto LOOP -// } - -func (z *ioDecReader) readUntil(stop byte) []byte { - z.bufr = z.bufr[:0] -LOOP: - token := z.readn1() - if token == stop { - return z.bufr +func (z *ioDecReader) readn1() (b uint8) { + if z.bufio { + for z.rc == z.wc { + z.fillbuf(0) + } + b = z.buf[z.rc] + z.rc++ + z.n++ + return } - z.bufr = append(z.bufr, token) - goto LOOP + + // -------- NOT BUFIO ------ + + var err error + if z.rbr { + b, err = z.br.ReadByte() + } else { + b, err = z.readOne() + } + halt.onerror(err) + z.l = b + z.n++ + if z.recording { + z.buf = append(z.buf, b) + } + return } -func (z *ioDecReader) unreadn1() { - err := z.br.UnreadByte() +func (z *ioDecReader) readxb(n uint) (out []byte, useBuf bool) { + if n == 0 { + return zeroByteSlice, false + } + + if z.bufio { + BUFIO: + nn := int(n+z.rc) - int(z.wc) + if nn > 0 { + z.fillbuf(decInferLen(nn, z.maxInitLen, 1)) + goto BUFIO + } + pos := z.rc + z.rc += uint(n) + z.n += uint(n) + out = z.buf[pos:z.rc] + useBuf = true + return + } + + // -------- NOT BUFIO ------ + + useBuf = true + out = z.buf + r0 := uint(len(out)) + r := r0 + nn := int(n) + var n2 uint + for nn > 0 { + n2 = r + decInferLen(int(nn), z.maxInitLen, 1) + if cap(out) < int(n2) { + out2 := z.blist.putGet(out, int(n2))[:n2] // make([]byte, len2+len3) + copy(out2, out) + out = out2 + } else { + out = out[:n2] + } + n3, err := z.r.Read(out[r:n2]) + if n3 > 0 { + z.l = out[r+uint(n3)-1] + nn -= n3 + r += uint(n3) + } + halt.onerror(err) + } + z.buf = out[:r0+n] + out = out[r0 : r0+n] + z.n += n + return +} + +func (z *ioDecReader) skip(n uint) { + if n == 0 { + return + } + + if z.bufio { + BUFIO: + n2 := min(n, z.wc-z.rc) + // handle in-line, so z.buf doesn't grow much (since we're skipping) + // ie by setting z.rc, fillbuf should keep shifting left (unless recording) + z.rc += n2 + z.n += n2 + n -= n2 + if n > 0 { + z.fillbuf(decInferLen(int(n+z.rc)-int(z.wc), z.maxInitLen, 1)) + goto BUFIO + } + return + } + + // -------- NOT BUFIO ------ + + var out []byte + var fromBlist bool + if z.recording { + out = z.buf + } else { + nn := int(decInferLen(int(n), z.maxInitLen, 1)) + if cap(z.buf) >= nn/2 { + out = z.buf[:cap(z.buf)] + } else { + fromBlist = true + out = z.blist.get(nn) + } + } + + var r, n2 uint + nn := int(n) + for nn > 0 { + n2 = 
uint(nn) + if z.recording { + r = uint(len(out)) + n2 = r + decInferLen(int(nn), z.maxInitLen, 1) + if cap(out) < int(n2) { + out2 := z.blist.putGet(out, int(n2))[:n2] // make([]byte, len2+len3) + copy(out2, out) + out = out2 + } else { + out = out[:n2] + } + } + n3, err := z.r.Read(out[r:n2]) + if n3 > 0 { + z.l = out[r+uint(n3)-1] + z.n += uint(n3) + nn -= n3 + } + halt.onerror(err) + } + if z.recording { + z.buf = out + } else if fromBlist { + z.blist.put(out) + } + return +} + +// ---- JSON SPECIFIC HELPERS HERE ---- + +func (z *ioDecReader) jsonReadNum() (bs []byte, token byte) { + var start, pos, end uint + if z.bufio { + // read and fill into buf, then take substring + start = z.rc - 1 // include last byte read + pos = start + BUFIO: + if pos == z.wc { + if z.done { + end = pos + goto END + } + numshift, numread := z.fillbuf(0) + start -= numshift + pos -= numshift + if numread == 0 { + end = pos + goto END + } + } + token = z.buf[pos] + pos++ + if isNumberChar(token) { + goto BUFIO + } + end = pos - 1 + END: + z.n += (pos - z.rc) + z.rc = pos + return z.buf[start:end], token + } + + // if not recording, add the last read byte into buf + if !z.recording { + z.buf = append(z.buf[:0], z.l) + } + start = uint(len(z.buf) - 1) // incl last byte in z.buf + var b byte + var err error + +READER: + if z.rbr { + b, err = z.br.ReadByte() + } else { + b, err = z.readOne() + } + if err == io.EOF { + return z.buf[start:], 0 + } halt.onerror(err) - z.n-- + z.l = b + z.n++ + z.buf = append(z.buf, b) + if isNumberChar(b) { + goto READER + } + return z.buf[start : len(z.buf)-1], b +} + +func (z *ioDecReader) skipWhitespace() (tok byte) { + var pos uint + if z.bufio { + pos = z.rc + BUFIO: + if pos == z.wc { + if z.done { + halt.onerror(io.ErrUnexpectedEOF) + } + numshift, numread := z.fillbuf(0) + pos -= numshift + if numread == 0 { + halt.onerror(io.ErrUnexpectedEOF) + } + } + tok = z.buf[pos] + pos++ + if isWhitespaceChar(tok) { + goto BUFIO + } + z.n += (pos - z.rc) + z.rc = pos + return tok + } + + var err error +READER: + if z.rbr { + tok, err = z.br.ReadByte() + } else { + tok, err = z.readOne() + } + halt.onerror(err) + z.n++ + z.l = tok + if z.recording { + z.buf = append(z.buf, tok) + } + if isWhitespaceChar(tok) { + goto READER + } + return tok +} + +func (z *ioDecReader) readUntil(stop1, stop2 byte) (bs []byte, tok byte) { + var start, pos uint + if z.bufio { + start = z.rc + pos = start + BUFIO: + if pos == z.wc { + if z.done { + halt.onerror(io.ErrUnexpectedEOF) + } + numshift, numread := z.fillbuf(0) + start -= numshift + pos -= numshift + if numread == 0 { + halt.onerror(io.ErrUnexpectedEOF) + } + } + tok = z.buf[pos] + pos++ + if tok == stop1 || tok == stop2 { + z.n += (pos - z.rc) + z.rc = pos + return z.buf[start : pos-1], tok + } + goto BUFIO + } + + var err error + if !z.recording { + z.buf = z.buf[:0] + } + start = uint(len(z.buf)) +READER: + if z.rbr { + tok, err = z.br.ReadByte() + } else { + tok, err = z.readOne() + } + halt.onerror(err) + z.n++ + z.l = tok + z.buf = append(z.buf, tok) + if tok == stop1 || tok == stop2 { + return z.buf[start : len(z.buf)-1], tok + } + goto READER +} + +func (z *ioDecReader) jsonReadAsisChars() (bs []byte, tok byte) { + return z.readUntil('"', '\\') +} + +func (z *ioDecReader) jsonReadUntilDblQuote() (bs []byte) { + bs, _ = z.readUntil('"', 0) + return +} + +// ---- start/stop recording ---- + +func (z *ioDecReader) startRecording() { + z.recording = true + // always include last byte read + if z.bufio { + z.recc = z.rc - 1 + } else { + z.buf = 
append(z.buf[:0], z.l) + } +} + +func (z *ioDecReader) stopRecording() (v []byte) { + z.recording = false + if z.bufio { + v = z.buf[z.recc:z.rc] + z.recc = 0 + } else { + v = z.buf + z.buf = z.buf[:0] + } + return } // ------------------------------------ @@ -359,11 +674,18 @@ func (z *ioDecReader) unreadn1() { // // see panicValToErr(...) function in helper.go. type bytesDecReader struct { - b []byte // data - c uint // cursor + b []byte // data + c uint // cursor + r uint // recording cursor + xb []byte // buffer for readxb } -func (z *bytesDecReader) reset(in []byte) { +func (z *bytesDecReader) resetIO(r io.Reader, bufsize int, maxInitLen int, blist *bytesFreeList) { + halt.errorStr("resetIO unsupported by bytesDecReader") +} + +func (z *bytesDecReader) resetBytes(in []byte) { + // it's ok to resize a nil slice, so long as it's not past 0 z.b = in[:len(in):len(in)] // reslicing must not go past capacity z.c = 0 } @@ -377,41 +699,26 @@ func (z *bytesDecReader) numread() uint { // However, we do it only once, and it's better than reslicing both z.b and return value. func (z *bytesDecReader) readx(n uint) (bs []byte) { - // x := z.c + n - // bs = z.b[z.c:x] - // z.c = x bs = z.b[z.c : z.c+n] z.c += n return } +func (z *bytesDecReader) skip(n uint) { + if z.c+n > uint(cap(z.b)) { + halt.error(&outOfBoundsError{uint(cap(z.b)), z.c + n}) + } + z.c += n +} + +func (z *bytesDecReader) readxb(n uint) (out []byte, usingBuf bool) { + return z.readx(n), false +} + func (z *bytesDecReader) readb(bs []byte) { copy(bs, z.readx(uint(len(bs)))) } -// MARKER: do not use this - as it calls into memmove (as the size of data to move is unknown) -// func (z *bytesDecReader) readnn(bs []byte, n uint) { -// x := z.c -// copy(bs, z.b[x:x+n]) -// z.c += n -// } - -// func (z *bytesDecReader) readn(num uint8) (bs [8]byte) { -// x := z.c + uint(num) -// copy(bs[:], z.b[z.c:x]) // slice z.b completely, so we get bounds error if past -// z.c = x -// return -// } - -// func (z *bytesDecReader) readn1() uint8 { -// z.c++ -// return z.b[z.c-1] -// } - -// MARKER: readn{1,2,3,4,8} should throw an out of bounds error if past length. -// MARKER: readn1: explicitly ensure bounds check is done -// MARKER: readn{2,3,4,8}: ensure you slice z.b completely so we get bounds error if past end. 
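The new startRecording/stopRecording pair shown above has a simple contract: recording always includes the last byte already read, and stopRecording returns every byte consumed since. A minimal sketch of that contract over a plain byte slice, mirroring the bytesDecReader implementation; the recorder type and its fields are illustrative only.

package main

import "fmt"

// recorder mirrors the bytesDecReader recording contract shown above:
// startRecording always includes the last byte read, and stopRecording
// returns every byte consumed since then.
type recorder struct {
	b []byte
	c int // cursor: index of the next byte to read
	r int // recording start
}

func (z *recorder) readn1() byte          { v := z.b[z.c]; z.c++; return v }
func (z *recorder) startRecording()       { z.r = z.c - 1 }
func (z *recorder) stopRecording() []byte { return z.b[z.r:z.c] }

func main() {
	z := &recorder{b: []byte(`{"a":1}`)}
	first := z.readn1() // reads '{'
	z.startRecording()  // recording includes that '{'
	for z.c < len(z.b) {
		z.readn1()
	}
	fmt.Printf("%c %s\n", first, z.stopRecording()) // { {"a":1}
}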
- func (z *bytesDecReader) readn1() (v uint8) { v = z.b[z.c] z.c++ @@ -419,59 +726,58 @@ func (z *bytesDecReader) readn1() (v uint8) { } func (z *bytesDecReader) readn2() (bs [2]byte) { - // copy(bs[:], z.b[z.c:z.c+2]) - // bs[1] = z.b[z.c+1] - // bs[0] = z.b[z.c] - bs = okBytes2(z.b[z.c : z.c+2]) + bs = [2]byte(z.b[z.c:]) z.c += 2 return } func (z *bytesDecReader) readn3() (bs [3]byte) { - // copy(bs[1:], z.b[z.c:z.c+3]) - bs = okBytes3(z.b[z.c : z.c+3]) + bs = [3]byte(z.b[z.c:]) z.c += 3 return } func (z *bytesDecReader) readn4() (bs [4]byte) { - // copy(bs[:], z.b[z.c:z.c+4]) - bs = okBytes4(z.b[z.c : z.c+4]) + bs = [4]byte(z.b[z.c:]) z.c += 4 return } func (z *bytesDecReader) readn8() (bs [8]byte) { - // copy(bs[:], z.b[z.c:z.c+8]) - bs = okBytes8(z.b[z.c : z.c+8]) + bs = [8]byte(z.b[z.c:]) z.c += 8 return } -func (z *bytesDecReader) jsonReadNum() []byte { - z.c-- // unread - i := z.c +func (z *bytesDecReader) jsonReadNum() (bs []byte, token byte) { + start := z.c - 1 // include last byte + i := start LOOP: - // gracefully handle end of slice, as end of stream is meaningful here - if i < uint(len(z.b)) && isNumberChar(z.b[i]) { - i++ - goto LOOP + // gracefully handle end of slice (~= EOF) + if i < uint(len(z.b)) { + if isNumberChar(z.b[i]) { + i++ + goto LOOP + } + token = z.b[i] } - z.c, i = i, z.c - // MARKER: 20230103: byteSliceOf here prevents inlining of jsonReadNum - // return byteSliceOf(z.b, i, z.c) - return z.b[i:z.c] + z.c = i + 1 + bs = z.b[start:i] // byteSliceOf(z.b, start, i) + return } -func (z *bytesDecReader) jsonReadAsisChars() []byte { +func (z *bytesDecReader) jsonReadAsisChars() (bs []byte, token byte) { i := z.c LOOP: - token := z.b[i] + token = z.b[i] i++ if token == '"' || token == '\\' { - z.c, i = i, z.c - return byteSliceOf(z.b, i, z.c) - // return z.b[i:z.c] + // z.c, i = i, z.c + // return byteSliceOf(z.b, i, z.c-1), token + bs = z.b[z.c : i-1] + z.c = i + return + // return z.b[i : z.c-1], token } goto LOOP } @@ -479,21 +785,10 @@ LOOP: func (z *bytesDecReader) skipWhitespace() (token byte) { i := z.c LOOP: + // setting token before check reduces inlining cost, + // making containerNext inlineable token = z.b[i] - if isWhitespaceChar(token) { - i++ - goto LOOP - } - z.c = i + 1 - return -} - -func (z *bytesDecReader) readUntil(stop byte) (out []byte) { - i := z.c -LOOP: - if z.b[i] == stop { - out = byteSliceOf(z.b, z.c, i) - // out = z.b[z.c:i] + if !isWhitespaceChar(token) { z.c = i + 1 return } @@ -501,107 +796,35 @@ LOOP: goto LOOP } -// -------------- - -type decRd struct { - rb bytesDecReader - ri *ioDecReader - - decReader - - bytes bool // is bytes reader - - // MARKER: these fields below should belong directly in Encoder. - // we pack them here for space efficiency and cache-line optimization. - - mtr bool // is maptype a known type? - str bool // is slicetype a known type? - - be bool // is binary encoding - js bool // is json handle - jsms bool // is json handle, and MapKeyAsString - cbor bool // is cbor handle - - cbreak bool // is a check breaker - -} - -// From out benchmarking, we see the following impact performance: -// -// - functions that are too big to inline -// - interface calls (as no inlining can occur) -// -// decRd is designed to embed a decReader, and then re-implement some of the decReader -// methods using a conditional branch. -// -// We only override the ones where the bytes version is inlined AND the wrapper method -// (containing the bytes version alongside a conditional branch) is also inlined. 
-// -// We use ./run.sh -z to check. -// -// Right now, only numread and "carefully crafted" readn1 can be inlined. - -func (z *decRd) numread() uint { - if z.bytes { - return z.rb.numread() - } - return z.ri.numread() -} - -func (z *decRd) readn1() (v uint8) { - if z.bytes { - // return z.rb.readn1() - // MARKER: calling z.rb.readn1() prevents decRd.readn1 from being inlined. - // copy code, to manually inline and explicitly return here. - // Keep in sync with bytesDecReader.readn1 - v = z.rb.b[z.rb.c] - z.rb.c++ +func (z *bytesDecReader) jsonReadUntilDblQuote() (out []byte) { + i := z.c +LOOP: + if z.b[i] == '"' { + out = z.b[z.c:i] // byteSliceOf(z.b, z.c, i) + z.c = i + 1 return } - return z.ri.readn1() + i++ + goto LOOP } -// func (z *decRd) readn4() [4]byte { -// if z.bytes { -// return z.rb.readn4() -// } -// return z.ri.readn4() -// } +func (z *bytesDecReader) startRecording() { + z.r = z.c - 1 +} -// func (z *decRd) readn3() [3]byte { -// if z.bytes { -// return z.rb.readn3() -// } -// return z.ri.readn3() -// } - -// func (z *decRd) skipWhitespace() byte { -// if z.bytes { -// return z.rb.skipWhitespace() -// } -// return z.ri.skipWhitespace() -// } +func (z *bytesDecReader) stopRecording() (v []byte) { + v = z.b[z.r:z.c] + z.r = 0 + return +} type devNullReader struct{} func (devNullReader) Read(p []byte) (int, error) { return 0, io.EOF } func (devNullReader) Close() error { return nil } +func (devNullReader) ReadByte() (byte, error) { return 0, io.EOF } +func (devNullReader) UnreadByte() error { return io.EOF } -func readFull(r io.Reader, bs []byte) (n uint, err error) { - var nn int - for n < uint(len(bs)) && err == nil { - nn, err = r.Read(bs[n:]) - if nn > 0 { - if err == io.EOF { - // leave EOF for next time - err = nil - } - n += uint(nn) - } - } - // do not do this below - it serves no purpose - // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF } - return -} - -var _ decReader = (*decRd)(nil) +// MARKER: readn{1,2,3,4,8} should throw an out of bounds error if past length. +// MARKER: readn1: explicitly ensure bounds check is done +// MARKER: readn{2,3,4,8}: ensure you slice z.b completely so we get bounds error if past end. diff --git a/vendor/github.com/ugorji/go/codec/register_ext.go b/vendor/github.com/ugorji/go/codec/register_ext.go deleted file mode 100644 index 65e455377..000000000 --- a/vendor/github.com/ugorji/go/codec/register_ext.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import "reflect" - -// This file exists, so that the files for specific formats do not all import reflect. -// This just helps us ensure that reflect package is isolated to a few files. 
- -// SetInterfaceExt sets an extension -func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, makeExt(ext)) -} - -// SetInterfaceExt sets an extension -func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, makeExt(ext)) -} - -// SetBytesExt sets an extension -func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, makeExt(ext)) -} - -// SetBytesExt sets an extension -func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, makeExt(ext)) -} - -// SetBytesExt sets an extension -func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, makeExt(ext)) -} - -// func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { -// return h.SetExt(rt, tag, &interfaceExtWrapper{InterfaceExt: ext}) -// } diff --git a/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go index 0da8ad577..bdc535b6d 100644 --- a/vendor/github.com/ugorji/go/codec/rpc.go +++ b/vendor/github.com/ugorji/go/codec/rpc.go @@ -4,10 +4,11 @@ package codec import ( - "bufio" "errors" "io" + "net" "net/rpc" + "sync/atomic" ) var ( @@ -28,57 +29,44 @@ type RPCOptions struct { // RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls. // // Set RPCNoBuffer=true to turn buffering off. + // // Buffering can still be done if buffered connections are passed in, or // buffering is configured on the handle. + // + // Deprecated: Buffering should be configured at the Handle or by using a buffer Reader. + // Setting this has no effect anymore (after v1.2.12 - authored 2025-05-06) RPCNoBuffer bool } // rpcCodec defines the struct members and common methods. type rpcCodec struct { - c io.Closer - r io.Reader - w io.Writer - f ioFlusher - + c io.Closer + r io.Reader + w io.Writer + f ioFlusher + nc net.Conn dec *Decoder enc *Encoder h Handle - cls atomicClsErr + cls atomic.Pointer[clsErr] } -func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { - return newRPCCodec2(conn, conn, conn, h) -} - -func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec { - bh := h.getBasicHandle() - // if the writer can flush, ensure we leverage it, else - // we may hang waiting on read if write isn't flushed. 
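The rpcCodec changes in this hunk replace the old atomicClsErr with atomic.Pointer[clsErr] and, as the Close/ready changes just below show, always store a freshly allocated value rather than mutating the stored one. A minimal sketch of that copy-on-write pattern, assuming illustrative type and method names:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// closeState plays the role of clsErr; all names here are illustrative.
type closeState struct {
	closed bool
	err    error
}

type conn struct {
	cls atomic.Pointer[closeState]
}

// Close never mutates a stored value; it publishes a brand-new one, so a
// concurrent ready() only ever Loads a fully initialized snapshot.
func (c *conn) Close() error {
	if cur := c.cls.Load(); cur != nil && cur.closed {
		return cur.err
	}
	next := &closeState{closed: true, err: errors.New("closed")}
	c.cls.Store(next)
	return next.err
}

func (c *conn) ready() error {
	if s := c.cls.Load(); s != nil && s.closed {
		return s.err
	}
	return nil
}

func main() {
	var c conn
	fmt.Println(c.ready(), c.Close(), c.ready()) // <nil> closed closed
}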
- // var f ioFlusher - f, ok := w.(ioFlusher) - if !bh.RPCNoBuffer { - if bh.WriterBufferSize <= 0 { - if !ok { // a flusher means there's already a buffer - bw := bufio.NewWriter(w) - f, w = bw, bw - } - } - if bh.ReaderBufferSize <= 0 { - if _, ok = w.(ioBuffered); !ok { - r = bufio.NewReader(r) - } - } - } - return rpcCodec{ - c: c, - w: w, - r: r, - f: f, +func newRPCCodec(conn io.ReadWriteCloser, h Handle) *rpcCodec { + nc, _ := conn.(net.Conn) + f, _ := conn.(ioFlusher) + rc := &rpcCodec{ h: h, - enc: NewEncoder(w, h), - dec: NewDecoder(r, h), + c: conn, + w: conn, + r: conn, + f: f, + nc: nc, + enc: NewEncoder(conn, h), + dec: NewDecoder(conn, h), } + rc.cls.Store(new(clsErr)) + return rc } func (c *rpcCodec) write(obj ...interface{}) (err error) { @@ -116,10 +104,16 @@ func (c *rpcCodec) write(obj ...interface{}) (err error) { func (c *rpcCodec) read(obj interface{}) (err error) { err = c.ready() if err == nil { - //If nil is passed in, we should read and discard + // Setting ReadDeadline should not be necessary, + // especially since it only works for net.Conn (not generic ioReadCloser). + // if c.nc != nil { + // c.nc.SetReadDeadline(time.Now().Add(1 * time.Second)) + // } + + // Note: If nil is passed in, we should read and discard if obj == nil { // return c.dec.Decode(&obj) - err = c.dec.swallowErr() + err = panicToErr(c.dec, func() { c.dec.swallow() }) } else { err = c.dec.Decode(obj) } @@ -129,11 +123,11 @@ func (c *rpcCodec) read(obj interface{}) (err error) { func (c *rpcCodec) Close() (err error) { if c.c != nil { - cls := c.cls.load() + cls := c.cls.Load() if !cls.closed { - cls.err = c.c.Close() - cls.closed = true - c.cls.store(cls) + // writing to same pointer could lead to a data race (always make new one) + cls = &clsErr{closed: true, err: c.c.Close()} + c.cls.Store(cls) } err = cls.err } @@ -144,8 +138,8 @@ func (c *rpcCodec) ready() (err error) { if c.c == nil { err = errRpcNoConn } else { - cls := c.cls.load() - if cls.closed { + cls := c.cls.Load() + if cls != nil && cls.closed { if err = cls.err; err == nil { err = errRpcIsClosed } @@ -161,7 +155,7 @@ func (c *rpcCodec) ReadResponseBody(body interface{}) error { // ------------------------------------- type goRpcCodec struct { - rpcCodec + *rpcCodec } func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { diff --git a/vendor/github.com/ugorji/go/codec/simple.base.go b/vendor/github.com/ugorji/go/codec/simple.base.go new file mode 100644 index 000000000..2522282cf --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/simple.base.go @@ -0,0 +1,97 @@ +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" +) + +const ( + _ uint8 = iota + simpleVdNil = 1 + simpleVdFalse = 2 + simpleVdTrue = 3 + simpleVdFloat32 = 4 + simpleVdFloat64 = 5 + + // each lasts for 4 (ie n, n+1, n+2, n+3) + simpleVdPosInt = 8 + simpleVdNegInt = 12 + + simpleVdTime = 24 + + // containers: each lasts for 8 (ie n, n+1, n+2, ... 
n+7) + simpleVdString = 216 + simpleVdByteArray = 224 + simpleVdArray = 232 + simpleVdMap = 240 + simpleVdExt = 248 +) + +var simpledescNames = map[byte]string{ + simpleVdNil: "null", + simpleVdFalse: "false", + simpleVdTrue: "true", + simpleVdFloat32: "float32", + simpleVdFloat64: "float64", + + simpleVdPosInt: "+int", + simpleVdNegInt: "-int", + + simpleVdTime: "time", + + simpleVdString: "string", + simpleVdByteArray: "binary", + simpleVdArray: "array", + simpleVdMap: "map", + simpleVdExt: "ext", +} + +func simpledesc(bd byte) (s string) { + s = simpledescNames[bd] + if s == "" { + s = "unknown" + } + return +} + +//------------------------------------ + +// SimpleHandle is a Handle for a very simple encoding format. +// +// simple is a simplistic codec similar to binc, but not as compact. +// - Encoding of a value is always preceded by the descriptor byte (bd) +// - True, false, nil are encoded fully in 1 byte (the descriptor) +// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). +// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. +// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) +// - Length of containers (strings, bytes, array, map, extensions) +// are encoded in 0, 1, 2, 4 or 8 bytes. +// Zero-length containers have no length encoded. +// For others, the number of bytes is given by pow(2, bd%3) +// - maps are encoded as [bd] [length] [[key][value]]... +// - arrays are encoded as [bd] [length] [value]... +// - extensions are encoded as [bd] [length] [tag] [byte]... +// - strings/bytearrays are encoded as [bd] [length] [byte]... +// - time.Time are encoded as [bd] [length] [byte]... +// +// The full spec will be published soon. +type SimpleHandle struct { + binaryEncodingType + notJsonType + BasicHandle + + // EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil + EncZeroValuesAsNil bool +} + +// Name returns the name of the handle: simple +func (h *SimpleHandle) Name() string { return "simple" } + +func (h *SimpleHandle) desc(bd byte) string { return simpledesc(bd) } + +// SetBytesExt sets an extension +func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, makeExt(ext)) +} diff --git a/vendor/github.com/ugorji/go/codec/simple.fastpath.mono.generated.go b/vendor/github.com/ugorji/go/codec/simple.fastpath.mono.generated.go new file mode 100644 index 000000000..329c4bfe5 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/simple.fastpath.mono.generated.go @@ -0,0 +1,12482 @@ +//go:build !notmono && !codec.notmono && !notfastpath && !codec.notfastpath + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
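For orientation, a short usage sketch of the SimpleHandle documented above, going through the package's ordinary Encoder/Decoder API (codec.NewEncoderBytes and codec.NewDecoderBytes); the handle's zero value is assumed to be usable as-is, matching the other handles in this package.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.SimpleHandle // zero value is assumed ready to use

	in := map[string]int{"a": 1, "b": 2}
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(in); err != nil {
		panic(err)
	}

	out := map[string]int{}
	if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[a:1 b:2]
}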
+ +package codec + +import ( + "reflect" + "slices" + "sort" +) + +type fastpathESimpleBytes struct { + rtid uintptr + rt reflect.Type + encfn func(*encoderSimpleBytes, *encFnInfo, reflect.Value) +} +type fastpathDSimpleBytes struct { + rtid uintptr + rt reflect.Type + decfn func(*decoderSimpleBytes, *decFnInfo, reflect.Value) +} +type fastpathEsSimpleBytes [56]fastpathESimpleBytes +type fastpathDsSimpleBytes [56]fastpathDSimpleBytes +type fastpathETSimpleBytes struct{} +type fastpathDTSimpleBytes struct{} + +func (helperEncDriverSimpleBytes) fastpathEList() *fastpathEsSimpleBytes { + var i uint = 0 + var s fastpathEsSimpleBytes + fn := func(v interface{}, fe func(*encoderSimpleBytes, *encFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathESimpleBytes{rt2id(xrt), xrt, fe} + i++ + } + + fn([]interface{}(nil), (*encoderSimpleBytes).fastpathEncSliceIntfR) + fn([]string(nil), (*encoderSimpleBytes).fastpathEncSliceStringR) + fn([][]byte(nil), (*encoderSimpleBytes).fastpathEncSliceBytesR) + fn([]float32(nil), (*encoderSimpleBytes).fastpathEncSliceFloat32R) + fn([]float64(nil), (*encoderSimpleBytes).fastpathEncSliceFloat64R) + fn([]uint8(nil), (*encoderSimpleBytes).fastpathEncSliceUint8R) + fn([]uint64(nil), (*encoderSimpleBytes).fastpathEncSliceUint64R) + fn([]int(nil), (*encoderSimpleBytes).fastpathEncSliceIntR) + fn([]int32(nil), (*encoderSimpleBytes).fastpathEncSliceInt32R) + fn([]int64(nil), (*encoderSimpleBytes).fastpathEncSliceInt64R) + fn([]bool(nil), (*encoderSimpleBytes).fastpathEncSliceBoolR) + + fn(map[string]interface{}(nil), (*encoderSimpleBytes).fastpathEncMapStringIntfR) + fn(map[string]string(nil), (*encoderSimpleBytes).fastpathEncMapStringStringR) + fn(map[string][]byte(nil), (*encoderSimpleBytes).fastpathEncMapStringBytesR) + fn(map[string]uint8(nil), (*encoderSimpleBytes).fastpathEncMapStringUint8R) + fn(map[string]uint64(nil), (*encoderSimpleBytes).fastpathEncMapStringUint64R) + fn(map[string]int(nil), (*encoderSimpleBytes).fastpathEncMapStringIntR) + fn(map[string]int32(nil), (*encoderSimpleBytes).fastpathEncMapStringInt32R) + fn(map[string]float64(nil), (*encoderSimpleBytes).fastpathEncMapStringFloat64R) + fn(map[string]bool(nil), (*encoderSimpleBytes).fastpathEncMapStringBoolR) + fn(map[uint8]interface{}(nil), (*encoderSimpleBytes).fastpathEncMapUint8IntfR) + fn(map[uint8]string(nil), (*encoderSimpleBytes).fastpathEncMapUint8StringR) + fn(map[uint8][]byte(nil), (*encoderSimpleBytes).fastpathEncMapUint8BytesR) + fn(map[uint8]uint8(nil), (*encoderSimpleBytes).fastpathEncMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*encoderSimpleBytes).fastpathEncMapUint8Uint64R) + fn(map[uint8]int(nil), (*encoderSimpleBytes).fastpathEncMapUint8IntR) + fn(map[uint8]int32(nil), (*encoderSimpleBytes).fastpathEncMapUint8Int32R) + fn(map[uint8]float64(nil), (*encoderSimpleBytes).fastpathEncMapUint8Float64R) + fn(map[uint8]bool(nil), (*encoderSimpleBytes).fastpathEncMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*encoderSimpleBytes).fastpathEncMapUint64IntfR) + fn(map[uint64]string(nil), (*encoderSimpleBytes).fastpathEncMapUint64StringR) + fn(map[uint64][]byte(nil), (*encoderSimpleBytes).fastpathEncMapUint64BytesR) + fn(map[uint64]uint8(nil), (*encoderSimpleBytes).fastpathEncMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*encoderSimpleBytes).fastpathEncMapUint64Uint64R) + fn(map[uint64]int(nil), (*encoderSimpleBytes).fastpathEncMapUint64IntR) + fn(map[uint64]int32(nil), (*encoderSimpleBytes).fastpathEncMapUint64Int32R) + fn(map[uint64]float64(nil), 
(*encoderSimpleBytes).fastpathEncMapUint64Float64R) + fn(map[uint64]bool(nil), (*encoderSimpleBytes).fastpathEncMapUint64BoolR) + fn(map[int]interface{}(nil), (*encoderSimpleBytes).fastpathEncMapIntIntfR) + fn(map[int]string(nil), (*encoderSimpleBytes).fastpathEncMapIntStringR) + fn(map[int][]byte(nil), (*encoderSimpleBytes).fastpathEncMapIntBytesR) + fn(map[int]uint8(nil), (*encoderSimpleBytes).fastpathEncMapIntUint8R) + fn(map[int]uint64(nil), (*encoderSimpleBytes).fastpathEncMapIntUint64R) + fn(map[int]int(nil), (*encoderSimpleBytes).fastpathEncMapIntIntR) + fn(map[int]int32(nil), (*encoderSimpleBytes).fastpathEncMapIntInt32R) + fn(map[int]float64(nil), (*encoderSimpleBytes).fastpathEncMapIntFloat64R) + fn(map[int]bool(nil), (*encoderSimpleBytes).fastpathEncMapIntBoolR) + fn(map[int32]interface{}(nil), (*encoderSimpleBytes).fastpathEncMapInt32IntfR) + fn(map[int32]string(nil), (*encoderSimpleBytes).fastpathEncMapInt32StringR) + fn(map[int32][]byte(nil), (*encoderSimpleBytes).fastpathEncMapInt32BytesR) + fn(map[int32]uint8(nil), (*encoderSimpleBytes).fastpathEncMapInt32Uint8R) + fn(map[int32]uint64(nil), (*encoderSimpleBytes).fastpathEncMapInt32Uint64R) + fn(map[int32]int(nil), (*encoderSimpleBytes).fastpathEncMapInt32IntR) + fn(map[int32]int32(nil), (*encoderSimpleBytes).fastpathEncMapInt32Int32R) + fn(map[int32]float64(nil), (*encoderSimpleBytes).fastpathEncMapInt32Float64R) + fn(map[int32]bool(nil), (*encoderSimpleBytes).fastpathEncMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperDecDriverSimpleBytes) fastpathDList() *fastpathDsSimpleBytes { + var i uint = 0 + var s fastpathDsSimpleBytes + fn := func(v interface{}, fd func(*decoderSimpleBytes, *decFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathDSimpleBytes{rt2id(xrt), xrt, fd} + i++ + } + + fn([]interface{}(nil), (*decoderSimpleBytes).fastpathDecSliceIntfR) + fn([]string(nil), (*decoderSimpleBytes).fastpathDecSliceStringR) + fn([][]byte(nil), (*decoderSimpleBytes).fastpathDecSliceBytesR) + fn([]float32(nil), (*decoderSimpleBytes).fastpathDecSliceFloat32R) + fn([]float64(nil), (*decoderSimpleBytes).fastpathDecSliceFloat64R) + fn([]uint8(nil), (*decoderSimpleBytes).fastpathDecSliceUint8R) + fn([]uint64(nil), (*decoderSimpleBytes).fastpathDecSliceUint64R) + fn([]int(nil), (*decoderSimpleBytes).fastpathDecSliceIntR) + fn([]int32(nil), (*decoderSimpleBytes).fastpathDecSliceInt32R) + fn([]int64(nil), (*decoderSimpleBytes).fastpathDecSliceInt64R) + fn([]bool(nil), (*decoderSimpleBytes).fastpathDecSliceBoolR) + + fn(map[string]interface{}(nil), (*decoderSimpleBytes).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*decoderSimpleBytes).fastpathDecMapStringStringR) + fn(map[string][]byte(nil), (*decoderSimpleBytes).fastpathDecMapStringBytesR) + fn(map[string]uint8(nil), (*decoderSimpleBytes).fastpathDecMapStringUint8R) + fn(map[string]uint64(nil), (*decoderSimpleBytes).fastpathDecMapStringUint64R) + fn(map[string]int(nil), (*decoderSimpleBytes).fastpathDecMapStringIntR) + fn(map[string]int32(nil), (*decoderSimpleBytes).fastpathDecMapStringInt32R) + fn(map[string]float64(nil), (*decoderSimpleBytes).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*decoderSimpleBytes).fastpathDecMapStringBoolR) + fn(map[uint8]interface{}(nil), (*decoderSimpleBytes).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*decoderSimpleBytes).fastpathDecMapUint8StringR) + fn(map[uint8][]byte(nil), (*decoderSimpleBytes).fastpathDecMapUint8BytesR) + 
fn(map[uint8]uint8(nil), (*decoderSimpleBytes).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*decoderSimpleBytes).fastpathDecMapUint8Uint64R) + fn(map[uint8]int(nil), (*decoderSimpleBytes).fastpathDecMapUint8IntR) + fn(map[uint8]int32(nil), (*decoderSimpleBytes).fastpathDecMapUint8Int32R) + fn(map[uint8]float64(nil), (*decoderSimpleBytes).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*decoderSimpleBytes).fastpathDecMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*decoderSimpleBytes).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*decoderSimpleBytes).fastpathDecMapUint64StringR) + fn(map[uint64][]byte(nil), (*decoderSimpleBytes).fastpathDecMapUint64BytesR) + fn(map[uint64]uint8(nil), (*decoderSimpleBytes).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*decoderSimpleBytes).fastpathDecMapUint64Uint64R) + fn(map[uint64]int(nil), (*decoderSimpleBytes).fastpathDecMapUint64IntR) + fn(map[uint64]int32(nil), (*decoderSimpleBytes).fastpathDecMapUint64Int32R) + fn(map[uint64]float64(nil), (*decoderSimpleBytes).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*decoderSimpleBytes).fastpathDecMapUint64BoolR) + fn(map[int]interface{}(nil), (*decoderSimpleBytes).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*decoderSimpleBytes).fastpathDecMapIntStringR) + fn(map[int][]byte(nil), (*decoderSimpleBytes).fastpathDecMapIntBytesR) + fn(map[int]uint8(nil), (*decoderSimpleBytes).fastpathDecMapIntUint8R) + fn(map[int]uint64(nil), (*decoderSimpleBytes).fastpathDecMapIntUint64R) + fn(map[int]int(nil), (*decoderSimpleBytes).fastpathDecMapIntIntR) + fn(map[int]int32(nil), (*decoderSimpleBytes).fastpathDecMapIntInt32R) + fn(map[int]float64(nil), (*decoderSimpleBytes).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*decoderSimpleBytes).fastpathDecMapIntBoolR) + fn(map[int32]interface{}(nil), (*decoderSimpleBytes).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*decoderSimpleBytes).fastpathDecMapInt32StringR) + fn(map[int32][]byte(nil), (*decoderSimpleBytes).fastpathDecMapInt32BytesR) + fn(map[int32]uint8(nil), (*decoderSimpleBytes).fastpathDecMapInt32Uint8R) + fn(map[int32]uint64(nil), (*decoderSimpleBytes).fastpathDecMapInt32Uint64R) + fn(map[int32]int(nil), (*decoderSimpleBytes).fastpathDecMapInt32IntR) + fn(map[int32]int32(nil), (*decoderSimpleBytes).fastpathDecMapInt32Int32R) + fn(map[int32]float64(nil), (*decoderSimpleBytes).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*decoderSimpleBytes).fastpathDecMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperEncDriverSimpleBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderSimpleBytes) bool { + var ft fastpathETSimpleBytes + switch v := iv.(type) { + case []interface{}: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntfV(v, e) + } + case []string: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceStringV(v, e) + } + case [][]byte: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBytesV(v, e) + } + case []float32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat32V(v, e) + } + case []float64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat64V(v, e) + } + case []uint8: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint8V(v, e) + } + case []uint64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint64V(v, e) + } + case []int: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntV(v, e) + } + case 
[]int32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt32V(v, e) + } + case []int64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt64V(v, e) + } + case []bool: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBoolV(v, e) + } + case map[string]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntfV(v, e) + } + case map[string]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringStringV(v, e) + } + case map[string][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBytesV(v, e) + } + case map[string]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint8V(v, e) + } + case map[string]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint64V(v, e) + } + case map[string]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntV(v, e) + } + case map[string]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringInt32V(v, e) + } + case map[string]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringFloat64V(v, e) + } + case map[string]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBoolV(v, e) + } + case map[uint8]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntfV(v, e) + } + case map[uint8]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8StringV(v, e) + } + case map[uint8][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BytesV(v, e) + } + case map[uint8]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint8V(v, e) + } + case map[uint8]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint64V(v, e) + } + case map[uint8]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntV(v, e) + } + case map[uint8]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Int32V(v, e) + } + case map[uint8]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Float64V(v, e) + } + case map[uint8]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BoolV(v, e) + } + case map[uint64]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntfV(v, e) + } + case map[uint64]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64StringV(v, e) + } + case map[uint64][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BytesV(v, e) + } + case map[uint64]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint8V(v, e) + } + case map[uint64]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint64V(v, e) + } + case map[uint64]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntV(v, e) + } + case map[uint64]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Int32V(v, e) + } + case map[uint64]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Float64V(v, e) + } + case map[uint64]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BoolV(v, e) + } + case map[int]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntfV(v, e) + } + case map[int]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntStringV(v, e) + } + case map[int][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBytesV(v, e) + } + case map[int]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint8V(v, e) + } + 
case map[int]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint64V(v, e) + } + case map[int]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntV(v, e) + } + case map[int]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntInt32V(v, e) + } + case map[int]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntFloat64V(v, e) + } + case map[int]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBoolV(v, e) + } + case map[int32]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntfV(v, e) + } + case map[int32]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32StringV(v, e) + } + case map[int32][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BytesV(v, e) + } + case map[int32]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint8V(v, e) + } + case map[int32]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint64V(v, e) + } + case map[int32]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntV(v, e) + } + case map[int32]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Int32V(v, e) + } + case map[int32]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Float64V(v, e) + } + case map[int32]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BoolV(v, e) + } + default: + _ = v + return false + } + return true +} + +func (e *encoderSimpleBytes) fastpathEncSliceIntfR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleBytes + var v []interface{} + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]interface{}) + } + if f.ti.mbs { + ft.EncAsMapSliceIntfV(v, e) + return + } + ft.EncSliceIntfV(v, e) +} +func (fastpathETSimpleBytes) EncSliceIntfV(v []interface{}, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleBytes) EncAsMapSliceIntfV(v []interface{}, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleBytes) fastpathEncSliceStringR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleBytes + var v []string + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]string) + } + if f.ti.mbs { + ft.EncAsMapSliceStringV(v, e) + return + } + ft.EncSliceStringV(v, e) +} +func (fastpathETSimpleBytes) EncSliceStringV(v []string, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleBytes) EncAsMapSliceStringV(v []string, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + 
e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleBytes) fastpathEncSliceBytesR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleBytes + var v [][]byte + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([][]byte) + } + if f.ti.mbs { + ft.EncAsMapSliceBytesV(v, e) + return + } + ft.EncSliceBytesV(v, e) +} +func (fastpathETSimpleBytes) EncSliceBytesV(v [][]byte, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleBytes) EncAsMapSliceBytesV(v [][]byte, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleBytes) fastpathEncSliceFloat32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleBytes + var v []float32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float32) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat32V(v, e) + return + } + ft.EncSliceFloat32V(v, e) +} +func (fastpathETSimpleBytes) EncSliceFloat32V(v []float32, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleBytes) EncAsMapSliceFloat32V(v []float32, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleBytes) fastpathEncSliceFloat64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleBytes + var v []float64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float64) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat64V(v, e) + return + } + ft.EncSliceFloat64V(v, e) +} +func (fastpathETSimpleBytes) EncSliceFloat64V(v []float64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleBytes) EncAsMapSliceFloat64V(v []float64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleBytes) fastpathEncSliceUint8R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleBytes + var v []uint8 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint8) + } + if f.ti.mbs { + ft.EncAsMapSliceUint8V(v, e) + return + } + 
ft.EncSliceUint8V(v, e) +} +func (fastpathETSimpleBytes) EncSliceUint8V(v []uint8, e *encoderSimpleBytes) { + e.e.EncodeStringBytesRaw(v) +} +func (fastpathETSimpleBytes) EncAsMapSliceUint8V(v []uint8, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(uint64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleBytes) fastpathEncSliceUint64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleBytes + var v []uint64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint64) + } + if f.ti.mbs { + ft.EncAsMapSliceUint64V(v, e) + return + } + ft.EncSliceUint64V(v, e) +} +func (fastpathETSimpleBytes) EncSliceUint64V(v []uint64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleBytes) EncAsMapSliceUint64V(v []uint64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleBytes) fastpathEncSliceIntR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleBytes + var v []int + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int) + } + if f.ti.mbs { + ft.EncAsMapSliceIntV(v, e) + return + } + ft.EncSliceIntV(v, e) +} +func (fastpathETSimpleBytes) EncSliceIntV(v []int, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleBytes) EncAsMapSliceIntV(v []int, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleBytes) fastpathEncSliceInt32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleBytes + var v []int32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int32) + } + if f.ti.mbs { + ft.EncAsMapSliceInt32V(v, e) + return + } + ft.EncSliceInt32V(v, e) +} +func (fastpathETSimpleBytes) EncSliceInt32V(v []int32, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleBytes) EncAsMapSliceInt32V(v []int32, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() 
+ } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleBytes) fastpathEncSliceInt64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleBytes + var v []int64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int64) + } + if f.ti.mbs { + ft.EncAsMapSliceInt64V(v, e) + return + } + ft.EncSliceInt64V(v, e) +} +func (fastpathETSimpleBytes) EncSliceInt64V(v []int64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleBytes) EncAsMapSliceInt64V(v []int64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleBytes) fastpathEncSliceBoolR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleBytes + var v []bool + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]bool) + } + if f.ti.mbs { + ft.EncAsMapSliceBoolV(v, e) + return + } + ft.EncSliceBoolV(v, e) +} +func (fastpathETSimpleBytes) EncSliceBoolV(v []bool, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleBytes) EncAsMapSliceBoolV(v []bool, e *encoderSimpleBytes) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleBytes) fastpathEncMapStringIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (fastpathETSimpleBytes) EncMapStringIntfV(v map[string]interface{}, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapStringStringR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (fastpathETSimpleBytes) EncMapStringStringV(v map[string]string, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey 
+ e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapStringBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e) +} +func (fastpathETSimpleBytes) EncMapStringBytesV(v map[string][]byte, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapStringUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (fastpathETSimpleBytes) EncMapStringUint8V(v map[string]uint8, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapStringUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (fastpathETSimpleBytes) EncMapStringUint64V(v map[string]uint64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapStringIntR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (fastpathETSimpleBytes) EncMapStringIntV(v map[string]int, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + 
e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapStringInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (fastpathETSimpleBytes) EncMapStringInt32V(v map[string]int32, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapStringFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (fastpathETSimpleBytes) EncMapStringFloat64V(v map[string]float64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapStringBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (fastpathETSimpleBytes) EncMapStringBoolV(v map[string]bool, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint8IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (fastpathETSimpleBytes) EncMapUint8IntfV(v map[uint8]interface{}, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) 
fastpathEncMapUint8StringR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (fastpathETSimpleBytes) EncMapUint8StringV(v map[uint8]string, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint8BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e) +} +func (fastpathETSimpleBytes) EncMapUint8BytesV(v map[uint8][]byte, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint8Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (fastpathETSimpleBytes) EncMapUint8Uint8V(v map[uint8]uint8, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint8Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (fastpathETSimpleBytes) EncMapUint8Uint64V(v map[uint8]uint64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint8IntR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (fastpathETSimpleBytes) EncMapUint8IntV(v map[uint8]int, e 
*encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint8Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (fastpathETSimpleBytes) EncMapUint8Int32V(v map[uint8]int32, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint8Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (fastpathETSimpleBytes) EncMapUint8Float64V(v map[uint8]float64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint8BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (fastpathETSimpleBytes) EncMapUint8BoolV(v map[uint8]bool, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint64IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (fastpathETSimpleBytes) EncMapUint64IntfV(v map[uint64]interface{}, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v 
{ + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint64StringR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (fastpathETSimpleBytes) EncMapUint64StringV(v map[uint64]string, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint64BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e) +} +func (fastpathETSimpleBytes) EncMapUint64BytesV(v map[uint64][]byte, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint64Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (fastpathETSimpleBytes) EncMapUint64Uint8V(v map[uint64]uint8, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint64Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (fastpathETSimpleBytes) EncMapUint64Uint64V(v map[uint64]uint64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + 
e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint64IntR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (fastpathETSimpleBytes) EncMapUint64IntV(v map[uint64]int, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint64Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (fastpathETSimpleBytes) EncMapUint64Int32V(v map[uint64]int32, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint64Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (fastpathETSimpleBytes) EncMapUint64Float64V(v map[uint64]float64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapUint64BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (fastpathETSimpleBytes) EncMapUint64BoolV(v map[uint64]bool, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + 
e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapIntIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (fastpathETSimpleBytes) EncMapIntIntfV(v map[int]interface{}, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapIntStringR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (fastpathETSimpleBytes) EncMapIntStringV(v map[int]string, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapIntBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e) +} +func (fastpathETSimpleBytes) EncMapIntBytesV(v map[int][]byte, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapIntUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (fastpathETSimpleBytes) EncMapIntUint8V(v map[int]uint8, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapIntUint64R(f *encFnInfo, rv reflect.Value) { + 
fastpathETSimpleBytes{}.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (fastpathETSimpleBytes) EncMapIntUint64V(v map[int]uint64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapIntIntR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (fastpathETSimpleBytes) EncMapIntIntV(v map[int]int, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapIntInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (fastpathETSimpleBytes) EncMapIntInt32V(v map[int]int32, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapIntFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (fastpathETSimpleBytes) EncMapIntFloat64V(v map[int]float64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapIntBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (fastpathETSimpleBytes) EncMapIntBoolV(v map[int]bool, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapInt32IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (fastpathETSimpleBytes) EncMapInt32IntfV(v map[int32]interface{}, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapInt32StringR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (fastpathETSimpleBytes) EncMapInt32StringV(v map[int32]string, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapInt32BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e) +} +func (fastpathETSimpleBytes) EncMapInt32BytesV(v map[int32][]byte, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapInt32Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (fastpathETSimpleBytes) EncMapInt32Uint8V(v map[int32]uint8, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + 
e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapInt32Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (fastpathETSimpleBytes) EncMapInt32Uint64V(v map[int32]uint64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapInt32IntR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (fastpathETSimpleBytes) EncMapInt32IntV(v map[int32]int, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapInt32Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (fastpathETSimpleBytes) EncMapInt32Int32V(v map[int32]int32, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapInt32Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (fastpathETSimpleBytes) EncMapInt32Float64V(v map[int32]float64, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + 
e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleBytes) fastpathEncMapInt32BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleBytes{}.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (fastpathETSimpleBytes) EncMapInt32BoolV(v map[int32]bool, e *encoderSimpleBytes) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (helperDecDriverSimpleBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderSimpleBytes) bool { + var ft fastpathDTSimpleBytes + var changed bool + var containerLen int + switch v := iv.(type) { + case []interface{}: + ft.DecSliceIntfN(v, d) + case *[]interface{}: + var v2 []interface{} + if v2, changed = ft.DecSliceIntfY(*v, d); changed { + *v = v2 + } + case []string: + ft.DecSliceStringN(v, d) + case *[]string: + var v2 []string + if v2, changed = ft.DecSliceStringY(*v, d); changed { + *v = v2 + } + case [][]byte: + ft.DecSliceBytesN(v, d) + case *[][]byte: + var v2 [][]byte + if v2, changed = ft.DecSliceBytesY(*v, d); changed { + *v = v2 + } + case []float32: + ft.DecSliceFloat32N(v, d) + case *[]float32: + var v2 []float32 + if v2, changed = ft.DecSliceFloat32Y(*v, d); changed { + *v = v2 + } + case []float64: + ft.DecSliceFloat64N(v, d) + case *[]float64: + var v2 []float64 + if v2, changed = ft.DecSliceFloat64Y(*v, d); changed { + *v = v2 + } + case []uint8: + ft.DecSliceUint8N(v, d) + case *[]uint8: + var v2 []uint8 + if v2, changed = ft.DecSliceUint8Y(*v, d); changed { + *v = v2 + } + case []uint64: + ft.DecSliceUint64N(v, d) + case *[]uint64: + var v2 []uint64 + if v2, changed = ft.DecSliceUint64Y(*v, d); changed { + *v = v2 + } + case []int: + ft.DecSliceIntN(v, d) + case *[]int: + var v2 []int + if v2, changed = ft.DecSliceIntY(*v, d); changed { + *v = v2 + } + case []int32: + ft.DecSliceInt32N(v, d) + case *[]int32: + var v2 []int32 + if v2, changed = ft.DecSliceInt32Y(*v, d); changed { + *v = v2 + } + case []int64: + ft.DecSliceInt64N(v, d) + case *[]int64: + var v2 []int64 + if v2, changed = ft.DecSliceInt64Y(*v, d); changed { + *v = v2 + } + case []bool: + ft.DecSliceBoolN(v, d) + case *[]bool: + var v2 []bool + if v2, changed = ft.DecSliceBoolY(*v, d); changed { + *v = v2 + } + case map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringStringL(v, containerLen, d) + } + d.mapEnd() + } + case 
*map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); 
containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*v, containerLen, d) + 
} + d.mapEnd() + } + case map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = 
make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]string: + if containerLen = 
d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]bool: + if 
containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + 
ft.DecMapInt32Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*v, containerLen, d) + } + d.mapEnd() + } + default: + _ = v + return false + } + return true +} + +func (d *decoderSimpleBytes) fastpathDecSliceIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]interface{}) + if vv, changed := ft.DecSliceIntfY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []interface{} + rvGetSlice4Array(rv, &v) + ft.DecSliceIntfN(v, d) + default: + ft.DecSliceIntfN(rv2i(rv).([]interface{}), d) + } +} +func (fastpathDTSimpleBytes) DecSliceIntfY(v []interface{}, d *decoderSimpleBytes) (v2 []interface{}, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []interface{}) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]interface{}, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + d.decode(&v[uint(j)]) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]interface{}{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleBytes) DecSliceIntfN(v []interface{}, d *decoderSimpleBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = 
d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + d.decode(&v[uint(j)]) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleBytes) fastpathDecSliceStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]string) + if vv, changed := ft.DecSliceStringY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []string + rvGetSlice4Array(rv, &v) + ft.DecSliceStringN(v, d) + default: + ft.DecSliceStringN(rv2i(rv).([]string), d) + } +} +func (fastpathDTSimpleBytes) DecSliceStringY(v []string, d *decoderSimpleBytes) (v2 []string, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []string) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]string, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, "")) + } + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]string{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleBytes) DecSliceStringN(v []string, d *decoderSimpleBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleBytes) fastpathDecSliceBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[][]byte) + if vv, changed := ft.DecSliceBytesY(*v, d); changed { + *v = vv + } + case 
reflect.Array: + var v [][]byte + rvGetSlice4Array(rv, &v) + ft.DecSliceBytesN(v, d) + default: + ft.DecSliceBytesN(rv2i(rv).([][]byte), d) + } +} +func (fastpathDTSimpleBytes) DecSliceBytesY(v [][]byte, d *decoderSimpleBytes) (v2 [][]byte, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst [][]byte) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 24)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([][]byte, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([][]byte{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleBytes) DecSliceBytesN(v [][]byte, d *decoderSimpleBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleBytes) fastpathDecSliceFloat32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float32) + if vv, changed := ft.DecSliceFloat32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float32 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat32N(v, d) + default: + ft.DecSliceFloat32N(rv2i(rv).([]float32), d) + } +} +func (fastpathDTSimpleBytes) DecSliceFloat32Y(v []float32, d *decoderSimpleBytes) (v2 []float32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == 
len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = float32(d.d.DecodeFloat32()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleBytes) DecSliceFloat32N(v []float32, d *decoderSimpleBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = float32(d.d.DecodeFloat32()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleBytes) fastpathDecSliceFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float64) + if vv, changed := ft.DecSliceFloat64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float64 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat64N(v, d) + default: + ft.DecSliceFloat64N(rv2i(rv).([]float64), d) + } +} +func (fastpathDTSimpleBytes) DecSliceFloat64Y(v []float64, d *decoderSimpleBytes) (v2 []float64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeFloat64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleBytes) DecSliceFloat64N(v []float64, d *decoderSimpleBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if 
isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeFloat64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleBytes) fastpathDecSliceUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint8) + if vv, changed := ft.DecSliceUint8Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint8 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint8N(v, d) + default: + ft.DecSliceUint8N(rv2i(rv).([]uint8), d) + } +} +func (fastpathDTSimpleBytes) DecSliceUint8Y(v []uint8, d *decoderSimpleBytes) (v2 []uint8, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + if ctyp != valueTypeMap { + var dbi dBytesIntoState + v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false) + return v2, dbi != dBytesIntoParamOut + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint8) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint8, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint8{}) + } + d.mapEnd() + return v, changed +} +func (fastpathDTSimpleBytes) DecSliceUint8N(v []uint8, d *decoderSimpleBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + if ctyp != valueTypeMap { + d.decodeBytesInto(v[:len(v):len(v)], true) + return + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + d.mapEnd() +} + +func (d *decoderSimpleBytes) fastpathDecSliceUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint64) + if vv, changed := ft.DecSliceUint64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint64 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint64N(v, d) + default: + ft.DecSliceUint64N(rv2i(rv).([]uint64), d) + } +} +func (fastpathDTSimpleBytes) DecSliceUint64Y(v []uint64, d *decoderSimpleBytes) (v2 []uint64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + 
containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeUint64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleBytes) DecSliceUint64N(v []uint64, d *decoderSimpleBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeUint64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleBytes) fastpathDecSliceIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int) + if vv, changed := ft.DecSliceIntY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int + rvGetSlice4Array(rv, &v) + ft.DecSliceIntN(v, d) + default: + ft.DecSliceIntN(rv2i(rv).([]int), d) + } +} +func (fastpathDTSimpleBytes) DecSliceIntY(v []int, d *decoderSimpleBytes) (v2 []int, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } + if j < 
len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleBytes) DecSliceIntN(v []int, d *decoderSimpleBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleBytes) fastpathDecSliceInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int32) + if vv, changed := ft.DecSliceInt32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int32 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt32N(v, d) + default: + ft.DecSliceInt32N(rv2i(rv).([]int32), d) + } +} +func (fastpathDTSimpleBytes) DecSliceInt32Y(v []int32, d *decoderSimpleBytes) (v2 []int32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleBytes) DecSliceInt32N(v []int32, d *decoderSimpleBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = 
int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleBytes) fastpathDecSliceInt64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int64) + if vv, changed := ft.DecSliceInt64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int64 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt64N(v, d) + default: + ft.DecSliceInt64N(rv2i(rv).([]int64), d) + } +} +func (fastpathDTSimpleBytes) DecSliceInt64Y(v []int64, d *decoderSimpleBytes) (v2 []int64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeInt64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleBytes) DecSliceInt64N(v []int64, d *decoderSimpleBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeInt64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleBytes) fastpathDecSliceBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]bool) + if vv, changed := ft.DecSliceBoolY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []bool + rvGetSlice4Array(rv, &v) + ft.DecSliceBoolN(v, d) + default: + ft.DecSliceBoolN(rv2i(rv).([]bool), d) + } +} +func (fastpathDTSimpleBytes) DecSliceBoolY(v []bool, d *decoderSimpleBytes) (v2 []bool, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + 
containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []bool) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]bool, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, false)) + } + v[uint(j)] = d.d.DecodeBool() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]bool{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleBytes) DecSliceBoolN(v []bool, d *decoderSimpleBytes) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeBool() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} +func (d *decoderSimpleBytes) fastpathDecMapStringIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]interface{}) + if *vp == nil { + *vp = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntfL(rv2i(rv).(map[string]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapStringIntfL(v map[string]interface{}, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderSimpleBytes) fastpathDecMapStringStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]string) + if *vp == nil { + *vp = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringStringL(rv2i(rv).(map[string]string), containerLen, d) + } + d.mapEnd() +} +func 
(fastpathDTSimpleBytes) DecMapStringStringL(v map[string]string, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderSimpleBytes) fastpathDecMapStringBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string][]byte) + if *vp == nil { + *vp = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBytesL(rv2i(rv).(map[string][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapStringBytesL(v map[string][]byte, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderSimpleBytes) fastpathDecMapStringUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint8) + if *vp == nil { + *vp = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint8L(rv2i(rv).(map[string]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapStringUint8L(v map[string]uint8, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapStringUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint64) + if *vp == nil { + *vp = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint64L(rv2i(rv).(map[string]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapStringUint64L(v map[string]uint64, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + 
d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderSimpleBytes) fastpathDecMapStringIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int) + if *vp == nil { + *vp = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntL(rv2i(rv).(map[string]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapStringIntL(v map[string]int, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapStringInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int32) + if *vp == nil { + *vp = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringInt32L(rv2i(rv).(map[string]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapStringInt32L(v map[string]int32, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapStringFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]float64) + if *vp == nil { + *vp = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringFloat64L(rv2i(rv).(map[string]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapStringFloat64L(v map[string]float64, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderSimpleBytes) fastpathDecMapStringBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]bool) + if *vp == nil { + *vp = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + 
ft.DecMapStringBoolL(rv2i(rv).(map[string]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapStringBoolL(v map[string]bool, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint8IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]interface{}) + if *vp == nil { + *vp = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntfL(rv2i(rv).(map[uint8]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint8IntfL(v map[uint8]interface{}, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint8StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]string) + if *vp == nil { + *vp = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8StringL(rv2i(rv).(map[uint8]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint8StringL(v map[uint8]string, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint8BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8][]byte) + if *vp == nil { + *vp = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BytesL(rv2i(rv).(map[uint8][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint8BytesL(v map[uint8][]byte, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; 
d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint8Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint8) + if *vp == nil { + *vp = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint8L(rv2i(rv).(map[uint8]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint8Uint8L(v map[uint8]uint8, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint8Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint64) + if *vp == nil { + *vp = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint64L(rv2i(rv).(map[uint8]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint8Uint64L(v map[uint8]uint64, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint8IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int) + if *vp == nil { + *vp = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntL(rv2i(rv).(map[uint8]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint8IntL(v map[uint8]int, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint8Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int32) + if *vp == nil { + *vp = make(map[uint8]int32, 
decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Int32L(rv2i(rv).(map[uint8]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint8Int32L(v map[uint8]int32, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint8Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]float64) + if *vp == nil { + *vp = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Float64L(rv2i(rv).(map[uint8]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint8Float64L(v map[uint8]float64, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint8BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]bool) + if *vp == nil { + *vp = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BoolL(rv2i(rv).(map[uint8]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint8BoolL(v map[uint8]bool, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint64IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]interface{}) + if *vp == nil { + *vp = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntfL(rv2i(rv).(map[uint64]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint64IntfL(v map[uint64]interface{}, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && 
!d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint64StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]string) + if *vp == nil { + *vp = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64StringL(rv2i(rv).(map[uint64]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint64StringL(v map[uint64]string, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint64BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64][]byte) + if *vp == nil { + *vp = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BytesL(rv2i(rv).(map[uint64][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint64BytesL(v map[uint64][]byte, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint64Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint8) + if *vp == nil { + *vp = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint8L(rv2i(rv).(map[uint64]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint64Uint8L(v map[uint64]uint8, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint64Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == 
reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint64) + if *vp == nil { + *vp = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint64L(rv2i(rv).(map[uint64]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint64Uint64L(v map[uint64]uint64, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint64IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int) + if *vp == nil { + *vp = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint64IntL(v map[uint64]int, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint64Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int32) + if *vp == nil { + *vp = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint64Int32L(v map[uint64]int32, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint64Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]float64) + if *vp == nil { + *vp = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]float64 given stream length: ", int64(containerLen)) 
+ } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderSimpleBytes) fastpathDecMapUint64BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]bool) + if *vp == nil { + *vp = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderSimpleBytes) fastpathDecMapIntIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]interface{}) + if *vp == nil { + *vp = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderSimpleBytes) fastpathDecMapIntStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]string) + if *vp == nil { + *vp = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapIntStringL(v map[int]string, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderSimpleBytes) fastpathDecMapIntBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int][]byte) + if *vp == nil { + *vp = 
make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapIntBytesL(v map[int][]byte, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderSimpleBytes) fastpathDecMapIntUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint8) + if *vp == nil { + *vp = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapIntUint8L(v map[int]uint8, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapIntUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint64) + if *vp == nil { + *vp = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapIntUint64L(v map[int]uint64, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderSimpleBytes) fastpathDecMapIntIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int) + if *vp == nil { + *vp = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapIntIntL(v map[int]int, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int given stream length: ", int64(containerLen)) + } + hasLen := 
containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapIntInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int32) + if *vp == nil { + *vp = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapIntInt32L(v map[int]int32, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapIntFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]float64) + if *vp == nil { + *vp = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapIntFloat64L(v map[int]float64, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderSimpleBytes) fastpathDecMapIntBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]bool) + if *vp == nil { + *vp = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapIntBoolL(v map[int]bool, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderSimpleBytes) fastpathDecMapInt32IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]interface{}) + if *vp == nil { + *vp = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if 
containerLen != 0 { + ft.DecMapInt32IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderSimpleBytes) fastpathDecMapInt32StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]string) + if *vp == nil { + *vp = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapInt32StringL(v map[int32]string, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderSimpleBytes) fastpathDecMapInt32BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32][]byte) + if *vp == nil { + *vp = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderSimpleBytes) fastpathDecMapInt32Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint8) + if *vp == nil { + *vp = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *decoderSimpleBytes) { + if v == 
nil { + halt.errorInt("cannot decode into nil map[int32]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapInt32Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint64) + if *vp == nil { + *vp = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderSimpleBytes) fastpathDecMapInt32IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int) + if *vp == nil { + *vp = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapInt32IntL(v map[int32]int, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapInt32Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int32) + if *vp == nil { + *vp = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapInt32Int32L(v map[int32]int32, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderSimpleBytes) fastpathDecMapInt32Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr 
{ + vp, _ := rv2i(rv).(*map[int32]float64) + if *vp == nil { + *vp = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderSimpleBytes) fastpathDecMapInt32BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleBytes + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]bool) + if *vp == nil { + *vp = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleBytes) DecMapInt32BoolL(v map[int32]bool, containerLen int, d *decoderSimpleBytes) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} + +type fastpathESimpleIO struct { + rtid uintptr + rt reflect.Type + encfn func(*encoderSimpleIO, *encFnInfo, reflect.Value) +} +type fastpathDSimpleIO struct { + rtid uintptr + rt reflect.Type + decfn func(*decoderSimpleIO, *decFnInfo, reflect.Value) +} +type fastpathEsSimpleIO [56]fastpathESimpleIO +type fastpathDsSimpleIO [56]fastpathDSimpleIO +type fastpathETSimpleIO struct{} +type fastpathDTSimpleIO struct{} + +func (helperEncDriverSimpleIO) fastpathEList() *fastpathEsSimpleIO { + var i uint = 0 + var s fastpathEsSimpleIO + fn := func(v interface{}, fe func(*encoderSimpleIO, *encFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathESimpleIO{rt2id(xrt), xrt, fe} + i++ + } + + fn([]interface{}(nil), (*encoderSimpleIO).fastpathEncSliceIntfR) + fn([]string(nil), (*encoderSimpleIO).fastpathEncSliceStringR) + fn([][]byte(nil), (*encoderSimpleIO).fastpathEncSliceBytesR) + fn([]float32(nil), (*encoderSimpleIO).fastpathEncSliceFloat32R) + fn([]float64(nil), (*encoderSimpleIO).fastpathEncSliceFloat64R) + fn([]uint8(nil), (*encoderSimpleIO).fastpathEncSliceUint8R) + fn([]uint64(nil), (*encoderSimpleIO).fastpathEncSliceUint64R) + fn([]int(nil), (*encoderSimpleIO).fastpathEncSliceIntR) + fn([]int32(nil), (*encoderSimpleIO).fastpathEncSliceInt32R) + fn([]int64(nil), (*encoderSimpleIO).fastpathEncSliceInt64R) + fn([]bool(nil), (*encoderSimpleIO).fastpathEncSliceBoolR) + + fn(map[string]interface{}(nil), (*encoderSimpleIO).fastpathEncMapStringIntfR) + fn(map[string]string(nil), (*encoderSimpleIO).fastpathEncMapStringStringR) + fn(map[string][]byte(nil), (*encoderSimpleIO).fastpathEncMapStringBytesR) + fn(map[string]uint8(nil), (*encoderSimpleIO).fastpathEncMapStringUint8R) + fn(map[string]uint64(nil), 
(*encoderSimpleIO).fastpathEncMapStringUint64R) + fn(map[string]int(nil), (*encoderSimpleIO).fastpathEncMapStringIntR) + fn(map[string]int32(nil), (*encoderSimpleIO).fastpathEncMapStringInt32R) + fn(map[string]float64(nil), (*encoderSimpleIO).fastpathEncMapStringFloat64R) + fn(map[string]bool(nil), (*encoderSimpleIO).fastpathEncMapStringBoolR) + fn(map[uint8]interface{}(nil), (*encoderSimpleIO).fastpathEncMapUint8IntfR) + fn(map[uint8]string(nil), (*encoderSimpleIO).fastpathEncMapUint8StringR) + fn(map[uint8][]byte(nil), (*encoderSimpleIO).fastpathEncMapUint8BytesR) + fn(map[uint8]uint8(nil), (*encoderSimpleIO).fastpathEncMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*encoderSimpleIO).fastpathEncMapUint8Uint64R) + fn(map[uint8]int(nil), (*encoderSimpleIO).fastpathEncMapUint8IntR) + fn(map[uint8]int32(nil), (*encoderSimpleIO).fastpathEncMapUint8Int32R) + fn(map[uint8]float64(nil), (*encoderSimpleIO).fastpathEncMapUint8Float64R) + fn(map[uint8]bool(nil), (*encoderSimpleIO).fastpathEncMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*encoderSimpleIO).fastpathEncMapUint64IntfR) + fn(map[uint64]string(nil), (*encoderSimpleIO).fastpathEncMapUint64StringR) + fn(map[uint64][]byte(nil), (*encoderSimpleIO).fastpathEncMapUint64BytesR) + fn(map[uint64]uint8(nil), (*encoderSimpleIO).fastpathEncMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*encoderSimpleIO).fastpathEncMapUint64Uint64R) + fn(map[uint64]int(nil), (*encoderSimpleIO).fastpathEncMapUint64IntR) + fn(map[uint64]int32(nil), (*encoderSimpleIO).fastpathEncMapUint64Int32R) + fn(map[uint64]float64(nil), (*encoderSimpleIO).fastpathEncMapUint64Float64R) + fn(map[uint64]bool(nil), (*encoderSimpleIO).fastpathEncMapUint64BoolR) + fn(map[int]interface{}(nil), (*encoderSimpleIO).fastpathEncMapIntIntfR) + fn(map[int]string(nil), (*encoderSimpleIO).fastpathEncMapIntStringR) + fn(map[int][]byte(nil), (*encoderSimpleIO).fastpathEncMapIntBytesR) + fn(map[int]uint8(nil), (*encoderSimpleIO).fastpathEncMapIntUint8R) + fn(map[int]uint64(nil), (*encoderSimpleIO).fastpathEncMapIntUint64R) + fn(map[int]int(nil), (*encoderSimpleIO).fastpathEncMapIntIntR) + fn(map[int]int32(nil), (*encoderSimpleIO).fastpathEncMapIntInt32R) + fn(map[int]float64(nil), (*encoderSimpleIO).fastpathEncMapIntFloat64R) + fn(map[int]bool(nil), (*encoderSimpleIO).fastpathEncMapIntBoolR) + fn(map[int32]interface{}(nil), (*encoderSimpleIO).fastpathEncMapInt32IntfR) + fn(map[int32]string(nil), (*encoderSimpleIO).fastpathEncMapInt32StringR) + fn(map[int32][]byte(nil), (*encoderSimpleIO).fastpathEncMapInt32BytesR) + fn(map[int32]uint8(nil), (*encoderSimpleIO).fastpathEncMapInt32Uint8R) + fn(map[int32]uint64(nil), (*encoderSimpleIO).fastpathEncMapInt32Uint64R) + fn(map[int32]int(nil), (*encoderSimpleIO).fastpathEncMapInt32IntR) + fn(map[int32]int32(nil), (*encoderSimpleIO).fastpathEncMapInt32Int32R) + fn(map[int32]float64(nil), (*encoderSimpleIO).fastpathEncMapInt32Float64R) + fn(map[int32]bool(nil), (*encoderSimpleIO).fastpathEncMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperDecDriverSimpleIO) fastpathDList() *fastpathDsSimpleIO { + var i uint = 0 + var s fastpathDsSimpleIO + fn := func(v interface{}, fd func(*decoderSimpleIO, *decFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + s[i] = fastpathDSimpleIO{rt2id(xrt), xrt, fd} + i++ + } + + fn([]interface{}(nil), (*decoderSimpleIO).fastpathDecSliceIntfR) + fn([]string(nil), (*decoderSimpleIO).fastpathDecSliceStringR) + fn([][]byte(nil), 
(*decoderSimpleIO).fastpathDecSliceBytesR) + fn([]float32(nil), (*decoderSimpleIO).fastpathDecSliceFloat32R) + fn([]float64(nil), (*decoderSimpleIO).fastpathDecSliceFloat64R) + fn([]uint8(nil), (*decoderSimpleIO).fastpathDecSliceUint8R) + fn([]uint64(nil), (*decoderSimpleIO).fastpathDecSliceUint64R) + fn([]int(nil), (*decoderSimpleIO).fastpathDecSliceIntR) + fn([]int32(nil), (*decoderSimpleIO).fastpathDecSliceInt32R) + fn([]int64(nil), (*decoderSimpleIO).fastpathDecSliceInt64R) + fn([]bool(nil), (*decoderSimpleIO).fastpathDecSliceBoolR) + + fn(map[string]interface{}(nil), (*decoderSimpleIO).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*decoderSimpleIO).fastpathDecMapStringStringR) + fn(map[string][]byte(nil), (*decoderSimpleIO).fastpathDecMapStringBytesR) + fn(map[string]uint8(nil), (*decoderSimpleIO).fastpathDecMapStringUint8R) + fn(map[string]uint64(nil), (*decoderSimpleIO).fastpathDecMapStringUint64R) + fn(map[string]int(nil), (*decoderSimpleIO).fastpathDecMapStringIntR) + fn(map[string]int32(nil), (*decoderSimpleIO).fastpathDecMapStringInt32R) + fn(map[string]float64(nil), (*decoderSimpleIO).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*decoderSimpleIO).fastpathDecMapStringBoolR) + fn(map[uint8]interface{}(nil), (*decoderSimpleIO).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*decoderSimpleIO).fastpathDecMapUint8StringR) + fn(map[uint8][]byte(nil), (*decoderSimpleIO).fastpathDecMapUint8BytesR) + fn(map[uint8]uint8(nil), (*decoderSimpleIO).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint64(nil), (*decoderSimpleIO).fastpathDecMapUint8Uint64R) + fn(map[uint8]int(nil), (*decoderSimpleIO).fastpathDecMapUint8IntR) + fn(map[uint8]int32(nil), (*decoderSimpleIO).fastpathDecMapUint8Int32R) + fn(map[uint8]float64(nil), (*decoderSimpleIO).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*decoderSimpleIO).fastpathDecMapUint8BoolR) + fn(map[uint64]interface{}(nil), (*decoderSimpleIO).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*decoderSimpleIO).fastpathDecMapUint64StringR) + fn(map[uint64][]byte(nil), (*decoderSimpleIO).fastpathDecMapUint64BytesR) + fn(map[uint64]uint8(nil), (*decoderSimpleIO).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint64(nil), (*decoderSimpleIO).fastpathDecMapUint64Uint64R) + fn(map[uint64]int(nil), (*decoderSimpleIO).fastpathDecMapUint64IntR) + fn(map[uint64]int32(nil), (*decoderSimpleIO).fastpathDecMapUint64Int32R) + fn(map[uint64]float64(nil), (*decoderSimpleIO).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*decoderSimpleIO).fastpathDecMapUint64BoolR) + fn(map[int]interface{}(nil), (*decoderSimpleIO).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*decoderSimpleIO).fastpathDecMapIntStringR) + fn(map[int][]byte(nil), (*decoderSimpleIO).fastpathDecMapIntBytesR) + fn(map[int]uint8(nil), (*decoderSimpleIO).fastpathDecMapIntUint8R) + fn(map[int]uint64(nil), (*decoderSimpleIO).fastpathDecMapIntUint64R) + fn(map[int]int(nil), (*decoderSimpleIO).fastpathDecMapIntIntR) + fn(map[int]int32(nil), (*decoderSimpleIO).fastpathDecMapIntInt32R) + fn(map[int]float64(nil), (*decoderSimpleIO).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*decoderSimpleIO).fastpathDecMapIntBoolR) + fn(map[int32]interface{}(nil), (*decoderSimpleIO).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*decoderSimpleIO).fastpathDecMapInt32StringR) + fn(map[int32][]byte(nil), (*decoderSimpleIO).fastpathDecMapInt32BytesR) + fn(map[int32]uint8(nil), (*decoderSimpleIO).fastpathDecMapInt32Uint8R) + fn(map[int32]uint64(nil), 
(*decoderSimpleIO).fastpathDecMapInt32Uint64R) + fn(map[int32]int(nil), (*decoderSimpleIO).fastpathDecMapInt32IntR) + fn(map[int32]int32(nil), (*decoderSimpleIO).fastpathDecMapInt32Int32R) + fn(map[int32]float64(nil), (*decoderSimpleIO).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*decoderSimpleIO).fastpathDecMapInt32BoolR) + + sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid }) + return &s +} + +func (helperEncDriverSimpleIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderSimpleIO) bool { + var ft fastpathETSimpleIO + switch v := iv.(type) { + case []interface{}: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntfV(v, e) + } + case []string: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceStringV(v, e) + } + case [][]byte: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBytesV(v, e) + } + case []float32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat32V(v, e) + } + case []float64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceFloat64V(v, e) + } + case []uint8: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint8V(v, e) + } + case []uint64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceUint64V(v, e) + } + case []int: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceIntV(v, e) + } + case []int32: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt32V(v, e) + } + case []int64: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceInt64V(v, e) + } + case []bool: + if v == nil { + e.e.writeNilArray() + } else { + ft.EncSliceBoolV(v, e) + } + case map[string]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntfV(v, e) + } + case map[string]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringStringV(v, e) + } + case map[string][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBytesV(v, e) + } + case map[string]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint8V(v, e) + } + case map[string]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringUint64V(v, e) + } + case map[string]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringIntV(v, e) + } + case map[string]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringInt32V(v, e) + } + case map[string]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringFloat64V(v, e) + } + case map[string]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapStringBoolV(v, e) + } + case map[uint8]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntfV(v, e) + } + case map[uint8]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8StringV(v, e) + } + case map[uint8][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BytesV(v, e) + } + case map[uint8]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint8V(v, e) + } + case map[uint8]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Uint64V(v, e) + } + case map[uint8]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8IntV(v, e) + } + case map[uint8]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Int32V(v, e) + } + case map[uint8]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8Float64V(v, e) + } + case map[uint8]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint8BoolV(v, e) + } + case 
map[uint64]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntfV(v, e) + } + case map[uint64]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64StringV(v, e) + } + case map[uint64][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BytesV(v, e) + } + case map[uint64]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint8V(v, e) + } + case map[uint64]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Uint64V(v, e) + } + case map[uint64]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64IntV(v, e) + } + case map[uint64]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Int32V(v, e) + } + case map[uint64]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64Float64V(v, e) + } + case map[uint64]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapUint64BoolV(v, e) + } + case map[int]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntfV(v, e) + } + case map[int]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntStringV(v, e) + } + case map[int][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBytesV(v, e) + } + case map[int]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint8V(v, e) + } + case map[int]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntUint64V(v, e) + } + case map[int]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntIntV(v, e) + } + case map[int]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntInt32V(v, e) + } + case map[int]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntFloat64V(v, e) + } + case map[int]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapIntBoolV(v, e) + } + case map[int32]interface{}: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntfV(v, e) + } + case map[int32]string: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32StringV(v, e) + } + case map[int32][]byte: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BytesV(v, e) + } + case map[int32]uint8: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint8V(v, e) + } + case map[int32]uint64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Uint64V(v, e) + } + case map[int32]int: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32IntV(v, e) + } + case map[int32]int32: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Int32V(v, e) + } + case map[int32]float64: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32Float64V(v, e) + } + case map[int32]bool: + if v == nil { + e.e.writeNilMap() + } else { + ft.EncMapInt32BoolV(v, e) + } + default: + _ = v + return false + } + return true +} + +func (e *encoderSimpleIO) fastpathEncSliceIntfR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleIO + var v []interface{} + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]interface{}) + } + if f.ti.mbs { + ft.EncAsMapSliceIntfV(v, e) + return + } + ft.EncSliceIntfV(v, e) +} +func (fastpathETSimpleIO) EncSliceIntfV(v []interface{}, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteArrayEnd() +} +func 
(fastpathETSimpleIO) EncAsMapSliceIntfV(v []interface{}, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + if !e.encodeBuiltin(v[j]) { + e.encodeR(reflect.ValueOf(v[j])) + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleIO) fastpathEncSliceStringR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleIO + var v []string + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]string) + } + if f.ti.mbs { + ft.EncAsMapSliceStringV(v, e) + return + } + ft.EncSliceStringV(v, e) +} +func (fastpathETSimpleIO) EncSliceStringV(v []string, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleIO) EncAsMapSliceStringV(v []string, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeString(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleIO) fastpathEncSliceBytesR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleIO + var v [][]byte + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([][]byte) + } + if f.ti.mbs { + ft.EncAsMapSliceBytesV(v, e) + return + } + ft.EncSliceBytesV(v, e) +} +func (fastpathETSimpleIO) EncSliceBytesV(v [][]byte, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleIO) EncAsMapSliceBytesV(v [][]byte, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBytes(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleIO) fastpathEncSliceFloat32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleIO + var v []float32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float32) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat32V(v, e) + return + } + ft.EncSliceFloat32V(v, e) +} +func (fastpathETSimpleIO) EncSliceFloat32V(v []float32, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleIO) EncAsMapSliceFloat32V(v []float32, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat32(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleIO) fastpathEncSliceFloat64R(f 
*encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleIO + var v []float64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]float64) + } + if f.ti.mbs { + ft.EncAsMapSliceFloat64V(v, e) + return + } + ft.EncSliceFloat64V(v, e) +} +func (fastpathETSimpleIO) EncSliceFloat64V(v []float64, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleIO) EncAsMapSliceFloat64V(v []float64, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeFloat64(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleIO) fastpathEncSliceUint8R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleIO + var v []uint8 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint8) + } + if f.ti.mbs { + ft.EncAsMapSliceUint8V(v, e) + return + } + ft.EncSliceUint8V(v, e) +} +func (fastpathETSimpleIO) EncSliceUint8V(v []uint8, e *encoderSimpleIO) { + e.e.EncodeStringBytesRaw(v) +} +func (fastpathETSimpleIO) EncAsMapSliceUint8V(v []uint8, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(uint64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleIO) fastpathEncSliceUint64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleIO + var v []uint64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]uint64) + } + if f.ti.mbs { + ft.EncAsMapSliceUint64V(v, e) + return + } + ft.EncSliceUint64V(v, e) +} +func (fastpathETSimpleIO) EncSliceUint64V(v []uint64, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleIO) EncAsMapSliceUint64V(v []uint64, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeUint(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleIO) fastpathEncSliceIntR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleIO + var v []int + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int) + } + if f.ti.mbs { + ft.EncAsMapSliceIntV(v, e) + return + } + ft.EncSliceIntV(v, e) +} +func (fastpathETSimpleIO) EncSliceIntV(v []int, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleIO) EncAsMapSliceIntV(v []int, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + 
return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleIO) fastpathEncSliceInt32R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleIO + var v []int32 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int32) + } + if f.ti.mbs { + ft.EncAsMapSliceInt32V(v, e) + return + } + ft.EncSliceInt32V(v, e) +} +func (fastpathETSimpleIO) EncSliceInt32V(v []int32, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleIO) EncAsMapSliceInt32V(v []int32, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(int64(v[j])) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleIO) fastpathEncSliceInt64R(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleIO + var v []int64 + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]int64) + } + if f.ti.mbs { + ft.EncAsMapSliceInt64V(v, e) + return + } + ft.EncSliceInt64V(v, e) +} +func (fastpathETSimpleIO) EncSliceInt64V(v []int64, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleIO) EncAsMapSliceInt64V(v []int64, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeInt(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleIO) fastpathEncSliceBoolR(f *encFnInfo, rv reflect.Value) { + var ft fastpathETSimpleIO + var v []bool + if rv.Kind() == reflect.Array { + rvGetSlice4Array(rv, &v) + } else { + v = rv2i(rv).([]bool) + } + if f.ti.mbs { + ft.EncAsMapSliceBoolV(v, e) + return + } + ft.EncSliceBoolV(v, e) +} +func (fastpathETSimpleIO) EncSliceBoolV(v []bool, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(v)) + for j := range v { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteArrayEnd() +} +func (fastpathETSimpleIO) EncAsMapSliceBoolV(v []bool, e *encoderSimpleIO) { + if len(v) == 0 { + e.c = 0 + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(len(v)) + e.mapStart(len(v) >> 1) + for j := range v { + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + } else { + e.mapElemValue() + } + e.e.EncodeBool(v[j]) + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleIO) fastpathEncMapStringIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (fastpathETSimpleIO) EncMapStringIntfV(v map[string]interface{}, e *encoderSimpleIO) { + if len(v) == 
0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapStringStringR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (fastpathETSimpleIO) EncMapStringStringV(v map[string]string, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapStringBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e) +} +func (fastpathETSimpleIO) EncMapStringBytesV(v map[string][]byte, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapStringUint8R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (fastpathETSimpleIO) EncMapStringUint8V(v map[string]uint8, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapStringUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (fastpathETSimpleIO) EncMapStringUint64V(v map[string]uint64, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + 
slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapStringIntR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (fastpathETSimpleIO) EncMapStringIntV(v map[string]int, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapStringInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (fastpathETSimpleIO) EncMapStringInt32V(v map[string]int32, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapStringFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (fastpathETSimpleIO) EncMapStringFloat64V(v map[string]float64, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapStringBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (fastpathETSimpleIO) EncMapStringBoolV(v map[string]bool, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + 
e.e.EncodeString(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint8IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (fastpathETSimpleIO) EncMapUint8IntfV(v map[uint8]interface{}, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint8StringR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (fastpathETSimpleIO) EncMapUint8StringV(v map[uint8]string, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint8BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e) +} +func (fastpathETSimpleIO) EncMapUint8BytesV(v map[uint8][]byte, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint8Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (fastpathETSimpleIO) EncMapUint8Uint8V(v map[uint8]uint8, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) 
fastpathEncMapUint8Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (fastpathETSimpleIO) EncMapUint8Uint64V(v map[uint8]uint64, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint8IntR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (fastpathETSimpleIO) EncMapUint8IntV(v map[uint8]int, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint8Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (fastpathETSimpleIO) EncMapUint8Int32V(v map[uint8]int32, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint8Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (fastpathETSimpleIO) EncMapUint8Float64V(v map[uint8]float64, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint8BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (fastpathETSimpleIO) EncMapUint8BoolV(v map[uint8]bool, e *encoderSimpleIO) { + if len(v) == 0 { + 
e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint8, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(uint64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint64IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (fastpathETSimpleIO) EncMapUint64IntfV(v map[uint64]interface{}, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint64StringR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (fastpathETSimpleIO) EncMapUint64StringV(v map[uint64]string, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint64BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e) +} +func (fastpathETSimpleIO) EncMapUint64BytesV(v map[uint64][]byte, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint64Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (fastpathETSimpleIO) EncMapUint64Uint8V(v map[uint64]uint8, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, 
k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint64Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (fastpathETSimpleIO) EncMapUint64Uint64V(v map[uint64]uint64, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint64IntR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (fastpathETSimpleIO) EncMapUint64IntV(v map[uint64]int, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint64Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (fastpathETSimpleIO) EncMapUint64Int32V(v map[uint64]int32, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint64Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (fastpathETSimpleIO) EncMapUint64Float64V(v map[uint64]float64, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + 
e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapUint64BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (fastpathETSimpleIO) EncMapUint64BoolV(v map[uint64]bool, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeUint(k2) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapIntIntfR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (fastpathETSimpleIO) EncMapIntIntfV(v map[int]interface{}, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapIntStringR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (fastpathETSimpleIO) EncMapIntStringV(v map[int]string, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapIntBytesR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e) +} +func (fastpathETSimpleIO) EncMapIntBytesV(v map[int][]byte, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapIntUint8R(f *encFnInfo, rv reflect.Value) { + 
fastpathETSimpleIO{}.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (fastpathETSimpleIO) EncMapIntUint8V(v map[int]uint8, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapIntUint64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (fastpathETSimpleIO) EncMapIntUint64V(v map[int]uint64, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapIntIntR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (fastpathETSimpleIO) EncMapIntIntV(v map[int]int, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapIntInt32R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (fastpathETSimpleIO) EncMapIntInt32V(v map[int]int32, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapIntFloat64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (fastpathETSimpleIO) EncMapIntFloat64V(v map[int]float64, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k 
:= range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapIntBoolR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (fastpathETSimpleIO) EncMapIntBoolV(v map[int]bool, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapInt32IntfR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (fastpathETSimpleIO) EncMapInt32IntfV(v map[int32]interface{}, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v[k2]) { + e.encodeR(reflect.ValueOf(v[k2])) + } + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + if !e.encodeBuiltin(v2) { + e.encodeR(reflect.ValueOf(v2)) + } + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapInt32StringR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (fastpathETSimpleIO) EncMapInt32StringV(v map[int32]string, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeString(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapInt32BytesR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e) +} +func (fastpathETSimpleIO) EncMapInt32BytesV(v map[int32][]byte, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + 
e.e.EncodeBytes(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBytes(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapInt32Uint8R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (fastpathETSimpleIO) EncMapInt32Uint8V(v map[int32]uint8, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(uint64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapInt32Uint64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (fastpathETSimpleIO) EncMapInt32Uint64V(v map[int32]uint64, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeUint(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapInt32IntR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (fastpathETSimpleIO) EncMapInt32IntV(v map[int32]int, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapInt32Int32R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (fastpathETSimpleIO) EncMapInt32Int32V(v map[int32]int32, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v[k2])) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeInt(int64(v2)) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e 
*encoderSimpleIO) fastpathEncMapInt32Float64R(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (fastpathETSimpleIO) EncMapInt32Float64V(v map[int32]float64, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeFloat64(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} +func (e *encoderSimpleIO) fastpathEncMapInt32BoolR(f *encFnInfo, rv reflect.Value) { + fastpathETSimpleIO{}.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (fastpathETSimpleIO) EncMapInt32BoolV(v map[int32]bool, e *encoderSimpleIO) { + if len(v) == 0 { + e.e.WriteMapEmpty() + return + } + var i uint + e.mapStart(len(v)) + if e.h.Canonical { + v2 := make([]int32, len(v)) + for k := range v { + v2[i] = k + i++ + } + slices.Sort(v2) + for i, k2 := range v2 { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v[k2]) + } + } else { + i = 0 + for k2, v2 := range v { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeInt(int64(k2)) + e.mapElemValue() + e.e.EncodeBool(v2) + i++ + } + } + e.c = 0 + e.e.WriteMapEnd() +} + +func (helperDecDriverSimpleIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderSimpleIO) bool { + var ft fastpathDTSimpleIO + var changed bool + var containerLen int + switch v := iv.(type) { + case []interface{}: + ft.DecSliceIntfN(v, d) + case *[]interface{}: + var v2 []interface{} + if v2, changed = ft.DecSliceIntfY(*v, d); changed { + *v = v2 + } + case []string: + ft.DecSliceStringN(v, d) + case *[]string: + var v2 []string + if v2, changed = ft.DecSliceStringY(*v, d); changed { + *v = v2 + } + case [][]byte: + ft.DecSliceBytesN(v, d) + case *[][]byte: + var v2 [][]byte + if v2, changed = ft.DecSliceBytesY(*v, d); changed { + *v = v2 + } + case []float32: + ft.DecSliceFloat32N(v, d) + case *[]float32: + var v2 []float32 + if v2, changed = ft.DecSliceFloat32Y(*v, d); changed { + *v = v2 + } + case []float64: + ft.DecSliceFloat64N(v, d) + case *[]float64: + var v2 []float64 + if v2, changed = ft.DecSliceFloat64Y(*v, d); changed { + *v = v2 + } + case []uint8: + ft.DecSliceUint8N(v, d) + case *[]uint8: + var v2 []uint8 + if v2, changed = ft.DecSliceUint8Y(*v, d); changed { + *v = v2 + } + case []uint64: + ft.DecSliceUint64N(v, d) + case *[]uint64: + var v2 []uint64 + if v2, changed = ft.DecSliceUint64Y(*v, d); changed { + *v = v2 + } + case []int: + ft.DecSliceIntN(v, d) + case *[]int: + var v2 []int + if v2, changed = ft.DecSliceIntY(*v, d); changed { + *v = v2 + } + case []int32: + ft.DecSliceInt32N(v, d) + case *[]int32: + var v2 []int32 + if v2, changed = ft.DecSliceInt32Y(*v, d); changed { + *v = v2 + } + case []int64: + ft.DecSliceInt64N(v, d) + case *[]int64: + var v2 []int64 + if v2, changed = ft.DecSliceInt64Y(*v, d); changed { + *v = v2 + } + case []bool: + ft.DecSliceBoolN(v, d) + case *[]bool: + var v2 []bool + if v2, changed = ft.DecSliceBoolY(*v, d); changed { + *v = v2 + } + case map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen 
!= containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*v, 
containerLen, d) + } + d.mapEnd() + } + case map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapStringBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[string]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = 
make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint8BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint8]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64][]byte: + if containerLen = 
d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapUint64BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[uint64]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 
0 { + ft.DecMapIntIntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntStringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntUint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntIntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntInt32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil 
{ + if containerLen != 0 { + ft.DecMapIntFloat64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapIntBoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntfL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]interface{}: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32StringL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]string: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BytesL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32][]byte: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint8L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint8: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Uint64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]uint64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*v, containerLen, d) + } + d.mapEnd() + } + case 
map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32IntL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Int32L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]int32: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32Float64L(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]float64: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Float64L(*v, containerLen, d) + } + d.mapEnd() + } + case map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil { + if containerLen != 0 { + ft.DecMapInt32BoolL(v, containerLen, d) + } + d.mapEnd() + } + case *map[int32]bool: + if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil { + *v = nil + } else { + if *v == nil { + *v = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32BoolL(*v, containerLen, d) + } + d.mapEnd() + } + default: + _ = v + return false + } + return true +} + +func (d *decoderSimpleIO) fastpathDecSliceIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]interface{}) + if vv, changed := ft.DecSliceIntfY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []interface{} + rvGetSlice4Array(rv, &v) + ft.DecSliceIntfN(v, d) + default: + ft.DecSliceIntfN(rv2i(rv).([]interface{}), d) + } +} +func (fastpathDTSimpleIO) DecSliceIntfY(v []interface{}, d *decoderSimpleIO) (v2 []interface{}, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []interface{}) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]interface{}, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + 
d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + d.decode(&v[uint(j)]) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]interface{}{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleIO) DecSliceIntfN(v []interface{}, d *decoderSimpleIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + d.decode(&v[uint(j)]) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleIO) fastpathDecSliceStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]string) + if vv, changed := ft.DecSliceStringY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []string + rvGetSlice4Array(rv, &v) + ft.DecSliceStringN(v, d) + default: + ft.DecSliceStringN(rv2i(rv).([]string), d) + } +} +func (fastpathDTSimpleIO) DecSliceStringY(v []string, d *decoderSimpleIO) (v2 []string, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []string) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 16)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]string, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, "")) + } + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]string{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleIO) DecSliceStringN(v []string, d *decoderSimpleIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { 
+ d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.detach2Str(d.d.DecodeStringAsBytes()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleIO) fastpathDecSliceBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[][]byte) + if vv, changed := ft.DecSliceBytesY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v [][]byte + rvGetSlice4Array(rv, &v) + ft.DecSliceBytesN(v, d) + default: + ft.DecSliceBytesN(rv2i(rv).([][]byte), d) + } +} +func (fastpathDTSimpleIO) DecSliceBytesY(v [][]byte, d *decoderSimpleIO) (v2 [][]byte, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst [][]byte) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 24)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([][]byte, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, nil)) + } + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([][]byte{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleIO) DecSliceBytesN(v [][]byte, d *decoderSimpleIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = bytesOKdbi(d.decodeBytesInto(v[uint(j)], false)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleIO) fastpathDecSliceFloat32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float32) + if vv, changed := ft.DecSliceFloat32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float32 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat32N(v, d) + default: + ft.DecSliceFloat32N(rv2i(rv).([]float32), d) + } +} +func (fastpathDTSimpleIO) DecSliceFloat32Y(v []float32, d *decoderSimpleIO) (v2 []float32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == 
valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = float32(d.d.DecodeFloat32()) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleIO) DecSliceFloat32N(v []float32, d *decoderSimpleIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = float32(d.d.DecodeFloat32()) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleIO) fastpathDecSliceFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]float64) + if vv, changed := ft.DecSliceFloat64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []float64 + rvGetSlice4Array(rv, &v) + ft.DecSliceFloat64N(v, d) + default: + ft.DecSliceFloat64N(rv2i(rv).([]float64), d) + } +} +func (fastpathDTSimpleIO) DecSliceFloat64Y(v []float64, d *decoderSimpleIO) (v2 []float64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []float64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]float64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + 
d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeFloat64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]float64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleIO) DecSliceFloat64N(v []float64, d *decoderSimpleIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeFloat64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleIO) fastpathDecSliceUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint8) + if vv, changed := ft.DecSliceUint8Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint8 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint8N(v, d) + default: + ft.DecSliceUint8N(rv2i(rv).([]uint8), d) + } +} +func (fastpathDTSimpleIO) DecSliceUint8Y(v []uint8, d *decoderSimpleIO) (v2 []uint8, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + if ctyp != valueTypeMap { + var dbi dBytesIntoState + v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false) + return v2, dbi != dBytesIntoParamOut + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint8) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint8, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint8{}) + } + d.mapEnd() + return v, changed +} +func (fastpathDTSimpleIO) DecSliceUint8N(v []uint8, d *decoderSimpleIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + if ctyp != valueTypeMap { + d.decodeBytesInto(v[:len(v):len(v)], true) + return + } + containerLenS := d.mapStart(d.d.ReadMapStart()) * 2 + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + d.mapEnd() +} + +func (d *decoderSimpleIO) fastpathDecSliceUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]uint64) + if vv, 
changed := ft.DecSliceUint64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []uint64 + rvGetSlice4Array(rv, &v) + ft.DecSliceUint64N(v, d) + default: + ft.DecSliceUint64N(rv2i(rv).([]uint64), d) + } +} +func (fastpathDTSimpleIO) DecSliceUint64Y(v []uint64, d *decoderSimpleIO) (v2 []uint64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []uint64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]uint64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeUint64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]uint64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleIO) DecSliceUint64N(v []uint64, d *decoderSimpleIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeUint64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleIO) fastpathDecSliceIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int) + if vv, changed := ft.DecSliceIntY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int + rvGetSlice4Array(rv, &v) + ft.DecSliceIntN(v, d) + default: + ft.DecSliceIntN(rv2i(rv).([]int), d) + } +} +func (fastpathDTSimpleIO) DecSliceIntY(v []int, d *decoderSimpleIO) (v2 []int, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > 
cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleIO) DecSliceIntN(v []int, d *decoderSimpleIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleIO) fastpathDecSliceInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int32) + if vv, changed := ft.DecSliceInt32Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int32 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt32N(v, d) + default: + ft.DecSliceInt32N(rv2i(rv).([]int32), d) + } +} +func (fastpathDTSimpleIO) DecSliceInt32Y(v []int32, d *decoderSimpleIO) (v2 []int32, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int32) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 4)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int32, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int32{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleIO) DecSliceInt32N(v []int32, d *decoderSimpleIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } 
else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleIO) fastpathDecSliceInt64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]int64) + if vv, changed := ft.DecSliceInt64Y(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []int64 + rvGetSlice4Array(rv, &v) + ft.DecSliceInt64N(v, d) + default: + ft.DecSliceInt64N(rv2i(rv).([]int64), d) + } +} +func (fastpathDTSimpleIO) DecSliceInt64Y(v []int64, d *decoderSimpleIO) (v2 []int64, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []int64) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 8)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]int64, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, 0)) + } + v[uint(j)] = d.d.DecodeInt64() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]int64{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleIO) DecSliceInt64N(v []int64, d *decoderSimpleIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeInt64() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleIO) fastpathDecSliceBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + switch rv.Kind() { + case reflect.Ptr: + v := rv2i(rv).(*[]bool) + if vv, changed := ft.DecSliceBoolY(*v, d); changed { + *v = vv + } + case reflect.Array: + var v []bool + rvGetSlice4Array(rv, &v) + ft.DecSliceBoolN(v, d) + default: + 
ft.DecSliceBoolN(rv2i(rv).([]bool), d) + } +} +func (fastpathDTSimpleIO) DecSliceBoolY(v []bool, d *decoderSimpleIO) (v2 []bool, changed bool) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return nil, v != nil + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + var j int + fnv := func(dst []bool) { v, changed = dst, true } + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if containerLenS == len(v) { + } else if containerLenS < 0 || containerLenS > cap(v) { + if xlen := int(decInferLen(containerLenS, d.maxInitLen(), 1)); xlen <= cap(v) { + fnv(v[:uint(xlen)]) + } else { + v2 = make([]bool, uint(xlen)) + copy(v2, v) + fnv(v2) + } + } else { + fnv(v[:containerLenS]) + } + } + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j >= len(v) { + fnv(append(v, false)) + } + v[uint(j)] = d.d.DecodeBool() + } + if j < len(v) { + fnv(v[:uint(j)]) + } else if j == 0 && v == nil { + fnv([]bool{}) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return v, changed +} +func (fastpathDTSimpleIO) DecSliceBoolN(v []bool, d *decoderSimpleIO) { + ctyp := d.d.ContainerType() + if ctyp == valueTypeNil { + return + } + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + hasLen := containerLenS >= 0 + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if isArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + if j < len(v) { + v[uint(j)] = d.d.DecodeBool() + } else { + d.arrayCannotExpand(len(v), j+1) + d.swallow() + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} +func (d *decoderSimpleIO) fastpathDecMapStringIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]interface{}) + if *vp == nil { + *vp = make(map[string]interface{}, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntfL(rv2i(rv).(map[string]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapStringIntfL(v map[string]interface{}, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderSimpleIO) fastpathDecMapStringStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == 
reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]string) + if *vp == nil { + *vp = make(map[string]string, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapStringStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringStringL(rv2i(rv).(map[string]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapStringStringL(v map[string]string, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderSimpleIO) fastpathDecMapStringBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string][]byte) + if *vp == nil { + *vp = make(map[string][]byte, decInferLen(containerLen, d.maxInitLen(), 40)) + } + if containerLen != 0 { + ft.DecMapStringBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBytesL(rv2i(rv).(map[string][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapStringBytesL(v map[string][]byte, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderSimpleIO) fastpathDecMapStringUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint8) + if *vp == nil { + *vp = make(map[string]uint8, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint8L(rv2i(rv).(map[string]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapStringUint8L(v map[string]uint8, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderSimpleIO) fastpathDecMapStringUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]uint64) + if *vp == nil { + *vp = make(map[string]uint64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringUint64L(rv2i(rv).(map[string]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapStringUint64L(v map[string]uint64, containerLen int, 
d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderSimpleIO) fastpathDecMapStringIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int) + if *vp == nil { + *vp = make(map[string]int, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringIntL(rv2i(rv).(map[string]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapStringIntL(v map[string]int, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderSimpleIO) fastpathDecMapStringInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]int32) + if *vp == nil { + *vp = make(map[string]int32, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapStringInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringInt32L(rv2i(rv).(map[string]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapStringInt32L(v map[string]int32, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderSimpleIO) fastpathDecMapStringFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[string]float64) + if *vp == nil { + *vp = make(map[string]float64, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapStringFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringFloat64L(rv2i(rv).(map[string]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapStringFloat64L(v map[string]float64, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderSimpleIO) fastpathDecMapStringBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + 
vp, _ := rv2i(rv).(*map[string]bool) + if *vp == nil { + *vp = make(map[string]bool, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapStringBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapStringBoolL(rv2i(rv).(map[string]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapStringBoolL(v map[string]bool, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[string]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.detach2Str(d.d.DecodeStringAsBytes()) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderSimpleIO) fastpathDecMapUint8IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]interface{}) + if *vp == nil { + *vp = make(map[uint8]interface{}, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntfL(rv2i(rv).(map[uint8]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint8IntfL(v map[uint8]interface{}, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderSimpleIO) fastpathDecMapUint8StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]string) + if *vp == nil { + *vp = make(map[uint8]string, decInferLen(containerLen, d.maxInitLen(), 17)) + } + if containerLen != 0 { + ft.DecMapUint8StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8StringL(rv2i(rv).(map[uint8]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint8StringL(v map[uint8]string, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderSimpleIO) fastpathDecMapUint8BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8][]byte) + if *vp == nil { + *vp = make(map[uint8][]byte, decInferLen(containerLen, d.maxInitLen(), 25)) + } + if containerLen != 0 { + ft.DecMapUint8BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BytesL(rv2i(rv).(map[uint8][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint8BytesL(v map[uint8][]byte, containerLen int, d *decoderSimpleIO) { + if v == nil { 
+ halt.errorInt("cannot decode into nil map[uint8][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderSimpleIO) fastpathDecMapUint8Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint8) + if *vp == nil { + *vp = make(map[uint8]uint8, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint8L(rv2i(rv).(map[uint8]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint8Uint8L(v map[uint8]uint8, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderSimpleIO) fastpathDecMapUint8Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]uint64) + if *vp == nil { + *vp = make(map[uint8]uint64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Uint64L(rv2i(rv).(map[uint8]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint8Uint64L(v map[uint8]uint64, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderSimpleIO) fastpathDecMapUint8IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int) + if *vp == nil { + *vp = make(map[uint8]int, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8IntL(rv2i(rv).(map[uint8]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint8IntL(v map[uint8]int, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderSimpleIO) fastpathDecMapUint8Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := 
d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]int32) + if *vp == nil { + *vp = make(map[uint8]int32, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapUint8Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Int32L(rv2i(rv).(map[uint8]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint8Int32L(v map[uint8]int32, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderSimpleIO) fastpathDecMapUint8Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]float64) + if *vp == nil { + *vp = make(map[uint8]float64, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint8Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8Float64L(rv2i(rv).(map[uint8]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint8Float64L(v map[uint8]float64, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderSimpleIO) fastpathDecMapUint8BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint8]bool) + if *vp == nil { + *vp = make(map[uint8]bool, decInferLen(containerLen, d.maxInitLen(), 2)) + } + if containerLen != 0 { + ft.DecMapUint8BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint8BoolL(rv2i(rv).(map[uint8]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint8BoolL(v map[uint8]bool, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint8]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderSimpleIO) fastpathDecMapUint64IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]interface{}) + if *vp == nil { + *vp = make(map[uint64]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64IntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntfL(rv2i(rv).(map[uint64]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint64IntfL(v map[uint64]interface{}, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil 
map[uint64]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderSimpleIO) fastpathDecMapUint64StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]string) + if *vp == nil { + *vp = make(map[uint64]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapUint64StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64StringL(rv2i(rv).(map[uint64]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint64StringL(v map[uint64]string, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderSimpleIO) fastpathDecMapUint64BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64][]byte) + if *vp == nil { + *vp = make(map[uint64][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapUint64BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BytesL(rv2i(rv).(map[uint64][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint64BytesL(v map[uint64][]byte, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderSimpleIO) fastpathDecMapUint64Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint8) + if *vp == nil { + *vp = make(map[uint64]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint8L(rv2i(rv).(map[uint64]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint64Uint8L(v map[uint64]uint8, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderSimpleIO) fastpathDecMapUint64Uint64R(f *decFnInfo, rv reflect.Value) { + var ft 
fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]uint64) + if *vp == nil { + *vp = make(map[uint64]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Uint64L(rv2i(rv).(map[uint64]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint64Uint64L(v map[uint64]uint64, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderSimpleIO) fastpathDecMapUint64IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int) + if *vp == nil { + *vp = make(map[uint64]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint64IntL(v map[uint64]int, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderSimpleIO) fastpathDecMapUint64Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]int32) + if *vp == nil { + *vp = make(map[uint64]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapUint64Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint64Int32L(v map[uint64]int32, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderSimpleIO) fastpathDecMapUint64Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]float64) + if *vp == nil { + *vp = make(map[uint64]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapUint64Float64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]float64 
given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderSimpleIO) fastpathDecMapUint64BoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[uint64]bool) + if *vp == nil { + *vp = make(map[uint64]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapUint64BoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[uint64]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := d.d.DecodeUint64() + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderSimpleIO) fastpathDecMapIntIntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]interface{}) + if *vp == nil { + *vp = make(map[int]interface{}, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntIntfL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderSimpleIO) fastpathDecMapIntStringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]string) + if *vp == nil { + *vp = make(map[int]string, decInferLen(containerLen, d.maxInitLen(), 24)) + } + if containerLen != 0 { + ft.DecMapIntStringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapIntStringL(v map[int]string, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderSimpleIO) fastpathDecMapIntBytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int][]byte) + if *vp == nil { + 
*vp = make(map[int][]byte, decInferLen(containerLen, d.maxInitLen(), 32)) + } + if containerLen != 0 { + ft.DecMapIntBytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapIntBytesL(v map[int][]byte, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderSimpleIO) fastpathDecMapIntUint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint8) + if *vp == nil { + *vp = make(map[int]uint8, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntUint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapIntUint8L(v map[int]uint8, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint8 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderSimpleIO) fastpathDecMapIntUint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]uint64) + if *vp == nil { + *vp = make(map[int]uint64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntUint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapIntUint64L(v map[int]uint64, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderSimpleIO) fastpathDecMapIntIntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int) + if *vp == nil { + *vp = make(map[int]int, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntIntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapIntIntL(v map[int]int, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; 
d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderSimpleIO) fastpathDecMapIntInt32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]int32) + if *vp == nil { + *vp = make(map[int]int32, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapIntInt32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapIntInt32L(v map[int]int32, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderSimpleIO) fastpathDecMapIntFloat64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]float64) + if *vp == nil { + *vp = make(map[int]float64, decInferLen(containerLen, d.maxInitLen(), 16)) + } + if containerLen != 0 { + ft.DecMapIntFloat64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapIntFloat64L(v map[int]float64, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]float64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeFloat64() + } +} +func (d *decoderSimpleIO) fastpathDecMapIntBoolR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int]bool) + if *vp == nil { + *vp = make(map[int]bool, decInferLen(containerLen, d.maxInitLen(), 9)) + } + if containerLen != 0 { + ft.DecMapIntBoolL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapIntBoolL(v map[int]bool, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int]bool given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + d.mapElemValue() + v[mk] = d.d.DecodeBool() + } +} +func (d *decoderSimpleIO) fastpathDecMapInt32IntfR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]interface{}) + if *vp == nil { + *vp = make(map[int32]interface{}, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32IntfL(*vp, containerLen, d) + } + } else if 
containerLen != 0 { + ft.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]interface{} given stream length: ", int64(containerLen)) + } + var mv interface{} + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + v[mk] = mv + } +} +func (d *decoderSimpleIO) fastpathDecMapInt32StringR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]string) + if *vp == nil { + *vp = make(map[int32]string, decInferLen(containerLen, d.maxInitLen(), 20)) + } + if containerLen != 0 { + ft.DecMapInt32StringL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapInt32StringL(v map[int32]string, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]string given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.detach2Str(d.d.DecodeStringAsBytes()) + } +} +func (d *decoderSimpleIO) fastpathDecMapInt32BytesR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32][]byte) + if *vp == nil { + *vp = make(map[int32][]byte, decInferLen(containerLen, d.maxInitLen(), 28)) + } + if containerLen != 0 { + ft.DecMapInt32BytesL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32][]byte given stream length: ", int64(containerLen)) + } + var mv []byte + mapGet := !d.h.MapValueReset + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + if mapGet { + mv = v[mk] + } else { + mv = nil + } + v[mk], _ = d.decodeBytesInto(mv, false) + } +} +func (d *decoderSimpleIO) fastpathDecMapInt32Uint8R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint8) + if *vp == nil { + *vp = make(map[int32]uint8, decInferLen(containerLen, d.maxInitLen(), 5)) + } + if containerLen != 0 { + ft.DecMapInt32Uint8L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint8 given stream length: ", int64(containerLen)) + } + hasLen := 
containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + } +} +func (d *decoderSimpleIO) fastpathDecMapInt32Uint64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]uint64) + if *vp == nil { + *vp = make(map[int32]uint64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32Uint64L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]uint64 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = d.d.DecodeUint64() + } +} +func (d *decoderSimpleIO) fastpathDecMapInt32IntR(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int) + if *vp == nil { + *vp = make(map[int32]int, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 0 { + ft.DecMapInt32IntL(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapInt32IntL(v map[int32]int, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + } +} +func (d *decoderSimpleIO) fastpathDecMapInt32Int32R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]int32) + if *vp == nil { + *vp = make(map[int32]int32, decInferLen(containerLen, d.maxInitLen(), 8)) + } + if containerLen != 0 { + ft.DecMapInt32Int32L(*vp, containerLen, d) + } + } else if containerLen != 0 { + ft.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d) + } + d.mapEnd() +} +func (fastpathDTSimpleIO) DecMapInt32Int32L(v map[int32]int32, containerLen int, d *decoderSimpleIO) { + if v == nil { + halt.errorInt("cannot decode into nil map[int32]int32 given stream length: ", int64(containerLen)) + } + hasLen := containerLen >= 0 + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + d.mapElemValue() + v[mk] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + } +} +func (d *decoderSimpleIO) fastpathDecMapInt32Float64R(f *decFnInfo, rv reflect.Value) { + var ft fastpathDTSimpleIO + containerLen := d.mapStart(d.d.ReadMapStart()) + if rv.Kind() == reflect.Ptr { + vp, _ := rv2i(rv).(*map[int32]float64) + if *vp == nil { + *vp = make(map[int32]float64, decInferLen(containerLen, d.maxInitLen(), 12)) + } + if containerLen != 
0 {
+			ft.DecMapInt32Float64L(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		ft.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d)
+	}
+	d.mapEnd()
+}
+func (fastpathDTSimpleIO) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *decoderSimpleIO) {
+	if v == nil {
+		halt.errorInt("cannot decode into nil map[int32]float64 given stream length: ", int64(containerLen))
+	}
+	hasLen := containerLen >= 0
+	for j := 0; d.containerNext(j, containerLen, hasLen); j++ {
+		d.mapElemKey(j == 0)
+		mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+		d.mapElemValue()
+		v[mk] = d.d.DecodeFloat64()
+	}
+}
+func (d *decoderSimpleIO) fastpathDecMapInt32BoolR(f *decFnInfo, rv reflect.Value) {
+	var ft fastpathDTSimpleIO
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[int32]bool)
+		if *vp == nil {
+			*vp = make(map[int32]bool, decInferLen(containerLen, d.maxInitLen(), 5))
+		}
+		if containerLen != 0 {
+			ft.DecMapInt32BoolL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		ft.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d)
+	}
+	d.mapEnd()
+}
+func (fastpathDTSimpleIO) DecMapInt32BoolL(v map[int32]bool, containerLen int, d *decoderSimpleIO) {
+	if v == nil {
+		halt.errorInt("cannot decode into nil map[int32]bool given stream length: ", int64(containerLen))
+	}
+	hasLen := containerLen >= 0
+	for j := 0; d.containerNext(j, containerLen, hasLen); j++ {
+		d.mapElemKey(j == 0)
+		mk := int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+		d.mapElemValue()
+		v[mk] = d.d.DecodeBool()
+	}
+}
diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go
index e8a63717e..64df60f3c 100644
--- a/vendor/github.com/ugorji/go/codec/simple.go
+++ b/vendor/github.com/ugorji/go/codec/simple.go
@@ -1,111 +1,65 @@
+//go:build notmono || codec.notmono
+
 // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
 // Use of this source code is governed by a MIT license found in the LICENSE file.
 
 package codec
 
 import (
+	"io"
 	"math"
 	"reflect"
 	"time"
 )
 
-const (
-	_ uint8 = iota
-	simpleVdNil     = 1
-	simpleVdFalse   = 2
-	simpleVdTrue    = 3
-	simpleVdFloat32 = 4
-	simpleVdFloat64 = 5
-
-	// each lasts for 4 (ie n, n+1, n+2, n+3)
-	simpleVdPosInt = 8
-	simpleVdNegInt = 12
-
-	simpleVdTime = 24
-
-	// containers: each lasts for 4 (ie n, n+1, n+2, ... n+7)
-	simpleVdString    = 216
-	simpleVdByteArray = 224
-	simpleVdArray     = 232
-	simpleVdMap       = 240
-	simpleVdExt       = 248
-)
-
-var simpledescNames = map[byte]string{
-	simpleVdNil:     "null",
-	simpleVdFalse:   "false",
-	simpleVdTrue:    "true",
-	simpleVdFloat32: "float32",
-	simpleVdFloat64: "float64",
-
-	simpleVdPosInt: "+int",
-	simpleVdNegInt: "-int",
-
-	simpleVdTime: "time",
-
-	simpleVdString:    "string",
-	simpleVdByteArray: "binary",
-	simpleVdArray:     "array",
-	simpleVdMap:       "map",
-	simpleVdExt:       "ext",
-}
-
-func simpledesc(bd byte) (s string) {
-	s = simpledescNames[bd]
-	if s == "" {
-		s = "unknown"
-	}
-	return
-}
-
-type simpleEncDriver struct {
+type simpleEncDriver[T encWriter] struct {
 	noBuiltInTypes
 	encDriverNoopContainerWriter
 	encDriverNoState
+	encDriverContainerNoTrackerT
+	encInit2er
+
+	h *SimpleHandle
+	e *encoderBase
 	// b [8]byte
-	e Encoder
+	w T
 }
 
-func (e *simpleEncDriver) encoder() *Encoder {
-	return &e.e
+func (e *simpleEncDriver[T]) EncodeNil() {
+	e.w.writen1(simpleVdNil)
 }
 
-func (e *simpleEncDriver) EncodeNil() {
-	e.e.encWr.writen1(simpleVdNil)
-}
-
-func (e *simpleEncDriver) EncodeBool(b bool) {
+func (e *simpleEncDriver[T]) EncodeBool(b bool) {
 	if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && !b {
 		e.EncodeNil()
 		return
 	}
 	if b {
-		e.e.encWr.writen1(simpleVdTrue)
+		e.w.writen1(simpleVdTrue)
 	} else {
-		e.e.encWr.writen1(simpleVdFalse)
+		e.w.writen1(simpleVdFalse)
 	}
 }
 
-func (e *simpleEncDriver) EncodeFloat32(f float32) {
+func (e *simpleEncDriver[T]) EncodeFloat32(f float32) {
 	if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 {
 		e.EncodeNil()
 		return
 	}
-	e.e.encWr.writen1(simpleVdFloat32)
-	bigen.writeUint32(e.e.w(), math.Float32bits(f))
+	e.w.writen1(simpleVdFloat32)
+	e.w.writen4(bigen.PutUint32(math.Float32bits(f)))
 }
 
-func (e *simpleEncDriver) EncodeFloat64(f float64) {
+func (e *simpleEncDriver[T]) EncodeFloat64(f float64) {
 	if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 {
 		e.EncodeNil()
 		return
 	}
-	e.e.encWr.writen1(simpleVdFloat64)
-	bigen.writeUint64(e.e.w(), math.Float64bits(f))
+	e.w.writen1(simpleVdFloat64)
+	e.w.writen8(bigen.PutUint64(math.Float64bits(f)))
 }
 
-func (e *simpleEncDriver) EncodeInt(v int64) {
+func (e *simpleEncDriver[T]) EncodeInt(v int64) {
 	if v < 0 {
 		e.encUint(uint64(-v), simpleVdNegInt)
 	} else {
@@ -113,62 +67,62 @@ func (e *simpleEncDriver) EncodeInt(v int64) {
 	}
 }
 
-func (e *simpleEncDriver) EncodeUint(v uint64) {
+func (e *simpleEncDriver[T]) EncodeUint(v uint64) {
 	e.encUint(v, simpleVdPosInt)
 }
 
-func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
+func (e *simpleEncDriver[T]) encUint(v uint64, bd uint8) {
 	if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == 0 {
 		e.EncodeNil()
 		return
 	}
 	if v <= math.MaxUint8 {
-		e.e.encWr.writen2(bd, uint8(v))
+		e.w.writen2(bd, uint8(v))
 	} else if v <= math.MaxUint16 {
-		e.e.encWr.writen1(bd + 1)
-		bigen.writeUint16(e.e.w(), uint16(v))
+		e.w.writen1(bd + 1)
+		e.w.writen2(bigen.PutUint16(uint16(v)))
 	} else if v <= math.MaxUint32 {
-		e.e.encWr.writen1(bd + 2)
-		bigen.writeUint32(e.e.w(), uint32(v))
+		e.w.writen1(bd + 2)
+		e.w.writen4(bigen.PutUint32(uint32(v)))
 	} else { // if v <= math.MaxUint64 {
-		e.e.encWr.writen1(bd + 3)
-		bigen.writeUint64(e.e.w(), v)
+		e.w.writen1(bd + 3)
+		e.w.writen8(bigen.PutUint64(v))
 	}
 }
 
-func (e *simpleEncDriver) encLen(bd byte, length int) {
+func (e *simpleEncDriver[T]) encLen(bd byte, length int) {
 	if length == 0 {
-		e.e.encWr.writen1(bd)
+		e.w.writen1(bd)
 	} else if length <= math.MaxUint8 {
-		e.e.encWr.writen1(bd + 1)
-
e.e.encWr.writen1(uint8(length)) + e.w.writen1(bd + 1) + e.w.writen1(uint8(length)) } else if length <= math.MaxUint16 { - e.e.encWr.writen1(bd + 2) - bigen.writeUint16(e.e.w(), uint16(length)) + e.w.writen1(bd + 2) + e.w.writen2(bigen.PutUint16(uint16(length))) } else if int64(length) <= math.MaxUint32 { - e.e.encWr.writen1(bd + 3) - bigen.writeUint32(e.e.w(), uint32(length)) + e.w.writen1(bd + 3) + e.w.writen4(bigen.PutUint32(uint32(length))) } else { - e.e.encWr.writen1(bd + 4) - bigen.writeUint64(e.e.w(), uint64(length)) + e.w.writen1(bd + 4) + e.w.writen8(bigen.PutUint64(uint64(length))) } } -func (e *simpleEncDriver) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) { +func (e *simpleEncDriver[T]) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) { var bs0, bs []byte if ext == SelfExt { bs0 = e.e.blist.get(1024) bs = bs0 - e.e.sideEncode(v, basetype, &bs) + sideEncode(e.h, &e.h.sideEncPool, func(se encoderI) { oneOffEncode(se, v, &bs, basetype, true) }) } else { bs = ext.WriteExt(v) } if bs == nil { - e.EncodeNil() + e.writeNilBytes() goto END } e.encodeExtPreamble(uint8(xtag), len(bs)) - e.e.encWr.writeb(bs) + e.w.writeb(bs) END: if ext == SelfExt { e.e.blist.put(bs) @@ -178,25 +132,35 @@ END: } } -func (e *simpleEncDriver) EncodeRawExt(re *RawExt) { +func (e *simpleEncDriver[T]) EncodeRawExt(re *RawExt) { e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.e.encWr.writeb(re.Data) + e.w.writeb(re.Data) } -func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { +func (e *simpleEncDriver[T]) encodeExtPreamble(xtag byte, length int) { e.encLen(simpleVdExt, length) - e.e.encWr.writen1(xtag) + e.w.writen1(xtag) } -func (e *simpleEncDriver) WriteArrayStart(length int) { +func (e *simpleEncDriver[T]) WriteArrayStart(length int) { e.encLen(simpleVdArray, length) } -func (e *simpleEncDriver) WriteMapStart(length int) { +func (e *simpleEncDriver[T]) WriteMapStart(length int) { e.encLen(simpleVdMap, length) } -func (e *simpleEncDriver) EncodeString(v string) { +func (e *simpleEncDriver[T]) WriteArrayEmpty() { + // e.WriteArrayStart(0) = e.encLen(simpleVdArray, 0) + e.w.writen1(simpleVdArray) +} + +func (e *simpleEncDriver[T]) WriteMapEmpty() { + // e.WriteMapStart(0) = e.encLen(simpleVdMap, 0) + e.w.writen1(simpleVdMap) +} + +func (e *simpleEncDriver[T]) EncodeString(v string) { if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == "" { e.EncodeNil() return @@ -206,57 +170,88 @@ func (e *simpleEncDriver) EncodeString(v string) { } else { e.encLen(simpleVdString, len(v)) } - e.e.encWr.writestr(v) + e.w.writestr(v) } -func (e *simpleEncDriver) EncodeStringBytesRaw(v []byte) { +func (e *simpleEncDriver[T]) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) } + +func (e *simpleEncDriver[T]) EncodeStringBytesRaw(v []byte) { // if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil { + e.encLen(simpleVdByteArray, len(v)) + e.w.writeb(v) +} + +func (e *simpleEncDriver[T]) EncodeBytes(v []byte) { if v == nil { - e.EncodeNil() + e.writeNilBytes() return } - e.encLen(simpleVdByteArray, len(v)) - e.e.encWr.writeb(v) + e.EncodeStringBytesRaw(v) } -func (e *simpleEncDriver) EncodeTime(t time.Time) { +func (e *simpleEncDriver[T]) encodeNilBytes() { + b := byte(simpleVdNil) + if e.h.NilCollectionToZeroLength { + b = simpleVdArray + } + e.w.writen1(b) +} + +func (e *simpleEncDriver[T]) writeNilOr(v byte) { + if !e.h.NilCollectionToZeroLength { + v = simpleVdNil + } + e.w.writen1(v) +} + +func (e *simpleEncDriver[T]) writeNilArray() { 
+ e.writeNilOr(simpleVdArray) +} + +func (e *simpleEncDriver[T]) writeNilMap() { + e.writeNilOr(simpleVdMap) +} + +func (e *simpleEncDriver[T]) writeNilBytes() { + e.writeNilOr(simpleVdByteArray) +} + +func (e *simpleEncDriver[T]) EncodeTime(t time.Time) { // if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() { if t.IsZero() { e.EncodeNil() return } v, err := t.MarshalBinary() - e.e.onerror(err) - e.e.encWr.writen2(simpleVdTime, uint8(len(v))) - e.e.encWr.writeb(v) + halt.onerror(err) + e.w.writen2(simpleVdTime, uint8(len(v))) + e.w.writeb(v) } //------------------------------------ -type simpleDecDriver struct { +type simpleDecDriver[T decReader] struct { h *SimpleHandle + d *decoderBase + r T + bdAndBdread - _ bool + // bytes bool + noBuiltInTypes + // decDriverNoopNumberHelper decDriverNoopContainerReader - decDriverNoopNumberHelper - d Decoder + decInit2er + + // ds interface{} // must be *decoder[simpleDecDriverM[bytes...]] } -func (d *simpleDecDriver) decoder() *Decoder { - return &d.d -} - -func (d *simpleDecDriver) descBd() string { - return sprintf("%v (%s)", d.bd, simpledesc(d.bd)) -} - -func (d *simpleDecDriver) readNextBd() { - d.bd = d.d.decRd.readn1() +func (d *simpleDecDriver[T]) readNextBd() { + d.bd = d.r.readn1() d.bdRead = true } -func (d *simpleDecDriver) advanceNil() (null bool) { +func (d *simpleDecDriver[T]) advanceNil() (null bool) { if !d.bdRead { d.readNextBd() } @@ -267,7 +262,7 @@ func (d *simpleDecDriver) advanceNil() (null bool) { return } -func (d *simpleDecDriver) ContainerType() (vt valueType) { +func (d *simpleDecDriver[T]) ContainerType() (vt valueType) { if !d.bdRead { d.readNextBd() } @@ -291,88 +286,90 @@ func (d *simpleDecDriver) ContainerType() (vt valueType) { return valueTypeUnset } -func (d *simpleDecDriver) TryNil() bool { +func (d *simpleDecDriver[T]) TryNil() bool { return d.advanceNil() } -func (d *simpleDecDriver) decFloat() (f float64, ok bool) { +func (d *simpleDecDriver[T]) decFloat() (f float64, ok bool) { ok = true switch d.bd { case simpleVdFloat32: - f = float64(math.Float32frombits(bigen.Uint32(d.d.decRd.readn4()))) + f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4()))) case simpleVdFloat64: - f = math.Float64frombits(bigen.Uint64(d.d.decRd.readn8())) + f = math.Float64frombits(bigen.Uint64(d.r.readn8())) default: ok = false } return } -func (d *simpleDecDriver) decInteger() (ui uint64, neg, ok bool) { +func (d *simpleDecDriver[T]) decInteger() (ui uint64, neg, ok bool) { ok = true switch d.bd { case simpleVdPosInt: - ui = uint64(d.d.decRd.readn1()) + ui = uint64(d.r.readn1()) case simpleVdPosInt + 1: - ui = uint64(bigen.Uint16(d.d.decRd.readn2())) + ui = uint64(bigen.Uint16(d.r.readn2())) case simpleVdPosInt + 2: - ui = uint64(bigen.Uint32(d.d.decRd.readn4())) + ui = uint64(bigen.Uint32(d.r.readn4())) case simpleVdPosInt + 3: - ui = uint64(bigen.Uint64(d.d.decRd.readn8())) + ui = uint64(bigen.Uint64(d.r.readn8())) case simpleVdNegInt: - ui = uint64(d.d.decRd.readn1()) + ui = uint64(d.r.readn1()) neg = true case simpleVdNegInt + 1: - ui = uint64(bigen.Uint16(d.d.decRd.readn2())) + ui = uint64(bigen.Uint16(d.r.readn2())) neg = true case simpleVdNegInt + 2: - ui = uint64(bigen.Uint32(d.d.decRd.readn4())) + ui = uint64(bigen.Uint32(d.r.readn4())) neg = true case simpleVdNegInt + 3: - ui = uint64(bigen.Uint64(d.d.decRd.readn8())) + ui = uint64(bigen.Uint64(d.r.readn8())) neg = true default: ok = false - // d.d.errorf("integer only valid from pos/neg integer1..8. 
Invalid descriptor: %v", d.bd) + // halt.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) } // DO NOT do this check below, because callers may only want the unsigned value: // // if ui > math.MaxInt64 { - // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) + // halt.errorf("decIntAny: Integer out of range for signed int64: %v", ui) // return // } return } -func (d *simpleDecDriver) DecodeInt64() (i int64) { +func (d *simpleDecDriver[T]) DecodeInt64() (i int64) { if d.advanceNil() { return } - i = decNegintPosintFloatNumberHelper{&d.d}.int64(d.decInteger()) + v1, v2, v3 := d.decInteger() + i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, false) d.bdRead = false return } -func (d *simpleDecDriver) DecodeUint64() (ui uint64) { +func (d *simpleDecDriver[T]) DecodeUint64() (ui uint64) { if d.advanceNil() { return } - ui = decNegintPosintFloatNumberHelper{&d.d}.uint64(d.decInteger()) + ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger()) d.bdRead = false return } -func (d *simpleDecDriver) DecodeFloat64() (f float64) { +func (d *simpleDecDriver[T]) DecodeFloat64() (f float64) { if d.advanceNil() { return } - f = decNegintPosintFloatNumberHelper{&d.d}.float64(d.decFloat()) + v1, v2 := d.decFloat() + f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, false) d.bdRead = false return } // bool can be decoded from bool only (single byte). -func (d *simpleDecDriver) DecodeBool() (b bool) { +func (d *simpleDecDriver[T]) DecodeBool() (b bool) { if d.advanceNil() { return } @@ -380,13 +377,13 @@ func (d *simpleDecDriver) DecodeBool() (b bool) { } else if d.bd == simpleVdTrue { b = true } else { - d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd) + halt.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd) } d.bdRead = false return } -func (d *simpleDecDriver) ReadMapStart() (length int) { +func (d *simpleDecDriver[T]) ReadMapStart() (length int) { if d.advanceNil() { return containerLenNil } @@ -394,7 +391,7 @@ func (d *simpleDecDriver) ReadMapStart() (length int) { return d.decLen() } -func (d *simpleDecDriver) ReadArrayStart() (length int) { +func (d *simpleDecDriver[T]) ReadArrayStart() (length int) { if d.advanceNil() { return containerLenNil } @@ -402,131 +399,128 @@ func (d *simpleDecDriver) ReadArrayStart() (length int) { return d.decLen() } -func (d *simpleDecDriver) uint2Len(ui uint64) int { +func (d *simpleDecDriver[T]) uint2Len(ui uint64) int { if chkOvf.Uint(ui, intBitsize) { - d.d.errorf("overflow integer: %v", ui) + halt.errorf("overflow integer: %v", ui) } return int(ui) } -func (d *simpleDecDriver) decLen() int { +func (d *simpleDecDriver[T]) decLen() int { switch d.bd & 7 { // d.bd % 8 { case 0: return 0 case 1: - return int(d.d.decRd.readn1()) + return int(d.r.readn1()) case 2: - return int(bigen.Uint16(d.d.decRd.readn2())) + return int(bigen.Uint16(d.r.readn2())) case 3: - return d.uint2Len(uint64(bigen.Uint32(d.d.decRd.readn4()))) + return d.uint2Len(uint64(bigen.Uint32(d.r.readn4()))) case 4: - return d.uint2Len(bigen.Uint64(d.d.decRd.readn8())) + return d.uint2Len(bigen.Uint64(d.r.readn8())) } - d.d.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8) + halt.errorf("cannot read length: bd%%8 must be in range 0..4. 
Got: %d", d.bd%8) return -1 } -func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) { - return d.DecodeBytes(nil) +func (d *simpleDecDriver[T]) DecodeStringAsBytes() ([]byte, dBytesAttachState) { + return d.DecodeBytes() } -func (d *simpleDecDriver) DecodeBytes(bs []byte) (bsOut []byte) { - d.d.decByteState = decByteStateNone +func (d *simpleDecDriver[T]) DecodeBytes() (bs []byte, state dBytesAttachState) { if d.advanceNil() { return } + var cond bool // check if an "array" of uint8's (see ContainerType for how to infer if an array) - if d.bd >= simpleVdArray && d.bd <= simpleVdMap+4 { - if bs == nil { - d.d.decByteState = decByteStateReuseBuf - bs = d.d.b[:] - } + if d.bd >= simpleVdArray && d.bd <= simpleVdArray+4 { slen := d.ReadArrayStart() - var changed bool - if bs, changed = usableByteSlice(bs, slen); changed { - d.d.decByteState = decByteStateNone - } + bs, cond = usableByteSlice(d.d.buf, slen) for i := 0; i < len(bs); i++ { bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8)) } for i := len(bs); i < slen; i++ { bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) } - return bs + if cond { + d.d.buf = bs + } + state = dBytesAttachBuffer + + return } clen := d.decLen() d.bdRead = false - if d.d.zerocopy() { - d.d.decByteState = decByteStateZerocopy - return d.d.decRd.rb.readx(uint(clen)) - } - if bs == nil { - d.d.decByteState = decByteStateReuseBuf - bs = d.d.b[:] - } - return decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, bs) + bs, cond = d.r.readxb(uint(clen)) + state = d.d.attachState(cond) + return } -func (d *simpleDecDriver) DecodeTime() (t time.Time) { +func (d *simpleDecDriver[T]) DecodeTime() (t time.Time) { if d.advanceNil() { return } if d.bd != simpleVdTime { - d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd) + halt.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd) } d.bdRead = false - clen := uint(d.d.decRd.readn1()) - b := d.d.decRd.readx(clen) - d.d.onerror((&t).UnmarshalBinary(b)) + clen := uint(d.r.readn1()) + b := d.r.readx(clen) + halt.onerror((&t).UnmarshalBinary(b)) return } -func (d *simpleDecDriver) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { - if xtag > 0xff { - d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) - } - if d.advanceNil() { +func (d *simpleDecDriver[T]) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + xbs, _, _, ok := d.decodeExtV(ext != nil, xtag) + if !ok { return } - xbs, realxtag1, zerocopy := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag := uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.setData(xbs, zerocopy) - } else if ext == SelfExt { - d.d.sideDecode(rv, basetype, xbs) + if ext == SelfExt { + sideDecode(d.h, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv, xbs, basetype, true) }) } else { ext.ReadExt(rv, xbs) } } -func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xbs []byte, xtag byte, zerocopy bool) { +func (d *simpleDecDriver[T]) DecodeRawExt(re *RawExt) { + xbs, realxtag, state, ok := d.decodeExtV(false, 0) + if !ok { + return + } + re.Tag = uint64(realxtag) + re.setData(xbs, state >= dBytesAttachViewZerocopy) +} + +func (d *simpleDecDriver[T]) decodeExtV(verifyTag bool, xtagIn uint64) (xbs []byte, xtag byte, bstate dBytesAttachState, ok bool) { + if xtagIn > 0xff { + halt.errorf("ext: tag must be <= 0xff; got: %v", xtagIn) + } + if d.advanceNil() { + return + } + tag := uint8(xtagIn) switch d.bd { case simpleVdExt, 
simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: l := d.decLen() - xtag = d.d.decRd.readn1() + xtag = d.r.readn1() if verifyTag && xtag != tag { - d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag) - } - if d.d.bytes { - xbs = d.d.decRd.rb.readx(uint(l)) - zerocopy = true - } else { - xbs = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:]) + halt.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag) } + xbs, ok = d.r.readxb(uint(l)) + bstate = d.d.attachState(ok) case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - xbs = d.DecodeBytes(nil) + xbs, bstate = d.DecodeBytes() default: - d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd) + halt.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd) } d.bdRead = false + ok = true return } -func (d *simpleDecDriver) DecodeNaked() { +func (d *simpleDecDriver[T]) DecodeNaked() { if !d.bdRead { d.readNextBd() } @@ -566,19 +560,20 @@ func (d *simpleDecDriver) DecodeNaked() { case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: n.v = valueTypeString - n.s = d.d.stringZC(d.DecodeStringAsBytes()) + n.s = d.d.detach2Str(d.DecodeStringAsBytes()) case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - d.d.fauxUnionReadRawBytes(false) + d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) //, d.h.ZeroCopy) case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: n.v = valueTypeExt l := d.decLen() - n.u = uint64(d.d.decRd.readn1()) - if d.d.bytes { - n.l = d.d.decRd.rb.readx(uint(l)) - } else { - n.l = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:]) - } + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(uint(l)) + // MARKER: not necessary to detach for extensions + // var useBuf bool + // n.l, useBuf = d.r.readxb(uint(l)) + // n.a = d.d.attachState(useBuf) + // n.l = d.d.detach2Bytes(n.l, nil, n.a) case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: n.v = valueTypeArray @@ -587,7 +582,7 @@ func (d *simpleDecDriver) DecodeNaked() { n.v = valueTypeMap decodeFurther = true default: - d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd) + halt.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd) } if !decodeFurther { @@ -595,32 +590,18 @@ func (d *simpleDecDriver) DecodeNaked() { } } -func (d *simpleDecDriver) nextValueBytes(v0 []byte) (v []byte) { +func (d *simpleDecDriver[T]) nextValueBytes() (v []byte) { if !d.bdRead { d.readNextBd() } - v = v0 - var h = decNextValueBytesHelper{d: &d.d} - var cursor = d.d.rb.c - 1 - h.append1(&v, d.bd) - v = d.nextValueBytesBdReadR(v) + d.r.startRecording() + d.nextValueBytesBdReadR() + v = d.r.stopRecording() d.bdRead = false - h.bytesRdV(&v, cursor) return } -func (d *simpleDecDriver) nextValueBytesR(v0 []byte) (v []byte) { - d.readNextBd() - v = v0 - var h = decNextValueBytesHelper{d: &d.d} - h.append1(&v, d.bd) - return d.nextValueBytesBdReadR(v) -} - -func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) { - v = v0 - var h = decNextValueBytesHelper{d: &d.d} - +func (d *simpleDecDriver[T]) nextValueBytesBdReadR() { c := d.bd var length uint @@ -629,38 +610,33 @@ func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) { case simpleVdNil, simpleVdFalse, simpleVdTrue, simpleVdString, simpleVdByteArray: // pass case simpleVdPosInt, 
simpleVdNegInt:
-		h.append1(&v, d.d.decRd.readn1())
+		d.r.readn1()
 	case simpleVdPosInt + 1, simpleVdNegInt + 1:
-		h.appendN(&v, d.d.decRd.readx(2)...)
+		d.r.skip(2)
 	case simpleVdPosInt + 2, simpleVdNegInt + 2, simpleVdFloat32:
-		h.appendN(&v, d.d.decRd.readx(4)...)
+		d.r.skip(4)
 	case simpleVdPosInt + 3, simpleVdNegInt + 3, simpleVdFloat64:
-		h.appendN(&v, d.d.decRd.readx(8)...)
+		d.r.skip(8)
 	case simpleVdTime:
-		c = d.d.decRd.readn1()
-		h.append1(&v, c)
-		h.appendN(&v, d.d.decRd.readx(uint(c))...)
+		c = d.r.readn1()
+		d.r.skip(uint(c))
 	default:
 		switch c & 7 { // c % 8 {
 		case 0:
 			length = 0
 		case 1:
-			b := d.d.decRd.readn1()
+			b := d.r.readn1()
 			length = uint(b)
-			h.append1(&v, b)
 		case 2:
-			x := d.d.decRd.readn2()
+			x := d.r.readn2()
 			length = uint(bigen.Uint16(x))
-			h.appendN(&v, x[:]...)
 		case 3:
-			x := d.d.decRd.readn4()
+			x := d.r.readn4()
 			length = uint(bigen.Uint32(x))
-			h.appendN(&v, x[:]...)
 		case 4:
-			x := d.d.decRd.readn8()
+			x := d.r.readn8()
 			length = uint(bigen.Uint64(x))
-			h.appendN(&v, x[:]...)
 		}
 
 		bExt := c >= simpleVdExt && c <= simpleVdExt+7
@@ -670,11 +646,11 @@ func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
 		bMap := c >= simpleVdMap && c <= simpleVdMap+7
 
 		if !(bExt || bStr || bByteArray || bArray || bMap) {
-			d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, c)
+			halt.errorf("cannot infer value - %s 0x%x", msgBadDesc, c)
 		}
 
 		if bExt {
-			h.append1(&v, d.d.decRd.readn1()) // tag
+			d.r.readn1() // tag
 		}
 
 		if length == 0 {
@@ -683,68 +659,91 @@
 		if bArray {
 			for i := uint(0); i < length; i++ {
-				v = d.nextValueBytesR(v)
+				d.readNextBd()
+				d.nextValueBytesBdReadR()
 			}
 		} else if bMap {
 			for i := uint(0); i < length; i++ {
-				v = d.nextValueBytesR(v)
-				v = d.nextValueBytesR(v)
+				d.readNextBd()
+				d.nextValueBytesBdReadR()
+				d.readNextBd()
+				d.nextValueBytesBdReadR()
 			}
 		} else {
-			h.appendN(&v, d.d.decRd.readx(length)...)
+			d.r.skip(length)
 		}
 	}
 	return
 }
 
-//------------------------------------
-
-// SimpleHandle is a Handle for a very simple encoding format.
+// ----
 //
-// simple is a simplistic codec similar to binc, but not as compact.
-// - Encoding of a value is always preceded by the descriptor byte (bd)
-// - True, false, nil are encoded fully in 1 byte (the descriptor)
-// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
-//   There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
-// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
-// - Length of containers (strings, bytes, array, map, extensions)
-//   are encoded in 0, 1, 2, 4 or 8 bytes.
-//   Zero-length containers have no length encoded.
-//   For others, the number of bytes is given by pow(2, bd%3)
-// - maps are encoded as [bd] [length] [[key][value]]...
-// - arrays are encoded as [bd] [length] [value]...
-// - extensions are encoded as [bd] [length] [tag] [byte]...
-// - strings/bytearrays are encoded as [bd] [length] [byte]...
-// - time.Time are encoded as [bd] [length] [byte]...
+// The following below are similar across all format files (except for the format name).
 //
-// The full spec will be published soon.
-type SimpleHandle struct {
-	binaryEncodingType
-	BasicHandle
-	// EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil
-	EncZeroValuesAsNil bool
+// We keep them together here, so that we can easily copy and compare.
+ +// ---- + +func (d *simpleEncDriver[T]) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*SimpleHandle) + d.e = shared + if shared.bytes { + fp = simpleFpEncBytes + } else { + fp = simpleFpEncIO + } + // d.w.init() + d.init2(enc) + return } -// Name returns the name of the handle: simple -func (h *SimpleHandle) Name() string { return "simple" } +func (e *simpleEncDriver[T]) writeBytesAsis(b []byte) { e.w.writeb(b) } -func (h *SimpleHandle) desc(bd byte) string { return simpledesc(bd) } +func (e *simpleEncDriver[T]) writerEnd() { e.w.end() } -func (h *SimpleHandle) newEncDriver() encDriver { - var e = &simpleEncDriver{h: h} - e.e.e = e - e.e.init(h) - e.reset() - return e +func (e *simpleEncDriver[T]) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) } -func (h *SimpleHandle) newDecDriver() decDriver { - d := &simpleDecDriver{h: h} - d.d.d = d - d.d.init(h) - d.reset() - return d +func (e *simpleEncDriver[T]) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) } -var _ decDriver = (*simpleDecDriver)(nil) -var _ encDriver = (*simpleEncDriver)(nil) +// ---- + +func (d *simpleDecDriver[T]) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*SimpleHandle) + d.d = shared + if shared.bytes { + fp = simpleFpDecBytes + } else { + fp = simpleFpDecIO + } + // d.r.init() + d.init2(dec) + return +} + +func (d *simpleDecDriver[T]) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *simpleDecDriver[T]) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *simpleDecDriver[T]) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +// ---- (custom stanza) + +func (d *simpleDecDriver[T]) descBd() string { + return sprintf("%v (%s)", d.bd, simpledesc(d.bd)) +} + +func (d *simpleDecDriver[T]) DecodeFloat32() (f float32) { + return float32(chkOvf.Float32V(d.DecodeFloat64())) +} diff --git a/vendor/github.com/ugorji/go/codec/simple.mono.generated.go b/vendor/github.com/ugorji/go/codec/simple.mono.generated.go new file mode 100644 index 000000000..ff1b02238 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/simple.mono.generated.go @@ -0,0 +1,7549 @@ +//go:build !notmono && !codec.notmono + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "encoding" + + "io" + "math" + "reflect" + "slices" + "sort" + "strconv" + "sync" + "time" +) + +type helperEncDriverSimpleBytes struct{} +type encFnSimpleBytes struct { + i encFnInfo + fe func(*encoderSimpleBytes, *encFnInfo, reflect.Value) +} +type encRtidFnSimpleBytes struct { + rtid uintptr + fn *encFnSimpleBytes +} +type encoderSimpleBytes struct { + dh helperEncDriverSimpleBytes + fp *fastpathEsSimpleBytes + e simpleEncDriverBytes + encoderBase +} +type helperDecDriverSimpleBytes struct{} +type decFnSimpleBytes struct { + i decFnInfo + fd func(*decoderSimpleBytes, *decFnInfo, reflect.Value) +} +type decRtidFnSimpleBytes struct { + rtid uintptr + fn *decFnSimpleBytes +} +type decoderSimpleBytes struct { + dh helperDecDriverSimpleBytes + fp *fastpathDsSimpleBytes + d simpleDecDriverBytes + decoderBase +} +type simpleEncDriverBytes struct { + noBuiltInTypes + encDriverNoopContainerWriter + encDriverNoState + encDriverContainerNoTrackerT + encInit2er + + h *SimpleHandle + e *encoderBase + + w bytesEncAppender +} +type simpleDecDriverBytes struct { + h *SimpleHandle + d *decoderBase + r bytesDecReader + + bdAndBdread + + noBuiltInTypes + + decDriverNoopContainerReader + decInit2er +} + +func (e *encoderSimpleBytes) rawExt(_ *encFnInfo, rv reflect.Value) { + if re := rv2i(rv).(*RawExt); re == nil { + e.e.EncodeNil() + } else { + e.e.EncodeRawExt(re) + } +} + +func (e *encoderSimpleBytes) ext(f *encFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (e *encoderSimpleBytes) selferMarshal(_ *encFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(&Encoder{e}) +} + +func (e *encoderSimpleBytes) binaryMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) +} + +func (e *encoderSimpleBytes) textMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) +} + +func (e *encoderSimpleBytes) jsonMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) +} + +func (e *encoderSimpleBytes) raw(_ *encFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *encoderSimpleBytes) encodeComplex64(v complex64) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat32(real(v)) +} + +func (e *encoderSimpleBytes) encodeComplex128(v complex128) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat64(real(v)) +} + +func (e *encoderSimpleBytes) kBool(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeBool(rvGetBool(rv)) +} + +func (e *encoderSimpleBytes) kTime(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeTime(rvGetTime(rv)) +} + +func (e *encoderSimpleBytes) kString(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeString(rvGetString(rv)) +} + +func (e *encoderSimpleBytes) kFloat32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(rvGetFloat32(rv)) +} + +func (e *encoderSimpleBytes) kFloat64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rvGetFloat64(rv)) +} + +func (e *encoderSimpleBytes) kComplex64(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex64(rvGetComplex64(rv)) +} + +func (e *encoderSimpleBytes) kComplex128(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex128(rvGetComplex128(rv)) +} + +func (e *encoderSimpleBytes) 
kInt(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt(rv))) +} + +func (e *encoderSimpleBytes) kInt8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt8(rv))) +} + +func (e *encoderSimpleBytes) kInt16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt16(rv))) +} + +func (e *encoderSimpleBytes) kInt32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt32(rv))) +} + +func (e *encoderSimpleBytes) kInt64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt64(rv))) +} + +func (e *encoderSimpleBytes) kUint(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint(rv))) +} + +func (e *encoderSimpleBytes) kUint8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint8(rv))) +} + +func (e *encoderSimpleBytes) kUint16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint16(rv))) +} + +func (e *encoderSimpleBytes) kUint32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint32(rv))) +} + +func (e *encoderSimpleBytes) kUint64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint64(rv))) +} + +func (e *encoderSimpleBytes) kUintptr(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUintptr(rv))) +} + +func (e *encoderSimpleBytes) kSeqFn(rt reflect.Type) (fn *encFnSimpleBytes) { + + if rt = baseRT(rt); rt.Kind() != reflect.Interface { + fn = e.fn(rt) + } + return +} + +func (e *encoderSimpleBytes) kArrayWMbs(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(l) + e.mapStart(l >> 1) + + var fn *encFnSimpleBytes + builtin := ti.tielem.flagEncBuiltin + if !builtin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerMapKey + e.e.WriteMapElemKey(true) + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(false) + } else { + e.mapElemValue() + } + } + e.c = 0 + e.e.WriteMapEnd() + +} + +func (e *encoderSimpleBytes) kArrayW(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l <= 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(l) + + var fn *encFnSimpleBytes + if !ti.tielem.flagEncBuiltin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerArrayElem + e.e.WriteArrayElem(true) + builtin := ti.tielem.flagEncBuiltin + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + e.c = containerArrayElem + e.e.WriteArrayElem(false) + } + + e.c = 0 + e.e.WriteArrayEnd() +} + +func (e *encoderSimpleBytes) kChan(f *encFnInfo, rv reflect.Value) { + if f.ti.chandir&uint8(reflect.RecvDir) == 0 { + halt.errorStr("send-only channel cannot be encoded") + } + if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) { + e.kSliceBytesChan(rv) + return + } + rtslice := reflect.SliceOf(f.ti.elem) + rv = chanToSlice(rv, rtslice, e.h.ChanRecvTimeout) + ti := e.h.getTypeInfo(rt2id(rtslice), rtslice) + if f.ti.mbs { + e.kArrayWMbs(rv, ti, true) + } else { + e.kArrayW(rv, ti, true) + } +} + +func (e *encoderSimpleBytes) kSlice(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, true) + } else if f.ti.rtid == uint8SliceTypId || uint8TypId == 
rt2id(f.ti.elem) { + + e.e.EncodeBytes(rvGetBytes(rv)) + } else { + e.kArrayW(rv, f.ti, true) + } +} + +func (e *encoderSimpleBytes) kArray(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, false) + } else if handleBytesWithinKArray && uint8TypId == rt2id(f.ti.elem) { + e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, nil)) + } else { + e.kArrayW(rv, f.ti, false) + } +} + +func (e *encoderSimpleBytes) kSliceBytesChan(rv reflect.Value) { + + bs0 := e.blist.peek(32, true) + bs := bs0 + + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + +L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + + break L1 + } + } + default: + for b := range ch { + bs = append(bs, b) + } + } + + e.e.EncodeBytes(bs) + e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.blist.put(bs0) + } +} + +func (e *encoderSimpleBytes) kStructFieldKey(keyType valueType, encName string) { + + if keyType == valueTypeString { + e.e.EncodeString(encName) + } else if keyType == valueTypeInt { + e.e.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + e.e.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + e.e.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64))) + } else { + halt.errorStr2("invalid struct key type: ", keyType.String()) + } + +} + +func (e *encoderSimpleBytes) kStructSimple(f *encFnInfo, rv reflect.Value) { + _ = e.e + tisfi := f.ti.sfi.source() + + chkCirRef := e.h.CheckCircularRef + var si *structFieldInfo + var j int + + if f.ti.toArray || e.h.StructToArray { + if len(tisfi) == 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } else { + if len(tisfi) == 0 { + e.e.WriteMapEmpty() + return + } + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + e.mapStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.EncodeStringNoEscape4Json(si.encName) + e.mapElemValue() + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteMapEnd() + } +} + +func (e *encoderSimpleBytes) kStruct(f *encFnInfo, rv reflect.Value) { + _ = e.e + ti := f.ti + toMap := !(ti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if ti.flagMissingFielder { + toMap = true + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + } else if ti.flagMissingFielderPtr { + toMap = true + if rv.CanAddr() { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder).CodecMissingFields() + } else { + mf = rv2i(e.addrRV(rv, ti.rt, ti.ptr)).(MissingFielder).CodecMissingFields() + } + } + newlen := len(mf) + tisfi := ti.sfi.source() + newlen += len(tisfi) + + var fkvs = e.slist.get(newlen)[:newlen] + + recur := e.h.RecursiveEmptyCheck + chkCirRef := e.h.CheckCircularRef + + var xlen int + + var kv sfiRv + var j int + var sf encStructFieldObj + if toMap { + newlen = 0 + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + for _, si := range tisfi { + + if 
si.omitEmpty { + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyValue(kv.r, e.h.TypeInfos, recur) { + continue + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[newlen] = kv + newlen++ + } + + var mf2s []stringIntf + if len(mf) != 0 { + mf2s = make([]stringIntf, 0, len(mf)) + for k, v := range mf { + if k == "" { + continue + } + if ti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur) { + continue + } + mf2s = append(mf2s, stringIntf{k, v}) + } + } + + xlen = newlen + len(mf2s) + if xlen == 0 { + e.e.WriteMapEmpty() + goto END + } + + e.mapStart(xlen) + + if len(mf2s) != 0 && e.h.Canonical { + mf2w := make([]encStructFieldObj, newlen+len(mf2s)) + for j = 0; j < newlen; j++ { + kv = fkvs[j] + mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, true, + !kv.v.encNameEscape4Json, kv.v.encBuiltin} + } + for _, v := range mf2s { + mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false, false} + j++ + } + sort.Sort((encStructFieldObjSlice)(mf2w)) + for j, sf = range mf2w { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && sf.noEsc4json { + e.e.EncodeStringNoEscape4Json(sf.key) + } else { + e.kStructFieldKey(ti.keyType, sf.key) + } + e.mapElemValue() + if sf.isRv { + if sf.builtin { + e.encodeIB(rv2i(baseRVRV(sf.rv))) + } else { + e.encodeValue(sf.rv, nil) + } + } else { + if !e.encodeBuiltin(sf.intf) { + e.encodeR(reflect.ValueOf(sf.intf)) + } + + } + } + } else { + keytyp := ti.keyType + for j = 0; j < newlen; j++ { + kv = fkvs[j] + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && !kv.v.encNameEscape4Json { + e.e.EncodeStringNoEscape4Json(kv.v.encName) + } else { + e.kStructFieldKey(keytyp, kv.v.encName) + } + e.mapElemValue() + if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + for _, v := range mf2s { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.kStructFieldKey(keytyp, v.v) + e.mapElemValue() + if !e.encodeBuiltin(v.i) { + e.encodeR(reflect.ValueOf(v.i)) + } + + j++ + } + } + + e.c = 0 + e.e.WriteMapEnd() + } else { + newlen = len(tisfi) + for i, si := range tisfi { + + if si.omitEmpty { + + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyContainerValue(kv.r, e.h.TypeInfos, recur) { + kv.r = reflect.Value{} + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[i] = kv + } + + if newlen == 0 { + e.e.WriteArrayEmpty() + goto END + } + + e.arrayStart(newlen) + for j = 0; j < newlen; j++ { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + kv = fkvs[j] + if !kv.r.IsValid() { + e.e.EncodeNil() + } else if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } + +END: + + e.slist.put(fkvs) +} + +func (e *encoderSimpleBytes) kMap(f *encFnInfo, rv reflect.Value) { + _ = e.e + l := rvLenMap(rv) + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.mapStart(l) + + var keyFn, valFn *encFnSimpleBytes + + ktypeKind := reflect.Kind(f.ti.keykind) + vtypeKind := reflect.Kind(f.ti.elemkind) + + rtval := f.ti.elem + rtvalkind := vtypeKind + for rtvalkind == reflect.Ptr { + rtval = rtval.Elem() + rtvalkind = rtval.Kind() + } + if rtvalkind != reflect.Interface { + valFn = e.fn(rtval) + } + + var rvv = mapAddrLoopvarRV(f.ti.elem, vtypeKind) + + rtkey := f.ti.key + var keyTypeIsString = stringTypId == rt2id(rtkey) + if keyTypeIsString { + keyFn = e.fn(rtkey) + 
} else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + keyFn = e.fn(rtkey) + } + } + + if e.h.Canonical { + e.kMapCanonical(f.ti, rv, rvv, keyFn, valFn) + e.c = 0 + e.e.WriteMapEnd() + return + } + + var rvk = mapAddrLoopvarRV(f.ti.key, ktypeKind) + + var it mapIter + mapRange(&it, rv, rvk, rvv, true) + + kbuiltin := f.ti.tikey.flagEncBuiltin + vbuiltin := f.ti.tielem.flagEncBuiltin + for j := 0; it.Next(); j++ { + rv = it.Key() + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if keyTypeIsString { + e.e.EncodeString(rvGetString(rv)) + } else if kbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(rv, keyFn) + } + e.mapElemValue() + rv = it.Value() + if vbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(it.Value(), valFn) + } + } + it.Done() + + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleBytes) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valFn *encFnSimpleBytes) { + _ = e.e + + rtkey := ti.key + rtkeydecl := rtkey.PkgPath() == "" && rtkey.Name() != "" + + mks := rv.MapKeys() + rtkeyKind := rtkey.Kind() + mparams := getMapReqParams(ti) + + switch rtkeyKind { + case reflect.Bool: + + if len(mks) == 2 && mks[0].Bool() { + mks[0], mks[1] = mks[1], mks[0] + } + for i := range mks { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeBool(mks[i].Bool()) + } else { + e.encodeValueNonNil(mks[i], keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mks[i], rvv, mparams), valFn) + } + case reflect.String: + mksv := make([]orderedRv[string], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rvGetString(k) + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeString(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]orderedRv[uint64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeUint(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]orderedRv[int64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeInt(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float32: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat32(float32(mksv[i].v)) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float64: + mksv := make([]orderedRv[float64], len(mks)) + 
for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat64(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + default: + if rtkey == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + slices.SortFunc(mksv, cmpTimeRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeTime(mksv[i].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + break + } + + bs0 := e.blist.get(len(mks) * 16) + mksv := bs0 + mksbv := make([]bytesRv, len(mks)) + + sideEncode(e.hh, &e.h.sideEncPool, func(se encoderI) { + se.ResetBytes(&mksv) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + se.setContainerState(containerMapKey) + se.encodeR(baseRVRV(k)) + se.atEndOfEncode() + se.writerEnd() + v.r = k + v.v = mksv[l:] + } + }) + + slices.SortFunc(mksbv, cmpBytesRv) + for j := range mksbv { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.writeBytesAsis(mksbv[j].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksbv[j].r, rvv, mparams), valFn) + } + e.blist.put(mksv) + if !byteSliceSameData(bs0, mksv) { + e.blist.put(bs0) + } + } +} + +func (e *encoderSimpleBytes) init(h Handle) { + initHandle(h) + callMake(&e.e) + e.hh = h + e.h = h.getBasicHandle() + + e.err = errEncoderNotInitialized + + e.fp = e.e.init(h, &e.encoderBase, e).(*fastpathEsSimpleBytes) + + if e.bytes { + e.rtidFn = &e.h.rtidFnsEncBytes + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtBytes + } else { + e.rtidFn = &e.h.rtidFnsEncIO + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtIO + } + + e.reset() +} + +func (e *encoderSimpleBytes) reset() { + e.e.reset() + if e.ci != nil { + e.ci = e.ci[:0] + } + e.c = 0 + e.calls = 0 + e.seq = 0 + e.err = nil +} + +func (e *encoderSimpleBytes) Encode(v interface{}) (err error) { + + defer panicValToErr(e, callRecoverSentinel, &e.err, &err, debugging) + e.mustEncode(v) + return +} + +func (e *encoderSimpleBytes) MustEncode(v interface{}) { + defer panicValToErr(e, callRecoverSentinel, &e.err, nil, true) + e.mustEncode(v) + return +} + +func (e *encoderSimpleBytes) mustEncode(v interface{}) { + halt.onerror(e.err) + if e.hh == nil { + halt.onerror(errNoFormatHandle) + } + + e.calls++ + if !e.encodeBuiltin(v) { + e.encodeR(reflect.ValueOf(v)) + } + + e.calls-- + if e.calls == 0 { + e.e.atEndOfEncode() + e.e.writerEnd() + } +} + +func (e *encoderSimpleBytes) encodeI(iv interface{}) { + if !e.encodeBuiltin(iv) { + e.encodeR(reflect.ValueOf(iv)) + } +} + +func (e *encoderSimpleBytes) encodeIB(iv interface{}) { + if !e.encodeBuiltin(iv) { + + halt.errorStr("[should not happen] invalid type passed to encodeBuiltin") + } +} + +func (e *encoderSimpleBytes) encodeR(base reflect.Value) { + e.encodeValue(base, nil) +} + +func (e *encoderSimpleBytes) encodeBuiltin(iv interface{}) (ok bool) { + ok = true + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + + case Raw: + e.rawBytes(v) + case string: + e.e.EncodeString(v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + 
e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case complex64: + e.encodeComplex64(v) + case complex128: + e.encodeComplex128(v) + case time.Time: + e.e.EncodeTime(v) + case []byte: + e.e.EncodeBytes(v) + default: + + ok = !skipFastpathTypeSwitchInDirectCall && e.dh.fastpathEncodeTypeSwitch(iv, e) + } + return +} + +func (e *encoderSimpleBytes) encodeValue(rv reflect.Value, fn *encFnSimpleBytes) { + + var ciPushes int + + var rvp reflect.Value + var rvpValid bool + +RV: + switch rv.Kind() { + case reflect.Ptr: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = true + rvp = rv + rv = rv.Elem() + + if e.h.CheckCircularRef && e.ci.canPushElemKind(rv.Kind()) { + e.ci.push(rv2i(rvp)) + ciPushes++ + } + goto RV + case reflect.Interface: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = false + rvp = reflect.Value{} + rv = rv.Elem() + fn = nil + goto RV + case reflect.Map: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Slice, reflect.Chan: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + goto END + } + + if fn == nil { + fn = e.fn(rv.Type()) + } + + if !fn.i.addrE { + + } else if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + fn.fe(e, &fn.i, rv) + +END: + if ciPushes > 0 { + e.ci.pop(ciPushes) + } +} + +func (e *encoderSimpleBytes) encodeValueNonNil(rv reflect.Value, fn *encFnSimpleBytes) { + + if fn.i.addrE { + if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + } + fn.fe(e, &fn.i, rv) +} + +func (e *encoderSimpleBytes) encodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + e.encodeValue(baseRV(v), e.fn(t)) + } else { + e.encodeValue(baseRV(v), e.fnNoExt(t)) + } +} + +func (e *encoderSimpleBytes) marshalUtf8(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeString(stringView(bs)) + } +} + +func (e *encoderSimpleBytes) marshalAsis(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.writeBytesAsis(bs) + } +} + +func (e *encoderSimpleBytes) marshalRaw(bs []byte, fnerr error) { + halt.onerror(fnerr) + e.e.EncodeBytes(bs) +} + +func (e *encoderSimpleBytes) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + halt.errorBytes("Raw values cannot be encoded: ", v) + } + e.e.writeBytesAsis(v) +} + +func (e *encoderSimpleBytes) fn(t reflect.Type) *encFnSimpleBytes { + return e.dh.encFnViaBH(t, e.rtidFn, e.h, e.fp, false) +} + +func (e *encoderSimpleBytes) fnNoExt(t reflect.Type) *encFnSimpleBytes { + return e.dh.encFnViaBH(t, e.rtidFnNoExt, e.h, e.fp, true) +} + +func (e *encoderSimpleBytes) mapStart(length int) { + e.e.WriteMapStart(length) + e.c = containerMapStart +} + +func (e *encoderSimpleBytes) mapElemValue() { + e.e.WriteMapElemValue() + e.c = containerMapValue +} + +func (e *encoderSimpleBytes) arrayStart(length int) { + e.e.WriteArrayStart(length) + e.c = containerArrayStart +} + +func (e *encoderSimpleBytes) writerEnd() { + e.e.writerEnd() +} + +func (e 
*encoderSimpleBytes) atEndOfEncode() { + e.e.atEndOfEncode() +} + +func (e *encoderSimpleBytes) Reset(w io.Writer) { + if e.bytes { + halt.onerror(errEncNoResetBytesWithWriter) + } + e.reset() + if w == nil { + w = io.Discard + } + e.e.resetOutIO(w) +} + +func (e *encoderSimpleBytes) ResetBytes(out *[]byte) { + if !e.bytes { + halt.onerror(errEncNoResetWriterWithBytes) + } + e.resetBytes(out) +} + +func (e *encoderSimpleBytes) resetBytes(out *[]byte) { + e.reset() + if out == nil { + out = &bytesEncAppenderDefOut + } + e.e.resetOutBytes(out) +} + +func (helperEncDriverSimpleBytes) newEncoderBytes(out *[]byte, h Handle) *encoderSimpleBytes { + var c1 encoderSimpleBytes + c1.bytes = true + c1.init(h) + c1.ResetBytes(out) + return &c1 +} + +func (helperEncDriverSimpleBytes) newEncoderIO(out io.Writer, h Handle) *encoderSimpleBytes { + var c1 encoderSimpleBytes + c1.bytes = false + c1.init(h) + c1.Reset(out) + return &c1 +} + +func (helperEncDriverSimpleBytes) encFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathEsSimpleBytes) (f *fastpathESimpleBytes, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperEncDriverSimpleBytes) encFindRtidFn(s []encRtidFnSimpleBytes, rtid uintptr) (i uint, fn *encFnSimpleBytes) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperEncDriverSimpleBytes) encFromRtidFnSlice(fns *atomicRtidFnSlice) (s []encRtidFnSimpleBytes) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]encRtidFnSimpleBytes](v)) + } + return +} + +func (dh helperEncDriverSimpleBytes) encFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, + x *BasicHandle, fp *fastpathEsSimpleBytes, checkExt bool) (fn *encFnSimpleBytes) { + return dh.encFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperEncDriverSimpleBytes) encFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsSimpleBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnSimpleBytes) { + rtid := rt2id(rt) + var sp []encRtidFnSimpleBytes = dh.encFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.encFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.encFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperEncDriverSimpleBytes) encFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsSimpleBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnSimpleBytes) { + + fn = dh.encFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []encRtidFnSimpleBytes + mu.Lock() + sp = dh.encFromRtidFnSlice(fns) + + if sp == nil { + sp = []encRtidFnSimpleBytes{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.encFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]encRtidFnSimpleBytes, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = encRtidFnSimpleBytes{rtid, fn} + 
fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperEncDriverSimpleBytes) encFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathEsSimpleBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnSimpleBytes) { + fn = new(encFnSimpleBytes) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + if rtid == timeTypId && timeBuiltin { + fn.fe = (*encoderSimpleBytes).kTime + } else if rtid == rawTypId { + fn.fe = (*encoderSimpleBytes).raw + } else if rtid == rawExtTypId { + fn.fe = (*encoderSimpleBytes).rawExt + fi.addrE = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*encoderSimpleBytes).ext + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fe = (*encoderSimpleBytes).selferMarshal + fi.addrE = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fe = (*encoderSimpleBytes).binaryMarshal + fi.addrE = ti.flagBinaryMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fe = (*encoderSimpleBytes).jsonMarshal + fi.addrE = ti.flagJsonMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fe = (*encoderSimpleBytes).textMarshal + fi.addrE = ti.flagTextMarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fe = fp[idx].encfn + } + } else { + + xfe, xrt := dh.encFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf := xfe.encfn + fn.fe = func(e *encoderSimpleBytes, xf *encFnInfo, xrv reflect.Value) { + xfnf(e, xf, rvConvert(xrv, xrt)) + } + } + } + } + if fn.fe == nil { + switch rk { + case reflect.Bool: + fn.fe = (*encoderSimpleBytes).kBool + case reflect.String: + + fn.fe = (*encoderSimpleBytes).kString + case reflect.Int: + fn.fe = (*encoderSimpleBytes).kInt + case reflect.Int8: + fn.fe = (*encoderSimpleBytes).kInt8 + case reflect.Int16: + fn.fe = (*encoderSimpleBytes).kInt16 + case reflect.Int32: + fn.fe = (*encoderSimpleBytes).kInt32 + case reflect.Int64: + fn.fe = (*encoderSimpleBytes).kInt64 + case reflect.Uint: + fn.fe = (*encoderSimpleBytes).kUint + case reflect.Uint8: + fn.fe = (*encoderSimpleBytes).kUint8 + case reflect.Uint16: + fn.fe = (*encoderSimpleBytes).kUint16 + case reflect.Uint32: + fn.fe = (*encoderSimpleBytes).kUint32 + case reflect.Uint64: + fn.fe = (*encoderSimpleBytes).kUint64 + case reflect.Uintptr: + fn.fe = (*encoderSimpleBytes).kUintptr + case reflect.Float32: + fn.fe = (*encoderSimpleBytes).kFloat32 + case reflect.Float64: + fn.fe = (*encoderSimpleBytes).kFloat64 + case reflect.Complex64: + fn.fe = (*encoderSimpleBytes).kComplex64 + case reflect.Complex128: + fn.fe = (*encoderSimpleBytes).kComplex128 + case reflect.Chan: + fn.fe = (*encoderSimpleBytes).kChan + case reflect.Slice: + fn.fe = (*encoderSimpleBytes).kSlice + case reflect.Array: + fn.fe = 
(*encoderSimpleBytes).kArray + case reflect.Struct: + if ti.simple { + fn.fe = (*encoderSimpleBytes).kStructSimple + } else { + fn.fe = (*encoderSimpleBytes).kStruct + } + case reflect.Map: + fn.fe = (*encoderSimpleBytes).kMap + case reflect.Interface: + + fn.fe = (*encoderSimpleBytes).kErr + default: + + fn.fe = (*encoderSimpleBytes).kErr + } + } + } + return +} +func (d *decoderSimpleBytes) rawExt(f *decFnInfo, rv reflect.Value) { + d.d.DecodeRawExt(rv2i(rv).(*RawExt)) +} + +func (d *decoderSimpleBytes) ext(f *decFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (d *decoderSimpleBytes) selferUnmarshal(_ *decFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(&Decoder{d}) +} + +func (d *decoderSimpleBytes) binaryUnmarshal(_ *decFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs, _ := d.d.DecodeBytes() + fnerr := bm.UnmarshalBinary(xbs) + halt.onerror(fnerr) +} + +func (d *decoderSimpleBytes) textUnmarshal(_ *decFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(bytesOKs(d.d.DecodeStringAsBytes())) + halt.onerror(fnerr) +} + +func (d *decoderSimpleBytes) jsonUnmarshal(_ *decFnInfo, rv reflect.Value) { + d.jsonUnmarshalV(rv2i(rv).(jsonUnmarshaler)) +} + +func (d *decoderSimpleBytes) jsonUnmarshalV(tm jsonUnmarshaler) { + + halt.onerror(tm.UnmarshalJSON(d.d.nextValueBytes())) +} + +func (d *decoderSimpleBytes) kErr(_ *decFnInfo, rv reflect.Value) { + halt.errorf("unsupported decoding kind: %s, for %#v", rv.Kind(), rv) + +} + +func (d *decoderSimpleBytes) raw(_ *decFnInfo, rv reflect.Value) { + rvSetBytes(rv, d.rawBytes()) +} + +func (d *decoderSimpleBytes) kString(_ *decFnInfo, rv reflect.Value) { + rvSetString(rv, d.detach2Str(d.d.DecodeStringAsBytes())) +} + +func (d *decoderSimpleBytes) kBool(_ *decFnInfo, rv reflect.Value) { + rvSetBool(rv, d.d.DecodeBool()) +} + +func (d *decoderSimpleBytes) kTime(_ *decFnInfo, rv reflect.Value) { + rvSetTime(rv, d.d.DecodeTime()) +} + +func (d *decoderSimpleBytes) kFloat32(_ *decFnInfo, rv reflect.Value) { + rvSetFloat32(rv, d.d.DecodeFloat32()) +} + +func (d *decoderSimpleBytes) kFloat64(_ *decFnInfo, rv reflect.Value) { + rvSetFloat64(rv, d.d.DecodeFloat64()) +} + +func (d *decoderSimpleBytes) kComplex64(_ *decFnInfo, rv reflect.Value) { + rvSetComplex64(rv, complex(d.d.DecodeFloat32(), 0)) +} + +func (d *decoderSimpleBytes) kComplex128(_ *decFnInfo, rv reflect.Value) { + rvSetComplex128(rv, complex(d.d.DecodeFloat64(), 0)) +} + +func (d *decoderSimpleBytes) kInt(_ *decFnInfo, rv reflect.Value) { + rvSetInt(rv, int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))) +} + +func (d *decoderSimpleBytes) kInt8(_ *decFnInfo, rv reflect.Value) { + rvSetInt8(rv, int8(chkOvf.IntV(d.d.DecodeInt64(), 8))) +} + +func (d *decoderSimpleBytes) kInt16(_ *decFnInfo, rv reflect.Value) { + rvSetInt16(rv, int16(chkOvf.IntV(d.d.DecodeInt64(), 16))) +} + +func (d *decoderSimpleBytes) kInt32(_ *decFnInfo, rv reflect.Value) { + rvSetInt32(rv, int32(chkOvf.IntV(d.d.DecodeInt64(), 32))) +} + +func (d *decoderSimpleBytes) kInt64(_ *decFnInfo, rv reflect.Value) { + rvSetInt64(rv, d.d.DecodeInt64()) +} + +func (d *decoderSimpleBytes) kUint(_ *decFnInfo, rv reflect.Value) { + rvSetUint(rv, uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderSimpleBytes) kUintptr(_ *decFnInfo, rv reflect.Value) { + rvSetUintptr(rv, uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderSimpleBytes) kUint8(_ *decFnInfo, rv 
reflect.Value) { + rvSetUint8(rv, uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))) +} + +func (d *decoderSimpleBytes) kUint16(_ *decFnInfo, rv reflect.Value) { + rvSetUint16(rv, uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))) +} + +func (d *decoderSimpleBytes) kUint32(_ *decFnInfo, rv reflect.Value) { + rvSetUint32(rv, uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))) +} + +func (d *decoderSimpleBytes) kUint64(_ *decFnInfo, rv reflect.Value) { + rvSetUint64(rv, d.d.DecodeUint64()) +} + +func (d *decoderSimpleBytes) kInterfaceNaked(f *decFnInfo) (rvn reflect.Value) { + + n := d.naked() + d.d.DecodeNaked() + + if decFailNonEmptyIntf && f.ti.numMeth > 0 { + halt.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + } + + switch n.v { + case valueTypeMap: + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapStrIntfTypId { + var v2 map[string]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + + rvn = rvZeroAddrK(d.h.MapType, reflect.Map) + d.decodeValue(rvn, nil) + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvZeroAddrK(d.h.SliceType, reflect.Slice) + d.decodeValue(rvn, nil) + } + if d.h.PreferArrayOverSlice { + rvn = rvGetArray4Slice(rvn) + } + case valueTypeExt: + tag, bytes := n.u, n.l + bfn := d.h.getExtForTag(tag) + var re = RawExt{Tag: tag} + if bytes == nil { + + if bfn == nil { + d.decode(&re.Value) + rvn = rv4iptr(&re).Elem() + } else if bfn.ext == SelfExt { + rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind()) + d.decodeValue(rvn, d.fnNoExt(bfn.rt)) + } else { + rvn = reflect.New(bfn.rt) + d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext) + rvn = rvn.Elem() + } + } else { + + if bfn == nil { + re.setData(bytes, false) + rvn = rv4iptr(&re).Elem() + } else { + rvn = reflect.New(bfn.rt) + if bfn.ext == SelfExt { + sideDecode(d.hh, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv2i(rvn), bytes, bfn.rt, true) }) + } else { + bfn.ext.ReadExt(rv2i(rvn), bytes) + } + rvn = rvn.Elem() + } + } + + if d.h.PreferPointerForStructOrArray && rvn.CanAddr() { + if rk := rvn.Kind(); rk == reflect.Array || rk == reflect.Struct { + rvn = rvn.Addr() + } + } + case valueTypeNil: + + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + halt.errorStr2("kInterfaceNaked: unexpected valueType: ", n.v.String()) + } + return +} + +func (d *decoderSimpleBytes) kInterface(f *decFnInfo, rv reflect.Value) { + + isnilrv := rvIsNil(rv) + + var rvn reflect.Value + + if d.h.InterfaceReset { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } else if !isnilrv { + decSetNonNilRV2Zero4Intf(rv) + } + return + } + } else if isnilrv { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } + return + 
} + } else { + + rvn = rv.Elem() + } + + canDecode, _ := isDecodeable(rvn) + + if !canDecode { + rvn2 := d.oneShotAddrRV(rvn.Type(), rvn.Kind()) + rvSetDirect(rvn2, rvn) + rvn = rvn2 + } + + d.decodeValue(rvn, nil) + rvSetIntf(rv, rvn) +} + +func (d *decoderSimpleBytes) kStructField(si *structFieldInfo, rv reflect.Value) { + if d.d.TryNil() { + rv = si.fieldNoAlloc(rv, true) + if rv.IsValid() { + decSetNonNilRV2Zero(rv) + } + } else if si.decBuiltin { + rv = rvAddr(si.fieldAlloc(rv), si.ptrTyp) + d.decode(rv2i(rv)) + } else { + fn := d.fn(si.baseTyp) + rv = si.fieldAlloc(rv) + if fn.i.addrD { + rv = rvAddr(rv, si.ptrTyp) + } + fn.fd(d, &fn.i, rv) + } +} + +func (d *decoderSimpleBytes) kStructSimple(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var rvkencname []byte + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderSimpleBytes) kStruct(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + var mf MissingFielder + if ti.flagMissingFielder { + mf = rv2i(rv).(MissingFielder) + } else if ti.flagMissingFielderPtr { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var name2 []byte + var rvkencname []byte + tkt := ti.keyType + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + + if tkt == valueTypeString { + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + } else if tkt == valueTypeInt { + rvkencname = strconv.AppendInt(d.b[:0], d.d.DecodeInt64(), 10) + } else if tkt == valueTypeUint { + rvkencname = strconv.AppendUint(d.b[:0], d.d.DecodeUint64(), 10) + } else if tkt == valueTypeFloat { + rvkencname = strconv.AppendFloat(d.b[:0], d.d.DecodeFloat64(), 'f', -1, 64) + } else { + halt.errorStr2("invalid struct key type: ", ti.keyType.String()) + } + + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else if mf != nil { + + name2 = append(name2[:0], rvkencname...) 
+ var f interface{} + d.decode(&f) + if !mf.CodecMissingField(name2, f) && d.h.ErrorIfNoField { + halt.errorStr2("no matching struct field when decoding stream map with key: ", stringView(name2)) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderSimpleBytes) kSlice(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + rvCanset := rv.CanSet() + + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + rvbs := rvGetBytes(rv) + if rvCanset { + bs2, bst := d.decodeBytesInto(rvbs, false) + if bst != dBytesIntoParamOut { + rvSetBytes(rv, bs2) + } + } else { + + d.decodeBytesInto(rvbs[:len(rvbs):len(rvbs)], true) + } + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset { + if rvIsNil(rv) { + rvSetDirect(rv, rvSliceZeroCap(ti.rt)) + } else { + rvSetSliceLen(rv, 0) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem0Mut := !scalarBitset.isset(ti.elemkind) + rtelem := ti.elem + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnSimpleBytes + + var rvChanged bool + + var rv0 = rv + var rv9 reflect.Value + + rvlen := rvLenSlice(rv) + rvcap := rvCapSlice(rv) + maxInitLen := d.maxInitLen() + hasLen := containerLenS >= 0 + if hasLen { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen1 := int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + if rvlen1 == rvlen { + } else if rvlen1 <= rvcap { + if rvCanset { + rvlen = rvlen1 + rvSetSliceLen(rv, rvlen) + } + } else if rvCanset { + rvlen = rvlen1 + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && rtelem0Mut { + rvCopySlice(rv, rv0, rtelem) + } + } else if containerLenS != rvlen { + if rvCanset { + rvlen = containerLenS + rvSetSliceLen(rv, rvlen) + } + } + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } + + var j int + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if rvCanset { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into 
non-settable slice") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + + if rvlen < rvcap { + rvlen = rvcap + if rvCanset { + rvSetSliceLen(rv, rvlen) + } else if rvChanged { + rv = rvSlice(rv, rvlen) + } else { + halt.onerror(errExpandSliceCannotChange) + } + } else { + if !(rvCanset || rvChanged) { + halt.onerror(errExpandSliceCannotChange) + } + rv, rvcap, rvCanset = rvGrowSlice(rv, f.ti, rvcap, 1) + + rvlen = rvcap + rvChanged = !rvCanset + } + } + + rv9 = rvArrayIndex(rv, j, f.ti, true) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if j < rvlen { + if rvCanset { + rvSetSliceLen(rv, j) + } else if rvChanged { + rv = rvSlice(rv, j) + } + + } else if j == 0 && rvIsNil(rv) { + if rvCanset { + rv = rvSliceZeroCap(ti.rt) + rvCanset = false + rvChanged = true + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } +} + +func (d *decoderSimpleBytes) kArray(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + ctyp := d.d.ContainerType() + if handleBytesWithinKArray && (ctyp == valueTypeBytes || ctyp == valueTypeString) { + + if ti.elemkind != uint8(reflect.Uint8) { + halt.errorf("bytes/string in stream can decode into array of bytes, but not %v", ti.rt) + } + rvbs := rvGetArrayBytes(rv, nil) + d.decodeBytesInto(rvbs, true) + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var rv9 reflect.Value + + rvlen := rv.Len() + hasLen := containerLenS >= 0 + if hasLen && containerLenS > rvlen { + halt.errorf("cannot decode into array with length: %v, less than container length: %v", any(rvlen), any(containerLenS)) + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + var fn *decFnSimpleBytes + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } else { + fn = d.fn(rtelem) + } + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + d.arrayCannotExpand(rvlen, j+1) + d.swallow() + continue + } + + rv9 = rvArrayIndex(rv, j, f.ti, false) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} 
+ +func (d *decoderSimpleBytes) kChan(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + if ti.chandir&uint8(reflect.SendDir) == 0 { + halt.errorStr("receive-only channel cannot be decoded") + } + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + bs2, _ := d.d.DecodeBytes() + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + return + } + + var rvCanset = rv.CanSet() + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset && rvIsNil(rv) { + rvSetDirect(rv, reflect.MakeChan(ti.rt, 0)) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + useTransient := decUseTransient && ti.elemkind != byte(reflect.Ptr) && ti.tielem.flagCanTransient + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnSimpleBytes + + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + var rvlen int + hasLen := containerLenS >= 0 + maxInitLen := d.maxInitLen() + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if hasLen { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + } else { + rvlen = decDefChanCap + } + if rvCanset { + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } else { + halt.errorStr("cannot decode into non-settable chan") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if rv9.IsValid() { + rvSetZero(rv9) + } else if useTransient { + rv9 = d.perType.TransientAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } else { + rv9 = rvZeroAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } + if !d.d.TryNil() { + d.decodeValueNoCheckNil(rv9, fn) + } + rv.Send(rv9) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } + +} + +func (d *decoderSimpleBytes) kMap(f *decFnInfo, rv reflect.Value) { + _ = d.d + containerLen := d.mapStart(d.d.ReadMapStart()) + ti := f.ti + if rvIsNil(rv) { + rvlen := int(decInferLen(containerLen, d.maxInitLen(), uint(ti.keysize+ti.elemsize))) + rvSetDirect(rv, makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + d.mapEnd() + return + } + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := reflect.Kind(ti.elemkind) + ktypeKind := reflect.Kind(ti.keykind) + mparams := getMapReqParams(ti) + + vtypePtr := vtypeKind == reflect.Ptr + ktypePtr := ktypeKind == reflect.Ptr + + vTransient := decUseTransient && !vtypePtr && ti.tielem.flagCanTransient + + kTransient := vTransient && !ktypePtr && ti.tikey.flagCanTransient + + var vtypeElem reflect.Type + + var keyFn, valFn *decFnSimpleBytes + var ktypeLo, vtypeLo = ktype, vtype + + if ktypeKind == reflect.Ptr { + for ktypeLo = ktype.Elem(); ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + } + + if vtypePtr { + vtypeElem = vtype.Elem() + 
for vtypeLo = vtypeElem; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + } + + rvkMut := !scalarBitset.isset(ti.keykind) + rvvMut := !scalarBitset.isset(ti.elemkind) + rvvCanNil := isnilBitset.isset(ti.elemkind) + + var rvk, rvkn, rvv, rvvn, rvva, rvvz reflect.Value + + var doMapGet, doMapSet bool + + if !d.h.MapValueReset { + if rvvMut && (vtypeKind != reflect.Interface || !d.h.InterfaceReset) { + doMapGet = true + rvva = mapAddrLoopvarRV(vtype, vtypeKind) + } + } + + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen >= 0 + + var kstr2bs []byte + var kstr string + + var mapKeyStringSharesBytesBuf bool + var att dBytesAttachState + + var vElem, kElem reflect.Type + kbuiltin := ti.tikey.flagDecBuiltin && ti.keykind != uint8(reflect.Slice) + vbuiltin := ti.tielem.flagDecBuiltin + if kbuiltin && ktypePtr { + kElem = ti.key.Elem() + } + if vbuiltin && vtypePtr { + vElem = ti.elem.Elem() + } + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + mapKeyStringSharesBytesBuf = false + kstr = "" + if j == 0 { + + if kTransient { + rvk = d.perType.TransientAddr2K(ktype, ktypeKind) + } else { + rvk = rvZeroAddrK(ktype, ktypeKind) + } + if !rvkMut { + rvkn = rvk + } + if !rvvMut { + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + } + if !ktypeIsString && keyFn == nil { + keyFn = d.fn(ktypeLo) + } + if valFn == nil { + valFn = d.fn(vtypeLo) + } + } else if rvkMut { + rvSetZero(rvk) + } else { + rvk = rvkn + } + + d.mapElemKey(j == 0) + + if d.d.TryNil() { + rvSetZero(rvk) + } else if ktypeIsString { + kstr2bs, att = d.d.DecodeStringAsBytes() + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, att) + rvSetString(rvk, kstr) + } else { + if kbuiltin { + if ktypePtr { + if rvIsNil(rvk) { + rvSetDirect(rvk, reflect.New(kElem)) + } + d.decode(rv2i(rvk)) + } else { + d.decode(rv2i(rvAddr(rvk, ti.tikey.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvk, keyFn) + } + + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() && rvk2.Type() == uint8SliceTyp { + kstr2bs = rvGetBytes(rvk2) + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, dBytesAttachView) + rvSetIntf(rvk, rv4istr(kstr)) + } + + } + } + + if mapKeyStringSharesBytesBuf && d.bufio { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + mapKeyStringSharesBytesBuf = false + } + + d.mapElemValue() + + if d.d.TryNil() { + if mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + + if !rvvz.IsValid() { + rvvz = rvZeroK(vtype, vtypeKind) + } + mapSet(rv, rvk, rvvz, mparams) + continue + } + + doMapSet = true + + if !rvvMut { + rvv = rvvn + } else if !doMapGet { + goto NEW_RVV + } else { + rvv = mapGet(rv, rvk, rvva, mparams) + if !rvv.IsValid() || (rvvCanNil && rvIsNil(rvv)) { + goto NEW_RVV + } + switch vtypeKind { + case reflect.Ptr, reflect.Map: + doMapSet = false + case reflect.Interface: + + rvvn = rvv.Elem() + if k := rvvn.Kind(); (k == reflect.Ptr || k == reflect.Map) && !rvIsNil(rvvn) { + d.decodeValueNoCheckNil(rvvn, nil) + continue + } + + rvvn = rvZeroAddrK(vtype, vtypeKind) + rvSetIntf(rvvn, rvv) + rvv = rvvn + default: + + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + rvSetDirect(rvvn, rvv) + rvv = 
rvvn + } + } + goto DECODE_VALUE_NO_CHECK_NIL + + NEW_RVV: + if vtypePtr { + rvv = reflect.New(vtypeElem) + } else if vTransient { + rvv = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvv = rvZeroAddrK(vtype, vtypeKind) + } + + DECODE_VALUE_NO_CHECK_NIL: + if doMapSet && mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + if vbuiltin { + if vtypePtr { + if rvIsNil(rvv) { + rvSetDirect(rvv, reflect.New(vElem)) + } + d.decode(rv2i(rvv)) + } else { + d.decode(rv2i(rvAddr(rvv, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvv, valFn) + } + if doMapSet { + mapSet(rv, rvk, rvv, mparams) + } + } + + d.mapEnd() +} + +func (d *decoderSimpleBytes) init(h Handle) { + initHandle(h) + callMake(&d.d) + d.hh = h + d.h = h.getBasicHandle() + + d.err = errDecoderNotInitialized + + if d.h.InternString && d.is == nil { + d.is.init() + } + + d.fp = d.d.init(h, &d.decoderBase, d).(*fastpathDsSimpleBytes) + + if d.bytes { + d.rtidFn = &d.h.rtidFnsDecBytes + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtBytes + } else { + d.bufio = d.h.ReaderBufferSize > 0 + d.rtidFn = &d.h.rtidFnsDecIO + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtIO + } + + d.reset() + +} + +func (d *decoderSimpleBytes) reset() { + d.d.reset() + d.err = nil + d.c = 0 + d.depth = 0 + d.calls = 0 + + d.maxdepth = decDefMaxDepth + if d.h.MaxDepth > 0 { + d.maxdepth = d.h.MaxDepth + } + d.mtid = 0 + d.stid = 0 + d.mtr = false + d.str = false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + _, d.mtr = fastpathAvIndex(d.mtid) + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + _, d.str = fastpathAvIndex(d.stid) + } +} + +func (d *decoderSimpleBytes) Reset(r io.Reader) { + if d.bytes { + halt.onerror(errDecNoResetBytesWithReader) + } + d.reset() + if r == nil { + r = &eofReader + } + d.d.resetInIO(r) +} + +func (d *decoderSimpleBytes) ResetBytes(in []byte) { + if !d.bytes { + halt.onerror(errDecNoResetReaderWithBytes) + } + d.resetBytes(in) +} + +func (d *decoderSimpleBytes) resetBytes(in []byte) { + d.reset() + if in == nil { + in = zeroByteSlice + } + d.d.resetInBytes(in) +} + +func (d *decoderSimpleBytes) ResetString(s string) { + d.ResetBytes(bytesView(s)) +} + +func (d *decoderSimpleBytes) Decode(v interface{}) (err error) { + + defer panicValToErr(d, callRecoverSentinel, &d.err, &err, debugging) + d.mustDecode(v) + return +} + +func (d *decoderSimpleBytes) MustDecode(v interface{}) { + defer panicValToErr(d, callRecoverSentinel, &d.err, nil, true) + d.mustDecode(v) + return +} + +func (d *decoderSimpleBytes) mustDecode(v interface{}) { + halt.onerror(d.err) + if d.hh == nil { + halt.onerror(errNoFormatHandle) + } + + d.calls++ + d.decode(v) + d.calls-- +} + +func (d *decoderSimpleBytes) Release() {} + +func (d *decoderSimpleBytes) swallow() { + d.d.nextValueBytes() +} + +func (d *decoderSimpleBytes) nextValueBytes() []byte { + return d.d.nextValueBytes() +} + +func (d *decoderSimpleBytes) decode(iv interface{}) { + _ = d.d + + rv, ok := isNil(iv, true) + if ok { + halt.onerror(errCannotDecodeIntoNil) + } + + switch v := iv.(type) { + + case *string: + *v = d.detach2Str(d.d.DecodeStringAsBytes()) + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: 
+ *v = d.d.DecodeInt64() + case *uint: + *v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *uintptr: + *v = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *float32: + *v = d.d.DecodeFloat32() + case *float64: + *v = d.d.DecodeFloat64() + case *complex64: + *v = complex(d.d.DecodeFloat32(), 0) + case *complex128: + *v = complex(d.d.DecodeFloat64(), 0) + case *[]byte: + *v, _ = d.decodeBytesInto(*v, false) + case []byte: + + d.decodeBytesInto(v[:len(v):len(v)], true) + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(rv4iptr(v), nil) + + case reflect.Value: + if ok, _ = isDecodeable(v); !ok { + d.haltAsNotDecodeable(v) + } + d.decodeValue(v, nil) + + default: + + if skipFastpathTypeSwitchInDirectCall || !d.dh.fastpathDecodeTypeSwitch(iv, d) { + if !rv.IsValid() { + rv = reflect.ValueOf(iv) + } + if ok, _ = isDecodeable(rv); !ok { + d.haltAsNotDecodeable(rv) + } + d.decodeValue(rv, nil) + } + } +} + +func (d *decoderSimpleBytes) decodeValue(rv reflect.Value, fn *decFnSimpleBytes) { + if d.d.TryNil() { + decSetNonNilRV2Zero(rv) + } else { + d.decodeValueNoCheckNil(rv, fn) + } +} + +func (d *decoderSimpleBytes) decodeValueNoCheckNil(rv reflect.Value, fn *decFnSimpleBytes) { + + var rvp reflect.Value + var rvpValid bool +PTR: + if rv.Kind() == reflect.Ptr { + rvpValid = true + if rvIsNil(rv) { + rvSetDirect(rv, reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + goto PTR + } + + if fn == nil { + fn = d.fn(rv.Type()) + } + if fn.i.addrD { + if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else if fn.i.addrDf { + halt.errorStr("cannot decode into a non-pointer value") + } + } + fn.fd(d, &fn.i, rv) +} + +func (d *decoderSimpleBytes) decodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + d.decodeValue(baseRV(v), d.fn(t)) + } else { + d.decodeValue(baseRV(v), d.fnNoExt(t)) + } +} + +func (d *decoderSimpleBytes) structFieldNotFound(index int, rvkencname string) { + + if d.h.ErrorIfNoField { + if index >= 0 { + halt.errorInt("no matching struct field found when decoding stream array at index ", int64(index)) + } else if rvkencname != "" { + halt.errorStr2("no matching struct field found when decoding stream map with key ", rvkencname) + } + } + d.swallow() +} + +func (d *decoderSimpleBytes) decodeBytesInto(out []byte, mustFit bool) (v []byte, state dBytesIntoState) { + v, att := d.d.DecodeBytes() + if cap(v) == 0 || (att >= dBytesAttachViewZerocopy && !mustFit) { + + return + } + if len(v) == 0 { + v = zeroByteSlice + return + } + if len(out) == len(v) { + state = dBytesIntoParamOut + } else if cap(out) >= len(v) { + out = out[:len(v)] + state = dBytesIntoParamOutSlice + } else if mustFit { + halt.errorf("bytes capacity insufficient for decoded bytes: got/expected: %d/%d", len(v), len(out)) + } else { + out = make([]byte, len(v)) + state = dBytesIntoNew + } + copy(out, v) + v = out + return +} + +func (d *decoderSimpleBytes) rawBytes() (v []byte) { + + v = d.d.nextValueBytes() + if d.bytes && !d.h.ZeroCopy { + vv := make([]byte, len(v)) + copy(vv, v) + v = vv + } + return +} + +func (d *decoderSimpleBytes) wrapErr(v error, err *error) { + *err = wrapCodecErr(v, d.hh.Name(), d.d.NumBytesRead(), false) +} + +func 
(d *decoderSimpleBytes) NumBytesRead() int { + return d.d.NumBytesRead() +} + +func (d *decoderSimpleBytes) containerNext(j, containerLen int, hasLen bool) bool { + + if hasLen { + return j < containerLen + } + return !d.d.CheckBreak() +} + +func (d *decoderSimpleBytes) mapElemKey(firstTime bool) { + d.d.ReadMapElemKey(firstTime) + d.c = containerMapKey +} + +func (d *decoderSimpleBytes) mapElemValue() { + d.d.ReadMapElemValue() + d.c = containerMapValue +} + +func (d *decoderSimpleBytes) mapEnd() { + d.d.ReadMapEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderSimpleBytes) arrayElem(firstTime bool) { + d.d.ReadArrayElem(firstTime) + d.c = containerArrayElem +} + +func (d *decoderSimpleBytes) arrayEnd() { + d.d.ReadArrayEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderSimpleBytes) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) { + + var vv interface{} + d.decode(&vv) + ext.UpdateExt(v, vv) + +} + +func (d *decoderSimpleBytes) fn(t reflect.Type) *decFnSimpleBytes { + return d.dh.decFnViaBH(t, d.rtidFn, d.h, d.fp, false) +} + +func (d *decoderSimpleBytes) fnNoExt(t reflect.Type) *decFnSimpleBytes { + return d.dh.decFnViaBH(t, d.rtidFnNoExt, d.h, d.fp, true) +} + +func (helperDecDriverSimpleBytes) newDecoderBytes(in []byte, h Handle) *decoderSimpleBytes { + var c1 decoderSimpleBytes + c1.bytes = true + c1.init(h) + c1.ResetBytes(in) + return &c1 +} + +func (helperDecDriverSimpleBytes) newDecoderIO(in io.Reader, h Handle) *decoderSimpleBytes { + var c1 decoderSimpleBytes + c1.init(h) + c1.Reset(in) + return &c1 +} + +func (helperDecDriverSimpleBytes) decFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathDsSimpleBytes) (f *fastpathDSimpleBytes, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperDecDriverSimpleBytes) decFindRtidFn(s []decRtidFnSimpleBytes, rtid uintptr) (i uint, fn *decFnSimpleBytes) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperDecDriverSimpleBytes) decFromRtidFnSlice(fns *atomicRtidFnSlice) (s []decRtidFnSimpleBytes) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]decRtidFnSimpleBytes](v)) + } + return +} + +func (dh helperDecDriverSimpleBytes) decFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, x *BasicHandle, fp *fastpathDsSimpleBytes, + checkExt bool) (fn *decFnSimpleBytes) { + return dh.decFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperDecDriverSimpleBytes) decFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsSimpleBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnSimpleBytes) { + rtid := rt2id(rt) + var sp []decRtidFnSimpleBytes = dh.decFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.decFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.decFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperDecDriverSimpleBytes) decFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, 
fp *fastpathDsSimpleBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnSimpleBytes) { + + fn = dh.decFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []decRtidFnSimpleBytes + mu.Lock() + sp = dh.decFromRtidFnSlice(fns) + + if sp == nil { + sp = []decRtidFnSimpleBytes{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.decFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]decRtidFnSimpleBytes, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = decRtidFnSimpleBytes{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperDecDriverSimpleBytes) decFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathDsSimpleBytes, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnSimpleBytes) { + fn = new(decFnSimpleBytes) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + fi.addrDf = true + + if rtid == timeTypId && timeBuiltin { + fn.fd = (*decoderSimpleBytes).kTime + } else if rtid == rawTypId { + fn.fd = (*decoderSimpleBytes).raw + } else if rtid == rawExtTypId { + fn.fd = (*decoderSimpleBytes).rawExt + fi.addrD = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fd = (*decoderSimpleBytes).ext + fi.addrD = true + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fd = (*decoderSimpleBytes).selferUnmarshal + fi.addrD = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fd = (*decoderSimpleBytes).binaryUnmarshal + fi.addrD = ti.flagBinaryUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fd = (*decoderSimpleBytes).jsonUnmarshal + fi.addrD = ti.flagJsonUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fd = (*decoderSimpleBytes).textUnmarshal + fi.addrD = ti.flagTextUnmarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fd = fp[idx].decfn + fi.addrD = true + fi.addrDf = false + if rk == reflect.Array { + fi.addrD = false + } + } + } else { + + xfe, xrt := dh.decFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf2 := xfe.decfn + if rk == reflect.Array { + fi.addrD = false + fn.fd = func(d *decoderSimpleBytes, xf *decFnInfo, xrv reflect.Value) { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } else { + fi.addrD = true + fi.addrDf = false + xptr2rt := reflect.PointerTo(xrt) + fn.fd = func(d *decoderSimpleBytes, xf *decFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, rvConvert(xrv, xptr2rt)) + } else { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } + } + } + } + } + if fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fd = (*decoderSimpleBytes).kBool + case reflect.String: + fn.fd = (*decoderSimpleBytes).kString + case reflect.Int: + fn.fd = (*decoderSimpleBytes).kInt + case 
reflect.Int8: + fn.fd = (*decoderSimpleBytes).kInt8 + case reflect.Int16: + fn.fd = (*decoderSimpleBytes).kInt16 + case reflect.Int32: + fn.fd = (*decoderSimpleBytes).kInt32 + case reflect.Int64: + fn.fd = (*decoderSimpleBytes).kInt64 + case reflect.Uint: + fn.fd = (*decoderSimpleBytes).kUint + case reflect.Uint8: + fn.fd = (*decoderSimpleBytes).kUint8 + case reflect.Uint16: + fn.fd = (*decoderSimpleBytes).kUint16 + case reflect.Uint32: + fn.fd = (*decoderSimpleBytes).kUint32 + case reflect.Uint64: + fn.fd = (*decoderSimpleBytes).kUint64 + case reflect.Uintptr: + fn.fd = (*decoderSimpleBytes).kUintptr + case reflect.Float32: + fn.fd = (*decoderSimpleBytes).kFloat32 + case reflect.Float64: + fn.fd = (*decoderSimpleBytes).kFloat64 + case reflect.Complex64: + fn.fd = (*decoderSimpleBytes).kComplex64 + case reflect.Complex128: + fn.fd = (*decoderSimpleBytes).kComplex128 + case reflect.Chan: + fn.fd = (*decoderSimpleBytes).kChan + case reflect.Slice: + fn.fd = (*decoderSimpleBytes).kSlice + case reflect.Array: + fi.addrD = false + fn.fd = (*decoderSimpleBytes).kArray + case reflect.Struct: + if ti.simple { + fn.fd = (*decoderSimpleBytes).kStructSimple + } else { + fn.fd = (*decoderSimpleBytes).kStruct + } + case reflect.Map: + fn.fd = (*decoderSimpleBytes).kMap + case reflect.Interface: + + fn.fd = (*decoderSimpleBytes).kInterface + default: + + fn.fd = (*decoderSimpleBytes).kErr + } + } + } + return +} +func (e *simpleEncDriverBytes) EncodeNil() { + e.w.writen1(simpleVdNil) +} + +func (e *simpleEncDriverBytes) EncodeBool(b bool) { + if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && !b { + e.EncodeNil() + return + } + if b { + e.w.writen1(simpleVdTrue) + } else { + e.w.writen1(simpleVdFalse) + } +} + +func (e *simpleEncDriverBytes) EncodeFloat32(f float32) { + if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 { + e.EncodeNil() + return + } + e.w.writen1(simpleVdFloat32) + e.w.writen4(bigen.PutUint32(math.Float32bits(f))) +} + +func (e *simpleEncDriverBytes) EncodeFloat64(f float64) { + if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 { + e.EncodeNil() + return + } + e.w.writen1(simpleVdFloat64) + e.w.writen8(bigen.PutUint64(math.Float64bits(f))) +} + +func (e *simpleEncDriverBytes) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-v), simpleVdNegInt) + } else { + e.encUint(uint64(v), simpleVdPosInt) + } +} + +func (e *simpleEncDriverBytes) EncodeUint(v uint64) { + e.encUint(v, simpleVdPosInt) +} + +func (e *simpleEncDriverBytes) encUint(v uint64, bd uint8) { + if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == 0 { + e.EncodeNil() + return + } + if v <= math.MaxUint8 { + e.w.writen2(bd, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 1) + e.w.writen2(bigen.PutUint16(uint16(v))) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 2) + e.w.writen4(bigen.PutUint32(uint32(v))) + } else { + e.w.writen1(bd + 3) + e.w.writen8(bigen.PutUint64(v)) + } +} + +func (e *simpleEncDriverBytes) encLen(bd byte, length int) { + if length == 0 { + e.w.writen1(bd) + } else if length <= math.MaxUint8 { + e.w.writen1(bd + 1) + e.w.writen1(uint8(length)) + } else if length <= math.MaxUint16 { + e.w.writen1(bd + 2) + e.w.writen2(bigen.PutUint16(uint16(length))) + } else if int64(length) <= math.MaxUint32 { + e.w.writen1(bd + 3) + e.w.writen4(bigen.PutUint32(uint32(length))) + } else { + e.w.writen1(bd + 4) + e.w.writen8(bigen.PutUint64(uint64(length))) + } +} + +func (e *simpleEncDriverBytes) EncodeExt(v interface{}, basetype reflect.Type, xtag 
uint64, ext Ext) { + var bs0, bs []byte + if ext == SelfExt { + bs0 = e.e.blist.get(1024) + bs = bs0 + sideEncode(e.h, &e.h.sideEncPool, func(se encoderI) { oneOffEncode(se, v, &bs, basetype, true) }) + } else { + bs = ext.WriteExt(v) + } + if bs == nil { + e.writeNilBytes() + goto END + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +END: + if ext == SelfExt { + e.e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.e.blist.put(bs0) + } + } +} + +func (e *simpleEncDriverBytes) EncodeRawExt(re *RawExt) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *simpleEncDriverBytes) encodeExtPreamble(xtag byte, length int) { + e.encLen(simpleVdExt, length) + e.w.writen1(xtag) +} + +func (e *simpleEncDriverBytes) WriteArrayStart(length int) { + e.encLen(simpleVdArray, length) +} + +func (e *simpleEncDriverBytes) WriteMapStart(length int) { + e.encLen(simpleVdMap, length) +} + +func (e *simpleEncDriverBytes) WriteArrayEmpty() { + + e.w.writen1(simpleVdArray) +} + +func (e *simpleEncDriverBytes) WriteMapEmpty() { + + e.w.writen1(simpleVdMap) +} + +func (e *simpleEncDriverBytes) EncodeString(v string) { + if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == "" { + e.EncodeNil() + return + } + if e.h.StringToRaw { + e.encLen(simpleVdByteArray, len(v)) + } else { + e.encLen(simpleVdString, len(v)) + } + e.w.writestr(v) +} + +func (e *simpleEncDriverBytes) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) } + +func (e *simpleEncDriverBytes) EncodeStringBytesRaw(v []byte) { + + e.encLen(simpleVdByteArray, len(v)) + e.w.writeb(v) +} + +func (e *simpleEncDriverBytes) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *simpleEncDriverBytes) encodeNilBytes() { + b := byte(simpleVdNil) + if e.h.NilCollectionToZeroLength { + b = simpleVdArray + } + e.w.writen1(b) +} + +func (e *simpleEncDriverBytes) writeNilOr(v byte) { + if !e.h.NilCollectionToZeroLength { + v = simpleVdNil + } + e.w.writen1(v) +} + +func (e *simpleEncDriverBytes) writeNilArray() { + e.writeNilOr(simpleVdArray) +} + +func (e *simpleEncDriverBytes) writeNilMap() { + e.writeNilOr(simpleVdMap) +} + +func (e *simpleEncDriverBytes) writeNilBytes() { + e.writeNilOr(simpleVdByteArray) +} + +func (e *simpleEncDriverBytes) EncodeTime(t time.Time) { + + if t.IsZero() { + e.EncodeNil() + return + } + v, err := t.MarshalBinary() + halt.onerror(err) + e.w.writen2(simpleVdTime, uint8(len(v))) + e.w.writeb(v) +} + +func (d *simpleDecDriverBytes) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *simpleDecDriverBytes) advanceNil() (null bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return +} + +func (d *simpleDecDriverBytes) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdNil: + d.bdRead = false + return valueTypeNil + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + return valueTypeBytes + case simpleVdString, simpleVdString + 1, + simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + return valueTypeString + case simpleVdArray, simpleVdArray + 1, + simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + return valueTypeArray + case simpleVdMap, simpleVdMap + 1, + simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + return valueTypeMap + } + return valueTypeUnset +} + +func (d *simpleDecDriverBytes) TryNil() 
bool { + return d.advanceNil() +} + +func (d *simpleDecDriverBytes) decFloat() (f float64, ok bool) { + ok = true + switch d.bd { + case simpleVdFloat32: + f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4()))) + case simpleVdFloat64: + f = math.Float64frombits(bigen.Uint64(d.r.readn8())) + default: + ok = false + } + return +} + +func (d *simpleDecDriverBytes) decInteger() (ui uint64, neg, ok bool) { + ok = true + switch d.bd { + case simpleVdPosInt: + ui = uint64(d.r.readn1()) + case simpleVdPosInt + 1: + ui = uint64(bigen.Uint16(d.r.readn2())) + case simpleVdPosInt + 2: + ui = uint64(bigen.Uint32(d.r.readn4())) + case simpleVdPosInt + 3: + ui = uint64(bigen.Uint64(d.r.readn8())) + case simpleVdNegInt: + ui = uint64(d.r.readn1()) + neg = true + case simpleVdNegInt + 1: + ui = uint64(bigen.Uint16(d.r.readn2())) + neg = true + case simpleVdNegInt + 2: + ui = uint64(bigen.Uint32(d.r.readn4())) + neg = true + case simpleVdNegInt + 3: + ui = uint64(bigen.Uint64(d.r.readn8())) + neg = true + default: + ok = false + + } + + return +} + +func (d *simpleDecDriverBytes) DecodeInt64() (i int64) { + if d.advanceNil() { + return + } + v1, v2, v3 := d.decInteger() + i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, false) + d.bdRead = false + return +} + +func (d *simpleDecDriverBytes) DecodeUint64() (ui uint64) { + if d.advanceNil() { + return + } + ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger()) + d.bdRead = false + return +} + +func (d *simpleDecDriverBytes) DecodeFloat64() (f float64) { + if d.advanceNil() { + return + } + v1, v2 := d.decFloat() + f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, false) + d.bdRead = false + return +} + +func (d *simpleDecDriverBytes) DecodeBool() (b bool) { + if d.advanceNil() { + return + } + if d.bd == simpleVdFalse { + } else if d.bd == simpleVdTrue { + b = true + } else { + halt.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *simpleDecDriverBytes) ReadMapStart() (length int) { + if d.advanceNil() { + return containerLenNil + } + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriverBytes) ReadArrayStart() (length int) { + if d.advanceNil() { + return containerLenNil + } + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriverBytes) uint2Len(ui uint64) int { + if chkOvf.Uint(ui, intBitsize) { + halt.errorf("overflow integer: %v", ui) + } + return int(ui) +} + +func (d *simpleDecDriverBytes) decLen() int { + switch d.bd & 7 { + case 0: + return 0 + case 1: + return int(d.r.readn1()) + case 2: + return int(bigen.Uint16(d.r.readn2())) + case 3: + return d.uint2Len(uint64(bigen.Uint32(d.r.readn4()))) + case 4: + return d.uint2Len(bigen.Uint64(d.r.readn8())) + } + halt.errorf("cannot read length: bd%%8 must be in range 0..4. 
Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriverBytes) DecodeStringAsBytes() ([]byte, dBytesAttachState) { + return d.DecodeBytes() +} + +func (d *simpleDecDriverBytes) DecodeBytes() (bs []byte, state dBytesAttachState) { + if d.advanceNil() { + return + } + var cond bool + + if d.bd >= simpleVdArray && d.bd <= simpleVdArray+4 { + slen := d.ReadArrayStart() + bs, cond = usableByteSlice(d.d.buf, slen) + for i := 0; i < len(bs); i++ { + bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8)) + } + for i := len(bs); i < slen; i++ { + bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) + } + if cond { + d.d.buf = bs + } + state = dBytesAttachBuffer + + return + } + + clen := d.decLen() + d.bdRead = false + bs, cond = d.r.readxb(uint(clen)) + state = d.d.attachState(cond) + return +} + +func (d *simpleDecDriverBytes) DecodeTime() (t time.Time) { + if d.advanceNil() { + return + } + if d.bd != simpleVdTime { + halt.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd) + } + d.bdRead = false + clen := uint(d.r.readn1()) + b := d.r.readx(clen) + halt.onerror((&t).UnmarshalBinary(b)) + return +} + +func (d *simpleDecDriverBytes) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + xbs, _, _, ok := d.decodeExtV(ext != nil, xtag) + if !ok { + return + } + if ext == SelfExt { + sideDecode(d.h, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv, xbs, basetype, true) }) + } else { + ext.ReadExt(rv, xbs) + } +} + +func (d *simpleDecDriverBytes) DecodeRawExt(re *RawExt) { + xbs, realxtag, state, ok := d.decodeExtV(false, 0) + if !ok { + return + } + re.Tag = uint64(realxtag) + re.setData(xbs, state >= dBytesAttachViewZerocopy) +} + +func (d *simpleDecDriverBytes) decodeExtV(verifyTag bool, xtagIn uint64) (xbs []byte, xtag byte, bstate dBytesAttachState, ok bool) { + if xtagIn > 0xff { + halt.errorf("ext: tag must be <= 0xff; got: %v", xtagIn) + } + if d.advanceNil() { + return + } + tag := uint8(xtagIn) + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + halt.errorf("wrong extension tag. Got %b. 
Expecting: %v", xtag, tag) + } + xbs, ok = d.r.readxb(uint(l)) + bstate = d.d.attachState(ok) + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs, bstate = d.DecodeBytes() + default: + halt.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd) + } + d.bdRead = false + ok = true + return +} + +func (d *simpleDecDriverBytes) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.naked() + var decodeFurther bool + + switch d.bd { + case simpleVdNil: + n.v = valueTypeNil + case simpleVdFalse: + n.v = valueTypeBool + n.b = false + case simpleVdTrue: + n.v = valueTypeBool + n.b = true + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt64() + } else { + n.v = valueTypeUint + n.u = d.DecodeUint64() + } + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + n.v = valueTypeInt + n.i = d.DecodeInt64() + case simpleVdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat64() + case simpleVdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat64() + case simpleVdTime: + n.v = valueTypeTime + n.t = d.DecodeTime() + case simpleVdString, simpleVdString + 1, + simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + n.v = valueTypeString + n.s = d.d.detach2Str(d.DecodeStringAsBytes()) + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(uint(l)) + + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, + simpleVdArray + 3, simpleVdArray + 4: + n.v = valueTypeArray + decodeFurther = true + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + n.v = valueTypeMap + decodeFurther = true + default: + halt.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd) + } + + if !decodeFurther { + d.bdRead = false + } +} + +func (d *simpleDecDriverBytes) nextValueBytes() (v []byte) { + if !d.bdRead { + d.readNextBd() + } + d.r.startRecording() + d.nextValueBytesBdReadR() + v = d.r.stopRecording() + d.bdRead = false + return +} + +func (d *simpleDecDriverBytes) nextValueBytesBdReadR() { + c := d.bd + + var length uint + + switch c { + case simpleVdNil, simpleVdFalse, simpleVdTrue, simpleVdString, simpleVdByteArray: + + case simpleVdPosInt, simpleVdNegInt: + d.r.readn1() + case simpleVdPosInt + 1, simpleVdNegInt + 1: + d.r.skip(2) + case simpleVdPosInt + 2, simpleVdNegInt + 2, simpleVdFloat32: + d.r.skip(4) + case simpleVdPosInt + 3, simpleVdNegInt + 3, simpleVdFloat64: + d.r.skip(8) + case simpleVdTime: + c = d.r.readn1() + d.r.skip(uint(c)) + + default: + switch c & 7 { + case 0: + length = 0 + case 1: + b := d.r.readn1() + length = uint(b) + case 2: + x := d.r.readn2() + length = uint(bigen.Uint16(x)) + case 3: + x := d.r.readn4() + length = uint(bigen.Uint32(x)) + case 4: + x := d.r.readn8() + length = uint(bigen.Uint64(x)) + } + + bExt := c >= simpleVdExt && c <= simpleVdExt+7 + bStr := c >= simpleVdString && c <= simpleVdString+7 + bByteArray := c >= simpleVdByteArray && c <= simpleVdByteArray+7 + bArray := c >= simpleVdArray && c <= simpleVdArray+7 + bMap := c >= simpleVdMap && c <= simpleVdMap+7 + + if !(bExt || bStr || bByteArray || bArray || 
bMap) { + halt.errorf("cannot infer value - %s 0x%x", msgBadDesc, c) + } + + if bExt { + d.r.readn1() + } + + if length == 0 { + break + } + + if bArray { + for i := uint(0); i < length; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + } + } else if bMap { + for i := uint(0); i < length; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + } else { + d.r.skip(length) + } + } + return +} + +func (d *simpleEncDriverBytes) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*SimpleHandle) + d.e = shared + if shared.bytes { + fp = simpleFpEncBytes + } else { + fp = simpleFpEncIO + } + + d.init2(enc) + return +} + +func (e *simpleEncDriverBytes) writeBytesAsis(b []byte) { e.w.writeb(b) } + +func (e *simpleEncDriverBytes) writerEnd() { e.w.end() } + +func (e *simpleEncDriverBytes) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *simpleEncDriverBytes) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +func (d *simpleDecDriverBytes) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*SimpleHandle) + d.d = shared + if shared.bytes { + fp = simpleFpDecBytes + } else { + fp = simpleFpDecIO + } + + d.init2(dec) + return +} + +func (d *simpleDecDriverBytes) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *simpleDecDriverBytes) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *simpleDecDriverBytes) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +func (d *simpleDecDriverBytes) descBd() string { + return sprintf("%v (%s)", d.bd, simpledesc(d.bd)) +} + +func (d *simpleDecDriverBytes) DecodeFloat32() (f float32) { + return float32(chkOvf.Float32V(d.DecodeFloat64())) +} + +type helperEncDriverSimpleIO struct{} +type encFnSimpleIO struct { + i encFnInfo + fe func(*encoderSimpleIO, *encFnInfo, reflect.Value) +} +type encRtidFnSimpleIO struct { + rtid uintptr + fn *encFnSimpleIO +} +type encoderSimpleIO struct { + dh helperEncDriverSimpleIO + fp *fastpathEsSimpleIO + e simpleEncDriverIO + encoderBase +} +type helperDecDriverSimpleIO struct{} +type decFnSimpleIO struct { + i decFnInfo + fd func(*decoderSimpleIO, *decFnInfo, reflect.Value) +} +type decRtidFnSimpleIO struct { + rtid uintptr + fn *decFnSimpleIO +} +type decoderSimpleIO struct { + dh helperDecDriverSimpleIO + fp *fastpathDsSimpleIO + d simpleDecDriverIO + decoderBase +} +type simpleEncDriverIO struct { + noBuiltInTypes + encDriverNoopContainerWriter + encDriverNoState + encDriverContainerNoTrackerT + encInit2er + + h *SimpleHandle + e *encoderBase + + w bufioEncWriter +} +type simpleDecDriverIO struct { + h *SimpleHandle + d *decoderBase + r ioDecReader + + bdAndBdread + + noBuiltInTypes + + decDriverNoopContainerReader + decInit2er +} + +func (e *encoderSimpleIO) rawExt(_ *encFnInfo, rv reflect.Value) { + if re := rv2i(rv).(*RawExt); re == nil { + e.e.EncodeNil() + } else { + e.e.EncodeRawExt(re) + } +} + +func (e *encoderSimpleIO) ext(f *encFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (e *encoderSimpleIO) selferMarshal(_ *encFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(&Encoder{e}) +} + +func (e *encoderSimpleIO) binaryMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) +} + +func (e *encoderSimpleIO) textMarshal(_ 
*encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) +} + +func (e *encoderSimpleIO) jsonMarshal(_ *encFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) +} + +func (e *encoderSimpleIO) raw(_ *encFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *encoderSimpleIO) encodeComplex64(v complex64) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat32(real(v)) +} + +func (e *encoderSimpleIO) encodeComplex128(v complex128) { + if imag(v) != 0 { + halt.errorf("cannot encode complex number: %v, with imaginary values: %v", any(v), any(imag(v))) + } + e.e.EncodeFloat64(real(v)) +} + +func (e *encoderSimpleIO) kBool(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeBool(rvGetBool(rv)) +} + +func (e *encoderSimpleIO) kTime(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeTime(rvGetTime(rv)) +} + +func (e *encoderSimpleIO) kString(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeString(rvGetString(rv)) +} + +func (e *encoderSimpleIO) kFloat32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(rvGetFloat32(rv)) +} + +func (e *encoderSimpleIO) kFloat64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rvGetFloat64(rv)) +} + +func (e *encoderSimpleIO) kComplex64(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex64(rvGetComplex64(rv)) +} + +func (e *encoderSimpleIO) kComplex128(_ *encFnInfo, rv reflect.Value) { + e.encodeComplex128(rvGetComplex128(rv)) +} + +func (e *encoderSimpleIO) kInt(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt(rv))) +} + +func (e *encoderSimpleIO) kInt8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt8(rv))) +} + +func (e *encoderSimpleIO) kInt16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt16(rv))) +} + +func (e *encoderSimpleIO) kInt32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt32(rv))) +} + +func (e *encoderSimpleIO) kInt64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeInt(int64(rvGetInt64(rv))) +} + +func (e *encoderSimpleIO) kUint(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint(rv))) +} + +func (e *encoderSimpleIO) kUint8(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint8(rv))) +} + +func (e *encoderSimpleIO) kUint16(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint16(rv))) +} + +func (e *encoderSimpleIO) kUint32(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint32(rv))) +} + +func (e *encoderSimpleIO) kUint64(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUint64(rv))) +} + +func (e *encoderSimpleIO) kUintptr(_ *encFnInfo, rv reflect.Value) { + e.e.EncodeUint(uint64(rvGetUintptr(rv))) +} + +func (e *encoderSimpleIO) kSeqFn(rt reflect.Type) (fn *encFnSimpleIO) { + + if rt = baseRT(rt); rt.Kind() != reflect.Interface { + fn = e.fn(rt) + } + return +} + +func (e *encoderSimpleIO) kArrayWMbs(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.haltOnMbsOddLen(l) + e.mapStart(l >> 1) + + var fn *encFnSimpleIO + builtin := ti.tielem.flagEncBuiltin + if !builtin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerMapKey + e.e.WriteMapElemKey(true) + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + 
e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + if j&1 == 0 { + e.c = containerMapKey + e.e.WriteMapElemKey(false) + } else { + e.mapElemValue() + } + } + e.c = 0 + e.e.WriteMapEnd() + +} + +func (e *encoderSimpleIO) kArrayW(rv reflect.Value, ti *typeInfo, isSlice bool) { + var l int + if isSlice { + l = rvLenSlice(rv) + } else { + l = rv.Len() + } + if l <= 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(l) + + var fn *encFnSimpleIO + if !ti.tielem.flagEncBuiltin { + fn = e.kSeqFn(ti.elem) + } + + j := 0 + e.c = containerArrayElem + e.e.WriteArrayElem(true) + builtin := ti.tielem.flagEncBuiltin + for { + rvv := rvArrayIndex(rv, j, ti, isSlice) + if builtin { + e.encodeIB(rv2i(baseRVRV(rvv))) + } else { + e.encodeValue(rvv, fn) + } + j++ + if j == l { + break + } + e.c = containerArrayElem + e.e.WriteArrayElem(false) + } + + e.c = 0 + e.e.WriteArrayEnd() +} + +func (e *encoderSimpleIO) kChan(f *encFnInfo, rv reflect.Value) { + if f.ti.chandir&uint8(reflect.RecvDir) == 0 { + halt.errorStr("send-only channel cannot be encoded") + } + if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) { + e.kSliceBytesChan(rv) + return + } + rtslice := reflect.SliceOf(f.ti.elem) + rv = chanToSlice(rv, rtslice, e.h.ChanRecvTimeout) + ti := e.h.getTypeInfo(rt2id(rtslice), rtslice) + if f.ti.mbs { + e.kArrayWMbs(rv, ti, true) + } else { + e.kArrayW(rv, ti, true) + } +} + +func (e *encoderSimpleIO) kSlice(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, true) + } else if f.ti.rtid == uint8SliceTypId || uint8TypId == rt2id(f.ti.elem) { + + e.e.EncodeBytes(rvGetBytes(rv)) + } else { + e.kArrayW(rv, f.ti, true) + } +} + +func (e *encoderSimpleIO) kArray(f *encFnInfo, rv reflect.Value) { + if f.ti.mbs { + e.kArrayWMbs(rv, f.ti, false) + } else if handleBytesWithinKArray && uint8TypId == rt2id(f.ti.elem) { + e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, nil)) + } else { + e.kArrayW(rv, f.ti, false) + } +} + +func (e *encoderSimpleIO) kSliceBytesChan(rv reflect.Value) { + + bs0 := e.blist.peek(32, true) + bs := bs0 + + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + +L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + + break L1 + } + } + default: + for b := range ch { + bs = append(bs, b) + } + } + + e.e.EncodeBytes(bs) + e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.blist.put(bs0) + } +} + +func (e *encoderSimpleIO) kStructFieldKey(keyType valueType, encName string) { + + if keyType == valueTypeString { + e.e.EncodeString(encName) + } else if keyType == valueTypeInt { + e.e.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + e.e.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + e.e.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64))) + } else { + halt.errorStr2("invalid struct key type: ", keyType.String()) + } + +} + +func (e *encoderSimpleIO) kStructSimple(f *encFnInfo, rv reflect.Value) { + _ = e.e + tisfi := f.ti.sfi.source() + + chkCirRef := e.h.CheckCircularRef + var si *structFieldInfo + var j int + + if f.ti.toArray || e.h.StructToArray { + if len(tisfi) == 0 { + e.e.WriteArrayEmpty() + return + } + e.arrayStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerArrayElem + 
e.e.WriteArrayElem(j == 0) + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } else { + if len(tisfi) == 0 { + e.e.WriteMapEmpty() + return + } + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + e.mapStart(len(tisfi)) + for j, si = range tisfi { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.EncodeStringNoEscape4Json(si.encName) + e.mapElemValue() + if si.encBuiltin { + e.encodeIB(rv2i(si.fieldNoAlloc(rv, true))) + } else { + e.encodeValue(si.fieldNoAlloc(rv, !chkCirRef), nil) + } + } + e.c = 0 + e.e.WriteMapEnd() + } +} + +func (e *encoderSimpleIO) kStruct(f *encFnInfo, rv reflect.Value) { + _ = e.e + ti := f.ti + toMap := !(ti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if ti.flagMissingFielder { + toMap = true + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + } else if ti.flagMissingFielderPtr { + toMap = true + if rv.CanAddr() { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder).CodecMissingFields() + } else { + mf = rv2i(e.addrRV(rv, ti.rt, ti.ptr)).(MissingFielder).CodecMissingFields() + } + } + newlen := len(mf) + tisfi := ti.sfi.source() + newlen += len(tisfi) + + var fkvs = e.slist.get(newlen)[:newlen] + + recur := e.h.RecursiveEmptyCheck + chkCirRef := e.h.CheckCircularRef + + var xlen int + + var kv sfiRv + var j int + var sf encStructFieldObj + if toMap { + newlen = 0 + if e.h.Canonical { + tisfi = f.ti.sfi.sorted() + } + for _, si := range tisfi { + + if si.omitEmpty { + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyValue(kv.r, e.h.TypeInfos, recur) { + continue + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[newlen] = kv + newlen++ + } + + var mf2s []stringIntf + if len(mf) != 0 { + mf2s = make([]stringIntf, 0, len(mf)) + for k, v := range mf { + if k == "" { + continue + } + if ti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur) { + continue + } + mf2s = append(mf2s, stringIntf{k, v}) + } + } + + xlen = newlen + len(mf2s) + if xlen == 0 { + e.e.WriteMapEmpty() + goto END + } + + e.mapStart(xlen) + + if len(mf2s) != 0 && e.h.Canonical { + mf2w := make([]encStructFieldObj, newlen+len(mf2s)) + for j = 0; j < newlen; j++ { + kv = fkvs[j] + mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, true, + !kv.v.encNameEscape4Json, kv.v.encBuiltin} + } + for _, v := range mf2s { + mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false, false} + j++ + } + sort.Sort((encStructFieldObjSlice)(mf2w)) + for j, sf = range mf2w { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && sf.noEsc4json { + e.e.EncodeStringNoEscape4Json(sf.key) + } else { + e.kStructFieldKey(ti.keyType, sf.key) + } + e.mapElemValue() + if sf.isRv { + if sf.builtin { + e.encodeIB(rv2i(baseRVRV(sf.rv))) + } else { + e.encodeValue(sf.rv, nil) + } + } else { + if !e.encodeBuiltin(sf.intf) { + e.encodeR(reflect.ValueOf(sf.intf)) + } + + } + } + } else { + keytyp := ti.keyType + for j = 0; j < newlen; j++ { + kv = fkvs[j] + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if ti.keyType == valueTypeString && !kv.v.encNameEscape4Json { + e.e.EncodeStringNoEscape4Json(kv.v.encName) + } else { + e.kStructFieldKey(keytyp, kv.v.encName) + } + e.mapElemValue() + if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + for _, v := range mf2s { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 
0) + e.kStructFieldKey(keytyp, v.v) + e.mapElemValue() + if !e.encodeBuiltin(v.i) { + e.encodeR(reflect.ValueOf(v.i)) + } + + j++ + } + } + + e.c = 0 + e.e.WriteMapEnd() + } else { + newlen = len(tisfi) + for i, si := range tisfi { + + if si.omitEmpty { + + kv.r = si.fieldNoAlloc(rv, false) + if isEmptyContainerValue(kv.r, e.h.TypeInfos, recur) { + kv.r = reflect.Value{} + } + } else { + kv.r = si.fieldNoAlloc(rv, si.encBuiltin || !chkCirRef) + } + kv.v = si + fkvs[i] = kv + } + + if newlen == 0 { + e.e.WriteArrayEmpty() + goto END + } + + e.arrayStart(newlen) + for j = 0; j < newlen; j++ { + e.c = containerArrayElem + e.e.WriteArrayElem(j == 0) + kv = fkvs[j] + if !kv.r.IsValid() { + e.e.EncodeNil() + } else if kv.v.encBuiltin { + e.encodeIB(rv2i(baseRVRV(kv.r))) + } else { + e.encodeValue(kv.r, nil) + } + } + e.c = 0 + e.e.WriteArrayEnd() + } + +END: + + e.slist.put(fkvs) +} + +func (e *encoderSimpleIO) kMap(f *encFnInfo, rv reflect.Value) { + _ = e.e + l := rvLenMap(rv) + if l == 0 { + e.e.WriteMapEmpty() + return + } + e.mapStart(l) + + var keyFn, valFn *encFnSimpleIO + + ktypeKind := reflect.Kind(f.ti.keykind) + vtypeKind := reflect.Kind(f.ti.elemkind) + + rtval := f.ti.elem + rtvalkind := vtypeKind + for rtvalkind == reflect.Ptr { + rtval = rtval.Elem() + rtvalkind = rtval.Kind() + } + if rtvalkind != reflect.Interface { + valFn = e.fn(rtval) + } + + var rvv = mapAddrLoopvarRV(f.ti.elem, vtypeKind) + + rtkey := f.ti.key + var keyTypeIsString = stringTypId == rt2id(rtkey) + if keyTypeIsString { + keyFn = e.fn(rtkey) + } else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + keyFn = e.fn(rtkey) + } + } + + if e.h.Canonical { + e.kMapCanonical(f.ti, rv, rvv, keyFn, valFn) + e.c = 0 + e.e.WriteMapEnd() + return + } + + var rvk = mapAddrLoopvarRV(f.ti.key, ktypeKind) + + var it mapIter + mapRange(&it, rv, rvk, rvv, true) + + kbuiltin := f.ti.tikey.flagEncBuiltin + vbuiltin := f.ti.tielem.flagEncBuiltin + for j := 0; it.Next(); j++ { + rv = it.Key() + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + if keyTypeIsString { + e.e.EncodeString(rvGetString(rv)) + } else if kbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(rv, keyFn) + } + e.mapElemValue() + rv = it.Value() + if vbuiltin { + e.encodeIB(rv2i(baseRVRV(rv))) + } else { + e.encodeValue(it.Value(), valFn) + } + } + it.Done() + + e.c = 0 + e.e.WriteMapEnd() +} + +func (e *encoderSimpleIO) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, keyFn, valFn *encFnSimpleIO) { + _ = e.e + + rtkey := ti.key + rtkeydecl := rtkey.PkgPath() == "" && rtkey.Name() != "" + + mks := rv.MapKeys() + rtkeyKind := rtkey.Kind() + mparams := getMapReqParams(ti) + + switch rtkeyKind { + case reflect.Bool: + + if len(mks) == 2 && mks[0].Bool() { + mks[0], mks[1] = mks[1], mks[0] + } + for i := range mks { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeBool(mks[i].Bool()) + } else { + e.encodeValueNonNil(mks[i], keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mks[i], rvv, mparams), valFn) + } + case reflect.String: + mksv := make([]orderedRv[string], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rvGetString(k) + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeString(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } 
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]orderedRv[uint64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeUint(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]orderedRv[int64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeInt(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float32: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat32(float32(mksv[i].v)) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + case reflect.Float64: + mksv := make([]orderedRv[float64], len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + slices.SortFunc(mksv, cmpOrderedRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + if rtkeydecl { + e.e.EncodeFloat64(mksv[i].v) + } else { + e.encodeValueNonNil(mksv[i].r, keyFn) + } + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + default: + if rtkey == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + slices.SortFunc(mksv, cmpTimeRv) + for i := range mksv { + e.c = containerMapKey + e.e.WriteMapElemKey(i == 0) + e.e.EncodeTime(mksv[i].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksv[i].r, rvv, mparams), valFn) + } + break + } + + bs0 := e.blist.get(len(mks) * 16) + mksv := bs0 + mksbv := make([]bytesRv, len(mks)) + + sideEncode(e.hh, &e.h.sideEncPool, func(se encoderI) { + se.ResetBytes(&mksv) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + se.setContainerState(containerMapKey) + se.encodeR(baseRVRV(k)) + se.atEndOfEncode() + se.writerEnd() + v.r = k + v.v = mksv[l:] + } + }) + + slices.SortFunc(mksbv, cmpBytesRv) + for j := range mksbv { + e.c = containerMapKey + e.e.WriteMapElemKey(j == 0) + e.e.writeBytesAsis(mksbv[j].v) + e.mapElemValue() + e.encodeValue(mapGet(rv, mksbv[j].r, rvv, mparams), valFn) + } + e.blist.put(mksv) + if !byteSliceSameData(bs0, mksv) { + e.blist.put(bs0) + } + } +} + +func (e *encoderSimpleIO) init(h Handle) { + initHandle(h) + callMake(&e.e) + e.hh = h + e.h = h.getBasicHandle() + + e.err = errEncoderNotInitialized + + e.fp = e.e.init(h, &e.encoderBase, e).(*fastpathEsSimpleIO) + + if e.bytes { + e.rtidFn = &e.h.rtidFnsEncBytes + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtBytes + } else { + e.rtidFn = &e.h.rtidFnsEncIO + e.rtidFnNoExt = &e.h.rtidFnsEncNoExtIO + } + + e.reset() +} + +func (e *encoderSimpleIO) reset() { + e.e.reset() + if e.ci != nil { + e.ci = e.ci[:0] + } + e.c = 0 + e.calls = 0 + e.seq = 0 + e.err 
= nil +} + +func (e *encoderSimpleIO) Encode(v interface{}) (err error) { + + defer panicValToErr(e, callRecoverSentinel, &e.err, &err, debugging) + e.mustEncode(v) + return +} + +func (e *encoderSimpleIO) MustEncode(v interface{}) { + defer panicValToErr(e, callRecoverSentinel, &e.err, nil, true) + e.mustEncode(v) + return +} + +func (e *encoderSimpleIO) mustEncode(v interface{}) { + halt.onerror(e.err) + if e.hh == nil { + halt.onerror(errNoFormatHandle) + } + + e.calls++ + if !e.encodeBuiltin(v) { + e.encodeR(reflect.ValueOf(v)) + } + + e.calls-- + if e.calls == 0 { + e.e.atEndOfEncode() + e.e.writerEnd() + } +} + +func (e *encoderSimpleIO) encodeI(iv interface{}) { + if !e.encodeBuiltin(iv) { + e.encodeR(reflect.ValueOf(iv)) + } +} + +func (e *encoderSimpleIO) encodeIB(iv interface{}) { + if !e.encodeBuiltin(iv) { + + halt.errorStr("[should not happen] invalid type passed to encodeBuiltin") + } +} + +func (e *encoderSimpleIO) encodeR(base reflect.Value) { + e.encodeValue(base, nil) +} + +func (e *encoderSimpleIO) encodeBuiltin(iv interface{}) (ok bool) { + ok = true + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + + case Raw: + e.rawBytes(v) + case string: + e.e.EncodeString(v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case complex64: + e.encodeComplex64(v) + case complex128: + e.encodeComplex128(v) + case time.Time: + e.e.EncodeTime(v) + case []byte: + e.e.EncodeBytes(v) + default: + + ok = !skipFastpathTypeSwitchInDirectCall && e.dh.fastpathEncodeTypeSwitch(iv, e) + } + return +} + +func (e *encoderSimpleIO) encodeValue(rv reflect.Value, fn *encFnSimpleIO) { + + var ciPushes int + + var rvp reflect.Value + var rvpValid bool + +RV: + switch rv.Kind() { + case reflect.Ptr: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = true + rvp = rv + rv = rv.Elem() + + if e.h.CheckCircularRef && e.ci.canPushElemKind(rv.Kind()) { + e.ci.push(rv2i(rvp)) + ciPushes++ + } + goto RV + case reflect.Interface: + if rvIsNil(rv) { + e.e.EncodeNil() + goto END + } + rvpValid = false + rvp = reflect.Value{} + rv = rv.Elem() + fn = nil + goto RV + case reflect.Map: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteMapEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Slice, reflect.Chan: + if rvIsNil(rv) { + if e.h.NilCollectionToZeroLength { + e.e.WriteArrayEmpty() + } else { + e.e.EncodeNil() + } + goto END + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + goto END + } + + if fn == nil { + fn = e.fn(rv.Type()) + } + + if !fn.i.addrE { + + } else if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + fn.fe(e, &fn.i, rv) + +END: + if ciPushes > 0 { + e.ci.pop(ciPushes) + } +} + +func (e *encoderSimpleIO) encodeValueNonNil(rv reflect.Value, fn *encFnSimpleIO) { + + if fn.i.addrE { + if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else { + rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr) + } + } + fn.fe(e, &fn.i, rv) +} + +func (e *encoderSimpleIO) 
encodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + e.encodeValue(baseRV(v), e.fn(t)) + } else { + e.encodeValue(baseRV(v), e.fnNoExt(t)) + } +} + +func (e *encoderSimpleIO) marshalUtf8(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeString(stringView(bs)) + } +} + +func (e *encoderSimpleIO) marshalAsis(bs []byte, fnerr error) { + halt.onerror(fnerr) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.writeBytesAsis(bs) + } +} + +func (e *encoderSimpleIO) marshalRaw(bs []byte, fnerr error) { + halt.onerror(fnerr) + e.e.EncodeBytes(bs) +} + +func (e *encoderSimpleIO) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + halt.errorBytes("Raw values cannot be encoded: ", v) + } + e.e.writeBytesAsis(v) +} + +func (e *encoderSimpleIO) fn(t reflect.Type) *encFnSimpleIO { + return e.dh.encFnViaBH(t, e.rtidFn, e.h, e.fp, false) +} + +func (e *encoderSimpleIO) fnNoExt(t reflect.Type) *encFnSimpleIO { + return e.dh.encFnViaBH(t, e.rtidFnNoExt, e.h, e.fp, true) +} + +func (e *encoderSimpleIO) mapStart(length int) { + e.e.WriteMapStart(length) + e.c = containerMapStart +} + +func (e *encoderSimpleIO) mapElemValue() { + e.e.WriteMapElemValue() + e.c = containerMapValue +} + +func (e *encoderSimpleIO) arrayStart(length int) { + e.e.WriteArrayStart(length) + e.c = containerArrayStart +} + +func (e *encoderSimpleIO) writerEnd() { + e.e.writerEnd() +} + +func (e *encoderSimpleIO) atEndOfEncode() { + e.e.atEndOfEncode() +} + +func (e *encoderSimpleIO) Reset(w io.Writer) { + if e.bytes { + halt.onerror(errEncNoResetBytesWithWriter) + } + e.reset() + if w == nil { + w = io.Discard + } + e.e.resetOutIO(w) +} + +func (e *encoderSimpleIO) ResetBytes(out *[]byte) { + if !e.bytes { + halt.onerror(errEncNoResetWriterWithBytes) + } + e.resetBytes(out) +} + +func (e *encoderSimpleIO) resetBytes(out *[]byte) { + e.reset() + if out == nil { + out = &bytesEncAppenderDefOut + } + e.e.resetOutBytes(out) +} + +func (helperEncDriverSimpleIO) newEncoderBytes(out *[]byte, h Handle) *encoderSimpleIO { + var c1 encoderSimpleIO + c1.bytes = true + c1.init(h) + c1.ResetBytes(out) + return &c1 +} + +func (helperEncDriverSimpleIO) newEncoderIO(out io.Writer, h Handle) *encoderSimpleIO { + var c1 encoderSimpleIO + c1.bytes = false + c1.init(h) + c1.Reset(out) + return &c1 +} + +func (helperEncDriverSimpleIO) encFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathEsSimpleIO) (f *fastpathESimpleIO, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperEncDriverSimpleIO) encFindRtidFn(s []encRtidFnSimpleIO, rtid uintptr) (i uint, fn *encFnSimpleIO) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperEncDriverSimpleIO) encFromRtidFnSlice(fns *atomicRtidFnSlice) (s []encRtidFnSimpleIO) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]encRtidFnSimpleIO](v)) + } + return +} + +func (dh helperEncDriverSimpleIO) encFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, + x *BasicHandle, fp *fastpathEsSimpleIO, checkExt bool) (fn *encFnSimpleIO) { + return dh.encFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, 
x.binaryHandle, x.jsonHandle) +} + +func (dh helperEncDriverSimpleIO) encFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsSimpleIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnSimpleIO) { + rtid := rt2id(rt) + var sp []encRtidFnSimpleIO = dh.encFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.encFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.encFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperEncDriverSimpleIO) encFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathEsSimpleIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnSimpleIO) { + + fn = dh.encFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []encRtidFnSimpleIO + mu.Lock() + sp = dh.encFromRtidFnSlice(fns) + + if sp == nil { + sp = []encRtidFnSimpleIO{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.encFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]encRtidFnSimpleIO, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = encRtidFnSimpleIO{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperEncDriverSimpleIO) encFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathEsSimpleIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *encFnSimpleIO) { + fn = new(encFnSimpleIO) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + if rtid == timeTypId && timeBuiltin { + fn.fe = (*encoderSimpleIO).kTime + } else if rtid == rawTypId { + fn.fe = (*encoderSimpleIO).raw + } else if rtid == rawExtTypId { + fn.fe = (*encoderSimpleIO).rawExt + fi.addrE = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*encoderSimpleIO).ext + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fe = (*encoderSimpleIO).selferMarshal + fi.addrE = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fe = (*encoderSimpleIO).binaryMarshal + fi.addrE = ti.flagBinaryMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fe = (*encoderSimpleIO).jsonMarshal + fi.addrE = ti.flagJsonMarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fe = (*encoderSimpleIO).textMarshal + fi.addrE = ti.flagTextMarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fe = fp[idx].encfn + } + } else { + + xfe, xrt := dh.encFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf := xfe.encfn + fn.fe = func(e *encoderSimpleIO, xf *encFnInfo, xrv 
reflect.Value) { + xfnf(e, xf, rvConvert(xrv, xrt)) + } + } + } + } + if fn.fe == nil { + switch rk { + case reflect.Bool: + fn.fe = (*encoderSimpleIO).kBool + case reflect.String: + + fn.fe = (*encoderSimpleIO).kString + case reflect.Int: + fn.fe = (*encoderSimpleIO).kInt + case reflect.Int8: + fn.fe = (*encoderSimpleIO).kInt8 + case reflect.Int16: + fn.fe = (*encoderSimpleIO).kInt16 + case reflect.Int32: + fn.fe = (*encoderSimpleIO).kInt32 + case reflect.Int64: + fn.fe = (*encoderSimpleIO).kInt64 + case reflect.Uint: + fn.fe = (*encoderSimpleIO).kUint + case reflect.Uint8: + fn.fe = (*encoderSimpleIO).kUint8 + case reflect.Uint16: + fn.fe = (*encoderSimpleIO).kUint16 + case reflect.Uint32: + fn.fe = (*encoderSimpleIO).kUint32 + case reflect.Uint64: + fn.fe = (*encoderSimpleIO).kUint64 + case reflect.Uintptr: + fn.fe = (*encoderSimpleIO).kUintptr + case reflect.Float32: + fn.fe = (*encoderSimpleIO).kFloat32 + case reflect.Float64: + fn.fe = (*encoderSimpleIO).kFloat64 + case reflect.Complex64: + fn.fe = (*encoderSimpleIO).kComplex64 + case reflect.Complex128: + fn.fe = (*encoderSimpleIO).kComplex128 + case reflect.Chan: + fn.fe = (*encoderSimpleIO).kChan + case reflect.Slice: + fn.fe = (*encoderSimpleIO).kSlice + case reflect.Array: + fn.fe = (*encoderSimpleIO).kArray + case reflect.Struct: + if ti.simple { + fn.fe = (*encoderSimpleIO).kStructSimple + } else { + fn.fe = (*encoderSimpleIO).kStruct + } + case reflect.Map: + fn.fe = (*encoderSimpleIO).kMap + case reflect.Interface: + + fn.fe = (*encoderSimpleIO).kErr + default: + + fn.fe = (*encoderSimpleIO).kErr + } + } + } + return +} +func (d *decoderSimpleIO) rawExt(f *decFnInfo, rv reflect.Value) { + d.d.DecodeRawExt(rv2i(rv).(*RawExt)) +} + +func (d *decoderSimpleIO) ext(f *decFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn) +} + +func (d *decoderSimpleIO) selferUnmarshal(_ *decFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(&Decoder{d}) +} + +func (d *decoderSimpleIO) binaryUnmarshal(_ *decFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs, _ := d.d.DecodeBytes() + fnerr := bm.UnmarshalBinary(xbs) + halt.onerror(fnerr) +} + +func (d *decoderSimpleIO) textUnmarshal(_ *decFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(bytesOKs(d.d.DecodeStringAsBytes())) + halt.onerror(fnerr) +} + +func (d *decoderSimpleIO) jsonUnmarshal(_ *decFnInfo, rv reflect.Value) { + d.jsonUnmarshalV(rv2i(rv).(jsonUnmarshaler)) +} + +func (d *decoderSimpleIO) jsonUnmarshalV(tm jsonUnmarshaler) { + + halt.onerror(tm.UnmarshalJSON(d.d.nextValueBytes())) +} + +func (d *decoderSimpleIO) kErr(_ *decFnInfo, rv reflect.Value) { + halt.errorf("unsupported decoding kind: %s, for %#v", rv.Kind(), rv) + +} + +func (d *decoderSimpleIO) raw(_ *decFnInfo, rv reflect.Value) { + rvSetBytes(rv, d.rawBytes()) +} + +func (d *decoderSimpleIO) kString(_ *decFnInfo, rv reflect.Value) { + rvSetString(rv, d.detach2Str(d.d.DecodeStringAsBytes())) +} + +func (d *decoderSimpleIO) kBool(_ *decFnInfo, rv reflect.Value) { + rvSetBool(rv, d.d.DecodeBool()) +} + +func (d *decoderSimpleIO) kTime(_ *decFnInfo, rv reflect.Value) { + rvSetTime(rv, d.d.DecodeTime()) +} + +func (d *decoderSimpleIO) kFloat32(_ *decFnInfo, rv reflect.Value) { + rvSetFloat32(rv, d.d.DecodeFloat32()) +} + +func (d *decoderSimpleIO) kFloat64(_ *decFnInfo, rv reflect.Value) { + rvSetFloat64(rv, d.d.DecodeFloat64()) +} + +func (d *decoderSimpleIO) kComplex64(_ *decFnInfo, rv reflect.Value) { + 
rvSetComplex64(rv, complex(d.d.DecodeFloat32(), 0)) +} + +func (d *decoderSimpleIO) kComplex128(_ *decFnInfo, rv reflect.Value) { + rvSetComplex128(rv, complex(d.d.DecodeFloat64(), 0)) +} + +func (d *decoderSimpleIO) kInt(_ *decFnInfo, rv reflect.Value) { + rvSetInt(rv, int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))) +} + +func (d *decoderSimpleIO) kInt8(_ *decFnInfo, rv reflect.Value) { + rvSetInt8(rv, int8(chkOvf.IntV(d.d.DecodeInt64(), 8))) +} + +func (d *decoderSimpleIO) kInt16(_ *decFnInfo, rv reflect.Value) { + rvSetInt16(rv, int16(chkOvf.IntV(d.d.DecodeInt64(), 16))) +} + +func (d *decoderSimpleIO) kInt32(_ *decFnInfo, rv reflect.Value) { + rvSetInt32(rv, int32(chkOvf.IntV(d.d.DecodeInt64(), 32))) +} + +func (d *decoderSimpleIO) kInt64(_ *decFnInfo, rv reflect.Value) { + rvSetInt64(rv, d.d.DecodeInt64()) +} + +func (d *decoderSimpleIO) kUint(_ *decFnInfo, rv reflect.Value) { + rvSetUint(rv, uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderSimpleIO) kUintptr(_ *decFnInfo, rv reflect.Value) { + rvSetUintptr(rv, uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))) +} + +func (d *decoderSimpleIO) kUint8(_ *decFnInfo, rv reflect.Value) { + rvSetUint8(rv, uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))) +} + +func (d *decoderSimpleIO) kUint16(_ *decFnInfo, rv reflect.Value) { + rvSetUint16(rv, uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))) +} + +func (d *decoderSimpleIO) kUint32(_ *decFnInfo, rv reflect.Value) { + rvSetUint32(rv, uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))) +} + +func (d *decoderSimpleIO) kUint64(_ *decFnInfo, rv reflect.Value) { + rvSetUint64(rv, d.d.DecodeUint64()) +} + +func (d *decoderSimpleIO) kInterfaceNaked(f *decFnInfo) (rvn reflect.Value) { + + n := d.naked() + d.d.DecodeNaked() + + if decFailNonEmptyIntf && f.ti.numMeth > 0 { + halt.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + } + + switch n.v { + case valueTypeMap: + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapStrIntfTypId { + var v2 map[string]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + + rvn = rvZeroAddrK(d.h.MapType, reflect.Map) + d.decodeValue(rvn, nil) + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = rv4iptr(&v2).Elem() + } else if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvZeroAddrK(d.h.SliceType, reflect.Slice) + d.decodeValue(rvn, nil) + } + if d.h.PreferArrayOverSlice { + rvn = rvGetArray4Slice(rvn) + } + case valueTypeExt: + tag, bytes := n.u, n.l + bfn := d.h.getExtForTag(tag) + var re = RawExt{Tag: tag} + if bytes == nil { + + if bfn == nil { + d.decode(&re.Value) + rvn = rv4iptr(&re).Elem() + } else if bfn.ext == SelfExt { + rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind()) + d.decodeValue(rvn, d.fnNoExt(bfn.rt)) + } else { + rvn = reflect.New(bfn.rt) + d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext) + rvn = rvn.Elem() + } + } else { + + if bfn == nil { + re.setData(bytes, false) + rvn = rv4iptr(&re).Elem() + } else { + rvn = reflect.New(bfn.rt) + if bfn.ext == SelfExt { + sideDecode(d.hh, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv2i(rvn), bytes, bfn.rt, true) }) 
+ } else { + bfn.ext.ReadExt(rv2i(rvn), bytes) + } + rvn = rvn.Elem() + } + } + + if d.h.PreferPointerForStructOrArray && rvn.CanAddr() { + if rk := rvn.Kind(); rk == reflect.Array || rk == reflect.Struct { + rvn = rvn.Addr() + } + } + case valueTypeNil: + + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + halt.errorStr2("kInterfaceNaked: unexpected valueType: ", n.v.String()) + } + return +} + +func (d *decoderSimpleIO) kInterface(f *decFnInfo, rv reflect.Value) { + + isnilrv := rvIsNil(rv) + + var rvn reflect.Value + + if d.h.InterfaceReset { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } else if !isnilrv { + decSetNonNilRV2Zero4Intf(rv) + } + return + } + } else if isnilrv { + + rvn = d.h.intf2impl(f.ti.rtid) + if !rvn.IsValid() { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rvSetIntf(rv, rvn) + } + return + } + } else { + + rvn = rv.Elem() + } + + canDecode, _ := isDecodeable(rvn) + + if !canDecode { + rvn2 := d.oneShotAddrRV(rvn.Type(), rvn.Kind()) + rvSetDirect(rvn2, rvn) + rvn = rvn2 + } + + d.decodeValue(rvn, nil) + rvSetIntf(rv, rvn) +} + +func (d *decoderSimpleIO) kStructField(si *structFieldInfo, rv reflect.Value) { + if d.d.TryNil() { + rv = si.fieldNoAlloc(rv, true) + if rv.IsValid() { + decSetNonNilRV2Zero(rv) + } + } else if si.decBuiltin { + rv = rvAddr(si.fieldAlloc(rv), si.ptrTyp) + d.decode(rv2i(rv)) + } else { + fn := d.fn(si.baseTyp) + rv = si.fieldAlloc(rv) + if fn.i.addrD { + rv = rvAddr(rv, si.ptrTyp) + } + fn.fd(d, &fn.i, rv) + } +} + +func (d *decoderSimpleIO) kStructSimple(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var rvkencname []byte + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.mapElemKey(j == 0) + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderSimpleIO) kStruct(f *decFnInfo, rv reflect.Value) { + _ = d.d + ctyp := d.d.ContainerType() + ti := f.ti + var mf MissingFielder + if ti.flagMissingFielder { + mf = rv2i(rv).(MissingFielder) + } else if ti.flagMissingFielderPtr { + mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := d.mapStart(d.d.ReadMapStart()) + if containerLen == 0 { + d.mapEnd() + return + } + hasLen := containerLen >= 0 + var name2 []byte + var rvkencname []byte + tkt := ti.keyType + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { 
+ d.mapElemKey(j == 0) + + if tkt == valueTypeString { + sab, att := d.d.DecodeStringAsBytes() + rvkencname = d.usableStructFieldNameBytes(rvkencname, sab, att) + } else if tkt == valueTypeInt { + rvkencname = strconv.AppendInt(d.b[:0], d.d.DecodeInt64(), 10) + } else if tkt == valueTypeUint { + rvkencname = strconv.AppendUint(d.b[:0], d.d.DecodeUint64(), 10) + } else if tkt == valueTypeFloat { + rvkencname = strconv.AppendFloat(d.b[:0], d.d.DecodeFloat64(), 'f', -1, 64) + } else { + halt.errorStr2("invalid struct key type: ", ti.keyType.String()) + } + + d.mapElemValue() + if si := ti.siForEncName(rvkencname); si != nil { + d.kStructField(si, rv) + } else if mf != nil { + + name2 = append(name2[:0], rvkencname...) + var f interface{} + d.decode(&f) + if !mf.CodecMissingField(name2, f) && d.h.ErrorIfNoField { + halt.errorStr2("no matching struct field when decoding stream map with key: ", stringView(name2)) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + } + d.mapEnd() + } else if ctyp == valueTypeArray { + containerLen := d.arrayStart(d.d.ReadArrayStart()) + if containerLen == 0 { + d.arrayEnd() + return + } + + tisfi := ti.sfi.source() + hasLen := containerLen >= 0 + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + d.arrayElem(j == 0) + if j < len(tisfi) { + d.kStructField(tisfi[j], rv) + } else { + d.structFieldNotFound(j, "") + } + } + + d.arrayEnd() + } else { + halt.onerror(errNeedMapOrArrayDecodeToStruct) + } +} + +func (d *decoderSimpleIO) kSlice(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + rvCanset := rv.CanSet() + + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + rvbs := rvGetBytes(rv) + if rvCanset { + bs2, bst := d.decodeBytesInto(rvbs, false) + if bst != dBytesIntoParamOut { + rvSetBytes(rv, bs2) + } + } else { + + d.decodeBytesInto(rvbs[:len(rvbs):len(rvbs)], true) + } + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset { + if rvIsNil(rv) { + rvSetDirect(rv, rvSliceZeroCap(ti.rt)) + } else { + rvSetSliceLen(rv, 0) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem0Mut := !scalarBitset.isset(ti.elemkind) + rtelem := ti.elem + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnSimpleIO + + var rvChanged bool + + var rv0 = rv + var rv9 reflect.Value + + rvlen := rvLenSlice(rv) + rvcap := rvCapSlice(rv) + maxInitLen := d.maxInitLen() + hasLen := containerLenS >= 0 + if hasLen { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen1 := int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + if rvlen1 == rvlen { + } else if rvlen1 <= rvcap { + if rvCanset { + rvlen = rvlen1 + rvSetSliceLen(rv, rvlen) + } + } else if rvCanset { + rvlen = rvlen1 + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && rtelem0Mut { + rvCopySlice(rv, rv0, 
rtelem) + } + } else if containerLenS != rvlen { + if rvCanset { + rvlen = containerLenS + rvSetSliceLen(rv, rvlen) + } + } + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } + + var j int + for ; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if rvCanset { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen) + rvcap = rvlen + rvChanged = !rvCanset + } else { + halt.errorStr("cannot decode into non-settable slice") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + + if rvlen < rvcap { + rvlen = rvcap + if rvCanset { + rvSetSliceLen(rv, rvlen) + } else if rvChanged { + rv = rvSlice(rv, rvlen) + } else { + halt.onerror(errExpandSliceCannotChange) + } + } else { + if !(rvCanset || rvChanged) { + halt.onerror(errExpandSliceCannotChange) + } + rv, rvcap, rvCanset = rvGrowSlice(rv, f.ti, rvcap, 1) + + rvlen = rvcap + rvChanged = !rvCanset + } + } + + rv9 = rvArrayIndex(rv, j, f.ti, true) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if j < rvlen { + if rvCanset { + rvSetSliceLen(rv, j) + } else if rvChanged { + rv = rvSlice(rv, j) + } + + } else if j == 0 && rvIsNil(rv) { + if rvCanset { + rv = rvSliceZeroCap(ti.rt) + rvCanset = false + rvChanged = true + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } +} + +func (d *decoderSimpleIO) kArray(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + ctyp := d.d.ContainerType() + if handleBytesWithinKArray && (ctyp == valueTypeBytes || ctyp == valueTypeString) { + + if ti.elemkind != uint8(reflect.Uint8) { + halt.errorf("bytes/string in stream can decode into array of bytes, but not %v", ti.rt) + } + rvbs := rvGetArrayBytes(rv, nil) + d.decodeBytesInto(rvbs, true) + return + } + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var rv9 reflect.Value + + rvlen := rv.Len() + hasLen := containerLenS >= 0 + if hasLen && containerLenS > rvlen { + halt.errorf("cannot decode into array with length: %v, less than container length: %v", any(rvlen), any(containerLenS)) + } + + var elemReset = d.h.SliceElementReset + + var rtelemIsPtr bool + var rtelemElem reflect.Type + var fn *decFnSimpleIO + builtin := ti.tielem.flagDecBuiltin + if builtin { + rtelemIsPtr = ti.elemkind == uint8(reflect.Ptr) + if rtelemIsPtr { + rtelemElem = ti.elem.Elem() + } + } else { + fn = 
d.fn(rtelem) + } + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if j >= rvlen { + d.arrayCannotExpand(rvlen, j+1) + d.swallow() + continue + } + + rv9 = rvArrayIndex(rv, j, f.ti, false) + if elemReset { + rvSetZero(rv9) + } + if d.d.TryNil() { + rvSetZero(rv9) + } else if builtin { + if rtelemIsPtr { + if rvIsNil(rv9) { + rvSetDirect(rv9, reflect.New(rtelemElem)) + } + d.decode(rv2i(rv9)) + } else { + d.decode(rv2i(rvAddr(rv9, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rv9, fn) + } + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } +} + +func (d *decoderSimpleIO) kChan(f *decFnInfo, rv reflect.Value) { + _ = d.d + + ti := f.ti + if ti.chandir&uint8(reflect.SendDir) == 0 { + halt.errorStr("receive-only channel cannot be decoded") + } + ctyp := d.d.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + + if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) { + halt.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + bs2, _ := d.d.DecodeBytes() + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + return + } + + var rvCanset = rv.CanSet() + + var containerLenS int + isArray := ctyp == valueTypeArray + if isArray { + containerLenS = d.arrayStart(d.d.ReadArrayStart()) + } else if ctyp == valueTypeMap { + containerLenS = d.mapStart(d.d.ReadMapStart()) * 2 + } else { + halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String()) + } + + if containerLenS == 0 { + if rvCanset && rvIsNil(rv) { + rvSetDirect(rv, reflect.MakeChan(ti.rt, 0)) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + return + } + + rtelem := ti.elem + useTransient := decUseTransient && ti.elemkind != byte(reflect.Ptr) && ti.tielem.flagCanTransient + + for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() { + rtelem = rtelem.Elem() + } + + var fn *decFnSimpleIO + + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + var rvlen int + hasLen := containerLenS >= 0 + maxInitLen := d.maxInitLen() + + for j := 0; d.containerNext(j, containerLenS, hasLen); j++ { + if j == 0 { + if rvIsNil(rv) { + if hasLen { + rvlen = int(decInferLen(containerLenS, maxInitLen, uint(ti.elemsize))) + } else { + rvlen = decDefChanCap + } + if rvCanset { + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } else { + halt.errorStr("cannot decode into non-settable chan") + } + } + if fn == nil { + fn = d.fn(rtelem) + } + } + + if ctyp == valueTypeArray { + d.arrayElem(j == 0) + } else if j&1 == 0 { + d.mapElemKey(j == 0) + } else { + d.mapElemValue() + } + + if rv9.IsValid() { + rvSetZero(rv9) + } else if useTransient { + rv9 = d.perType.TransientAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } else { + rv9 = rvZeroAddrK(ti.elem, reflect.Kind(ti.elemkind)) + } + if !d.d.TryNil() { + d.decodeValueNoCheckNil(rv9, fn) + } + rv.Send(rv9) + } + if isArray { + d.arrayEnd() + } else { + d.mapEnd() + } + + if rvChanged { + rvSetDirect(rv0, rv) + } + +} + +func (d *decoderSimpleIO) kMap(f *decFnInfo, rv reflect.Value) { + _ = d.d + containerLen := d.mapStart(d.d.ReadMapStart()) + ti := f.ti + if rvIsNil(rv) { + rvlen := int(decInferLen(containerLen, d.maxInitLen(), uint(ti.keysize+ti.elemsize))) + rvSetDirect(rv, makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + 
d.mapEnd() + return + } + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := reflect.Kind(ti.elemkind) + ktypeKind := reflect.Kind(ti.keykind) + mparams := getMapReqParams(ti) + + vtypePtr := vtypeKind == reflect.Ptr + ktypePtr := ktypeKind == reflect.Ptr + + vTransient := decUseTransient && !vtypePtr && ti.tielem.flagCanTransient + + kTransient := vTransient && !ktypePtr && ti.tikey.flagCanTransient + + var vtypeElem reflect.Type + + var keyFn, valFn *decFnSimpleIO + var ktypeLo, vtypeLo = ktype, vtype + + if ktypeKind == reflect.Ptr { + for ktypeLo = ktype.Elem(); ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + } + + if vtypePtr { + vtypeElem = vtype.Elem() + for vtypeLo = vtypeElem; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + } + + rvkMut := !scalarBitset.isset(ti.keykind) + rvvMut := !scalarBitset.isset(ti.elemkind) + rvvCanNil := isnilBitset.isset(ti.elemkind) + + var rvk, rvkn, rvv, rvvn, rvva, rvvz reflect.Value + + var doMapGet, doMapSet bool + + if !d.h.MapValueReset { + if rvvMut && (vtypeKind != reflect.Interface || !d.h.InterfaceReset) { + doMapGet = true + rvva = mapAddrLoopvarRV(vtype, vtypeKind) + } + } + + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen >= 0 + + var kstr2bs []byte + var kstr string + + var mapKeyStringSharesBytesBuf bool + var att dBytesAttachState + + var vElem, kElem reflect.Type + kbuiltin := ti.tikey.flagDecBuiltin && ti.keykind != uint8(reflect.Slice) + vbuiltin := ti.tielem.flagDecBuiltin + if kbuiltin && ktypePtr { + kElem = ti.key.Elem() + } + if vbuiltin && vtypePtr { + vElem = ti.elem.Elem() + } + + for j := 0; d.containerNext(j, containerLen, hasLen); j++ { + mapKeyStringSharesBytesBuf = false + kstr = "" + if j == 0 { + + if kTransient { + rvk = d.perType.TransientAddr2K(ktype, ktypeKind) + } else { + rvk = rvZeroAddrK(ktype, ktypeKind) + } + if !rvkMut { + rvkn = rvk + } + if !rvvMut { + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + } + if !ktypeIsString && keyFn == nil { + keyFn = d.fn(ktypeLo) + } + if valFn == nil { + valFn = d.fn(vtypeLo) + } + } else if rvkMut { + rvSetZero(rvk) + } else { + rvk = rvkn + } + + d.mapElemKey(j == 0) + + if d.d.TryNil() { + rvSetZero(rvk) + } else if ktypeIsString { + kstr2bs, att = d.d.DecodeStringAsBytes() + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, att) + rvSetString(rvk, kstr) + } else { + if kbuiltin { + if ktypePtr { + if rvIsNil(rvk) { + rvSetDirect(rvk, reflect.New(kElem)) + } + d.decode(rv2i(rvk)) + } else { + d.decode(rv2i(rvAddr(rvk, ti.tikey.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvk, keyFn) + } + + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() && rvk2.Type() == uint8SliceTyp { + kstr2bs = rvGetBytes(rvk2) + kstr, mapKeyStringSharesBytesBuf = d.bytes2Str(kstr2bs, dBytesAttachView) + rvSetIntf(rvk, rv4istr(kstr)) + } + + } + } + + if mapKeyStringSharesBytesBuf && d.bufio { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + mapKeyStringSharesBytesBuf = false + } + + d.mapElemValue() + + if d.d.TryNil() { + if mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + + if !rvvz.IsValid() { + rvvz = rvZeroK(vtype, vtypeKind) + } + mapSet(rv, rvk, rvvz, mparams) + continue + } + + 
doMapSet = true + + if !rvvMut { + rvv = rvvn + } else if !doMapGet { + goto NEW_RVV + } else { + rvv = mapGet(rv, rvk, rvva, mparams) + if !rvv.IsValid() || (rvvCanNil && rvIsNil(rvv)) { + goto NEW_RVV + } + switch vtypeKind { + case reflect.Ptr, reflect.Map: + doMapSet = false + case reflect.Interface: + + rvvn = rvv.Elem() + if k := rvvn.Kind(); (k == reflect.Ptr || k == reflect.Map) && !rvIsNil(rvvn) { + d.decodeValueNoCheckNil(rvvn, nil) + continue + } + + rvvn = rvZeroAddrK(vtype, vtypeKind) + rvSetIntf(rvvn, rvv) + rvv = rvvn + default: + + if vTransient { + rvvn = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvvn = rvZeroAddrK(vtype, vtypeKind) + } + rvSetDirect(rvvn, rvv) + rvv = rvvn + } + } + goto DECODE_VALUE_NO_CHECK_NIL + + NEW_RVV: + if vtypePtr { + rvv = reflect.New(vtypeElem) + } else if vTransient { + rvv = d.perType.TransientAddrK(vtype, vtypeKind) + } else { + rvv = rvZeroAddrK(vtype, vtypeKind) + } + + DECODE_VALUE_NO_CHECK_NIL: + if doMapSet && mapKeyStringSharesBytesBuf { + if ktypeIsString { + rvSetString(rvk, d.detach2Str(kstr2bs, att)) + } else { + rvSetIntf(rvk, rv4istr(d.detach2Str(kstr2bs, att))) + } + } + if vbuiltin { + if vtypePtr { + if rvIsNil(rvv) { + rvSetDirect(rvv, reflect.New(vElem)) + } + d.decode(rv2i(rvv)) + } else { + d.decode(rv2i(rvAddr(rvv, ti.tielem.ptr))) + } + } else { + d.decodeValueNoCheckNil(rvv, valFn) + } + if doMapSet { + mapSet(rv, rvk, rvv, mparams) + } + } + + d.mapEnd() +} + +func (d *decoderSimpleIO) init(h Handle) { + initHandle(h) + callMake(&d.d) + d.hh = h + d.h = h.getBasicHandle() + + d.err = errDecoderNotInitialized + + if d.h.InternString && d.is == nil { + d.is.init() + } + + d.fp = d.d.init(h, &d.decoderBase, d).(*fastpathDsSimpleIO) + + if d.bytes { + d.rtidFn = &d.h.rtidFnsDecBytes + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtBytes + } else { + d.bufio = d.h.ReaderBufferSize > 0 + d.rtidFn = &d.h.rtidFnsDecIO + d.rtidFnNoExt = &d.h.rtidFnsDecNoExtIO + } + + d.reset() + +} + +func (d *decoderSimpleIO) reset() { + d.d.reset() + d.err = nil + d.c = 0 + d.depth = 0 + d.calls = 0 + + d.maxdepth = decDefMaxDepth + if d.h.MaxDepth > 0 { + d.maxdepth = d.h.MaxDepth + } + d.mtid = 0 + d.stid = 0 + d.mtr = false + d.str = false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + _, d.mtr = fastpathAvIndex(d.mtid) + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + _, d.str = fastpathAvIndex(d.stid) + } +} + +func (d *decoderSimpleIO) Reset(r io.Reader) { + if d.bytes { + halt.onerror(errDecNoResetBytesWithReader) + } + d.reset() + if r == nil { + r = &eofReader + } + d.d.resetInIO(r) +} + +func (d *decoderSimpleIO) ResetBytes(in []byte) { + if !d.bytes { + halt.onerror(errDecNoResetReaderWithBytes) + } + d.resetBytes(in) +} + +func (d *decoderSimpleIO) resetBytes(in []byte) { + d.reset() + if in == nil { + in = zeroByteSlice + } + d.d.resetInBytes(in) +} + +func (d *decoderSimpleIO) ResetString(s string) { + d.ResetBytes(bytesView(s)) +} + +func (d *decoderSimpleIO) Decode(v interface{}) (err error) { + + defer panicValToErr(d, callRecoverSentinel, &d.err, &err, debugging) + d.mustDecode(v) + return +} + +func (d *decoderSimpleIO) MustDecode(v interface{}) { + defer panicValToErr(d, callRecoverSentinel, &d.err, nil, true) + d.mustDecode(v) + return +} + +func (d *decoderSimpleIO) mustDecode(v interface{}) { + halt.onerror(d.err) + if d.hh == nil { + halt.onerror(errNoFormatHandle) + } + + d.calls++ + d.decode(v) + d.calls-- +} + +func (d *decoderSimpleIO) Release() {} + +func (d *decoderSimpleIO) 
swallow() { + d.d.nextValueBytes() +} + +func (d *decoderSimpleIO) nextValueBytes() []byte { + return d.d.nextValueBytes() +} + +func (d *decoderSimpleIO) decode(iv interface{}) { + _ = d.d + + rv, ok := isNil(iv, true) + if ok { + halt.onerror(errCannotDecodeIntoNil) + } + + switch v := iv.(type) { + + case *string: + *v = d.detach2Str(d.d.DecodeStringAsBytes()) + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + *v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *uintptr: + *v = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *float32: + *v = d.d.DecodeFloat32() + case *float64: + *v = d.d.DecodeFloat64() + case *complex64: + *v = complex(d.d.DecodeFloat32(), 0) + case *complex128: + *v = complex(d.d.DecodeFloat64(), 0) + case *[]byte: + *v, _ = d.decodeBytesInto(*v, false) + case []byte: + + d.decodeBytesInto(v[:len(v):len(v)], true) + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(rv4iptr(v), nil) + + case reflect.Value: + if ok, _ = isDecodeable(v); !ok { + d.haltAsNotDecodeable(v) + } + d.decodeValue(v, nil) + + default: + + if skipFastpathTypeSwitchInDirectCall || !d.dh.fastpathDecodeTypeSwitch(iv, d) { + if !rv.IsValid() { + rv = reflect.ValueOf(iv) + } + if ok, _ = isDecodeable(rv); !ok { + d.haltAsNotDecodeable(rv) + } + d.decodeValue(rv, nil) + } + } +} + +func (d *decoderSimpleIO) decodeValue(rv reflect.Value, fn *decFnSimpleIO) { + if d.d.TryNil() { + decSetNonNilRV2Zero(rv) + } else { + d.decodeValueNoCheckNil(rv, fn) + } +} + +func (d *decoderSimpleIO) decodeValueNoCheckNil(rv reflect.Value, fn *decFnSimpleIO) { + + var rvp reflect.Value + var rvpValid bool +PTR: + if rv.Kind() == reflect.Ptr { + rvpValid = true + if rvIsNil(rv) { + rvSetDirect(rv, reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + goto PTR + } + + if fn == nil { + fn = d.fn(rv.Type()) + } + if fn.i.addrD { + if rvpValid { + rv = rvp + } else if rv.CanAddr() { + rv = rvAddr(rv, fn.i.ti.ptr) + } else if fn.i.addrDf { + halt.errorStr("cannot decode into a non-pointer value") + } + } + fn.fd(d, &fn.i, rv) +} + +func (d *decoderSimpleIO) decodeAs(v interface{}, t reflect.Type, ext bool) { + if ext { + d.decodeValue(baseRV(v), d.fn(t)) + } else { + d.decodeValue(baseRV(v), d.fnNoExt(t)) + } +} + +func (d *decoderSimpleIO) structFieldNotFound(index int, rvkencname string) { + + if d.h.ErrorIfNoField { + if index >= 0 { + halt.errorInt("no matching struct field found when decoding stream array at index ", int64(index)) + } else if rvkencname != "" { + halt.errorStr2("no matching struct field found when decoding stream map with key ", rvkencname) + } + } + d.swallow() +} + +func (d *decoderSimpleIO) decodeBytesInto(out []byte, mustFit bool) (v []byte, state dBytesIntoState) { + v, att := d.d.DecodeBytes() + if cap(v) == 0 || (att >= dBytesAttachViewZerocopy && !mustFit) { + + return + } + if len(v) == 0 { + v = zeroByteSlice + return + } + if len(out) == len(v) { + state = 
dBytesIntoParamOut + } else if cap(out) >= len(v) { + out = out[:len(v)] + state = dBytesIntoParamOutSlice + } else if mustFit { + halt.errorf("bytes capacity insufficient for decoded bytes: got/expected: %d/%d", len(v), len(out)) + } else { + out = make([]byte, len(v)) + state = dBytesIntoNew + } + copy(out, v) + v = out + return +} + +func (d *decoderSimpleIO) rawBytes() (v []byte) { + + v = d.d.nextValueBytes() + if d.bytes && !d.h.ZeroCopy { + vv := make([]byte, len(v)) + copy(vv, v) + v = vv + } + return +} + +func (d *decoderSimpleIO) wrapErr(v error, err *error) { + *err = wrapCodecErr(v, d.hh.Name(), d.d.NumBytesRead(), false) +} + +func (d *decoderSimpleIO) NumBytesRead() int { + return d.d.NumBytesRead() +} + +func (d *decoderSimpleIO) containerNext(j, containerLen int, hasLen bool) bool { + + if hasLen { + return j < containerLen + } + return !d.d.CheckBreak() +} + +func (d *decoderSimpleIO) mapElemKey(firstTime bool) { + d.d.ReadMapElemKey(firstTime) + d.c = containerMapKey +} + +func (d *decoderSimpleIO) mapElemValue() { + d.d.ReadMapElemValue() + d.c = containerMapValue +} + +func (d *decoderSimpleIO) mapEnd() { + d.d.ReadMapEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderSimpleIO) arrayElem(firstTime bool) { + d.d.ReadArrayElem(firstTime) + d.c = containerArrayElem +} + +func (d *decoderSimpleIO) arrayEnd() { + d.d.ReadArrayEnd() + d.depthDecr() + d.c = 0 +} + +func (d *decoderSimpleIO) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) { + + var vv interface{} + d.decode(&vv) + ext.UpdateExt(v, vv) + +} + +func (d *decoderSimpleIO) fn(t reflect.Type) *decFnSimpleIO { + return d.dh.decFnViaBH(t, d.rtidFn, d.h, d.fp, false) +} + +func (d *decoderSimpleIO) fnNoExt(t reflect.Type) *decFnSimpleIO { + return d.dh.decFnViaBH(t, d.rtidFnNoExt, d.h, d.fp, true) +} + +func (helperDecDriverSimpleIO) newDecoderBytes(in []byte, h Handle) *decoderSimpleIO { + var c1 decoderSimpleIO + c1.bytes = true + c1.init(h) + c1.ResetBytes(in) + return &c1 +} + +func (helperDecDriverSimpleIO) newDecoderIO(in io.Reader, h Handle) *decoderSimpleIO { + var c1 decoderSimpleIO + c1.init(h) + c1.Reset(in) + return &c1 +} + +func (helperDecDriverSimpleIO) decFnloadFastpathUnderlying(ti *typeInfo, fp *fastpathDsSimpleIO) (f *fastpathDSimpleIO, u reflect.Type) { + rtid := rt2id(ti.fastpathUnderlying) + idx, ok := fastpathAvIndex(rtid) + if !ok { + return + } + f = &fp[idx] + if uint8(reflect.Array) == ti.kind { + u = reflect.ArrayOf(ti.rt.Len(), ti.elem) + } else { + u = f.rt + } + return +} + +func (helperDecDriverSimpleIO) decFindRtidFn(s []decRtidFnSimpleIO, rtid uintptr) (i uint, fn *decFnSimpleIO) { + + var h uint + var j = uint(len(s)) +LOOP: + if i < j { + h = (i + j) >> 1 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (helperDecDriverSimpleIO) decFromRtidFnSlice(fns *atomicRtidFnSlice) (s []decRtidFnSimpleIO) { + if v := fns.load(); v != nil { + s = *(lowLevelToPtr[[]decRtidFnSimpleIO](v)) + } + return +} + +func (dh helperDecDriverSimpleIO) decFnViaBH(rt reflect.Type, fns *atomicRtidFnSlice, x *BasicHandle, fp *fastpathDsSimpleIO, + checkExt bool) (fn *decFnSimpleIO) { + return dh.decFnVia(rt, fns, x.typeInfos(), &x.mu, x.extHandle, fp, + checkExt, x.CheckCircularRef, x.timeBuiltin, x.binaryHandle, x.jsonHandle) +} + +func (dh helperDecDriverSimpleIO) decFnVia(rt reflect.Type, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp 
*fastpathDsSimpleIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnSimpleIO) { + rtid := rt2id(rt) + var sp []decRtidFnSimpleIO = dh.decFromRtidFnSlice(fns) + if sp != nil { + _, fn = dh.decFindRtidFn(sp, rtid) + } + if fn == nil { + fn = dh.decFnViaLoader(rt, rtid, fns, tinfos, mu, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + } + return +} + +func (dh helperDecDriverSimpleIO) decFnViaLoader(rt reflect.Type, rtid uintptr, fns *atomicRtidFnSlice, + tinfos *TypeInfos, mu *sync.Mutex, exth extHandle, fp *fastpathDsSimpleIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnSimpleIO) { + + fn = dh.decFnLoad(rt, rtid, tinfos, exth, fp, checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json) + var sp []decRtidFnSimpleIO + mu.Lock() + sp = dh.decFromRtidFnSlice(fns) + + if sp == nil { + sp = []decRtidFnSimpleIO{{rtid, fn}} + fns.store(ptrToLowLevel(&sp)) + } else { + idx, fn2 := dh.decFindRtidFn(sp, rtid) + if fn2 == nil { + sp2 := make([]decRtidFnSimpleIO, len(sp)+1) + copy(sp2[idx+1:], sp[idx:]) + copy(sp2, sp[:idx]) + sp2[idx] = decRtidFnSimpleIO{rtid, fn} + fns.store(ptrToLowLevel(&sp2)) + } + } + mu.Unlock() + return +} + +func (dh helperDecDriverSimpleIO) decFnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, + exth extHandle, fp *fastpathDsSimpleIO, + checkExt, checkCircularRef, timeBuiltin, binaryEncoding, json bool) (fn *decFnSimpleIO) { + fn = new(decFnSimpleIO) + fi := &(fn.i) + ti := tinfos.get(rtid, rt) + fi.ti = ti + rk := reflect.Kind(ti.kind) + + fi.addrDf = true + + if rtid == timeTypId && timeBuiltin { + fn.fd = (*decoderSimpleIO).kTime + } else if rtid == rawTypId { + fn.fd = (*decoderSimpleIO).raw + } else if rtid == rawExtTypId { + fn.fd = (*decoderSimpleIO).rawExt + fi.addrD = true + } else if xfFn := exth.getExt(rtid, checkExt); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fd = (*decoderSimpleIO).ext + fi.addrD = true + } else if ti.flagSelfer || ti.flagSelferPtr { + fn.fd = (*decoderSimpleIO).selferUnmarshal + fi.addrD = ti.flagSelferPtr + } else if supportMarshalInterfaces && binaryEncoding && + (ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) && + (ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) { + fn.fd = (*decoderSimpleIO).binaryUnmarshal + fi.addrD = ti.flagBinaryUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && json && + (ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) && + (ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) { + + fn.fd = (*decoderSimpleIO).jsonUnmarshal + fi.addrD = ti.flagJsonUnmarshalerPtr + } else if supportMarshalInterfaces && !binaryEncoding && + (ti.flagTextMarshaler || ti.flagTextMarshalerPtr) && + (ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) { + fn.fd = (*decoderSimpleIO).textUnmarshal + fi.addrD = ti.flagTextUnmarshalerPtr + } else { + if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) { + var rtid2 uintptr + if !ti.flagHasPkgPath { + rtid2 = rtid + if rk == reflect.Array { + rtid2 = rt2id(ti.key) + } + if idx, ok := fastpathAvIndex(rtid2); ok { + fn.fd = fp[idx].decfn + fi.addrD = true + fi.addrDf = false + if rk == reflect.Array { + fi.addrD = false + } + } + } else { + + xfe, xrt := dh.decFnloadFastpathUnderlying(ti, fp) + if xfe != nil { + xfnf2 := xfe.decfn + if rk == reflect.Array { + fi.addrD = false + fn.fd = func(d *decoderSimpleIO, xf *decFnInfo, xrv reflect.Value) { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } else { + 
fi.addrD = true + fi.addrDf = false + xptr2rt := reflect.PointerTo(xrt) + fn.fd = func(d *decoderSimpleIO, xf *decFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, rvConvert(xrv, xptr2rt)) + } else { + xfnf2(d, xf, rvConvert(xrv, xrt)) + } + } + } + } + } + } + if fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fd = (*decoderSimpleIO).kBool + case reflect.String: + fn.fd = (*decoderSimpleIO).kString + case reflect.Int: + fn.fd = (*decoderSimpleIO).kInt + case reflect.Int8: + fn.fd = (*decoderSimpleIO).kInt8 + case reflect.Int16: + fn.fd = (*decoderSimpleIO).kInt16 + case reflect.Int32: + fn.fd = (*decoderSimpleIO).kInt32 + case reflect.Int64: + fn.fd = (*decoderSimpleIO).kInt64 + case reflect.Uint: + fn.fd = (*decoderSimpleIO).kUint + case reflect.Uint8: + fn.fd = (*decoderSimpleIO).kUint8 + case reflect.Uint16: + fn.fd = (*decoderSimpleIO).kUint16 + case reflect.Uint32: + fn.fd = (*decoderSimpleIO).kUint32 + case reflect.Uint64: + fn.fd = (*decoderSimpleIO).kUint64 + case reflect.Uintptr: + fn.fd = (*decoderSimpleIO).kUintptr + case reflect.Float32: + fn.fd = (*decoderSimpleIO).kFloat32 + case reflect.Float64: + fn.fd = (*decoderSimpleIO).kFloat64 + case reflect.Complex64: + fn.fd = (*decoderSimpleIO).kComplex64 + case reflect.Complex128: + fn.fd = (*decoderSimpleIO).kComplex128 + case reflect.Chan: + fn.fd = (*decoderSimpleIO).kChan + case reflect.Slice: + fn.fd = (*decoderSimpleIO).kSlice + case reflect.Array: + fi.addrD = false + fn.fd = (*decoderSimpleIO).kArray + case reflect.Struct: + if ti.simple { + fn.fd = (*decoderSimpleIO).kStructSimple + } else { + fn.fd = (*decoderSimpleIO).kStruct + } + case reflect.Map: + fn.fd = (*decoderSimpleIO).kMap + case reflect.Interface: + + fn.fd = (*decoderSimpleIO).kInterface + default: + + fn.fd = (*decoderSimpleIO).kErr + } + } + } + return +} +func (e *simpleEncDriverIO) EncodeNil() { + e.w.writen1(simpleVdNil) +} + +func (e *simpleEncDriverIO) EncodeBool(b bool) { + if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && !b { + e.EncodeNil() + return + } + if b { + e.w.writen1(simpleVdTrue) + } else { + e.w.writen1(simpleVdFalse) + } +} + +func (e *simpleEncDriverIO) EncodeFloat32(f float32) { + if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 { + e.EncodeNil() + return + } + e.w.writen1(simpleVdFloat32) + e.w.writen4(bigen.PutUint32(math.Float32bits(f))) +} + +func (e *simpleEncDriverIO) EncodeFloat64(f float64) { + if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 { + e.EncodeNil() + return + } + e.w.writen1(simpleVdFloat64) + e.w.writen8(bigen.PutUint64(math.Float64bits(f))) +} + +func (e *simpleEncDriverIO) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-v), simpleVdNegInt) + } else { + e.encUint(uint64(v), simpleVdPosInt) + } +} + +func (e *simpleEncDriverIO) EncodeUint(v uint64) { + e.encUint(v, simpleVdPosInt) +} + +func (e *simpleEncDriverIO) encUint(v uint64, bd uint8) { + if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == 0 { + e.EncodeNil() + return + } + if v <= math.MaxUint8 { + e.w.writen2(bd, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 1) + e.w.writen2(bigen.PutUint16(uint16(v))) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 2) + e.w.writen4(bigen.PutUint32(uint32(v))) + } else { + e.w.writen1(bd + 3) + e.w.writen8(bigen.PutUint64(v)) + } +} + +func (e *simpleEncDriverIO) encLen(bd byte, length int) { + if length == 0 { + e.w.writen1(bd) + } else if length <= math.MaxUint8 { + e.w.writen1(bd + 1) + 
e.w.writen1(uint8(length)) + } else if length <= math.MaxUint16 { + e.w.writen1(bd + 2) + e.w.writen2(bigen.PutUint16(uint16(length))) + } else if int64(length) <= math.MaxUint32 { + e.w.writen1(bd + 3) + e.w.writen4(bigen.PutUint32(uint32(length))) + } else { + e.w.writen1(bd + 4) + e.w.writen8(bigen.PutUint64(uint64(length))) + } +} + +func (e *simpleEncDriverIO) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + var bs0, bs []byte + if ext == SelfExt { + bs0 = e.e.blist.get(1024) + bs = bs0 + sideEncode(e.h, &e.h.sideEncPool, func(se encoderI) { oneOffEncode(se, v, &bs, basetype, true) }) + } else { + bs = ext.WriteExt(v) + } + if bs == nil { + e.writeNilBytes() + goto END + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +END: + if ext == SelfExt { + e.e.blist.put(bs) + if !byteSliceSameData(bs0, bs) { + e.e.blist.put(bs0) + } + } +} + +func (e *simpleEncDriverIO) EncodeRawExt(re *RawExt) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *simpleEncDriverIO) encodeExtPreamble(xtag byte, length int) { + e.encLen(simpleVdExt, length) + e.w.writen1(xtag) +} + +func (e *simpleEncDriverIO) WriteArrayStart(length int) { + e.encLen(simpleVdArray, length) +} + +func (e *simpleEncDriverIO) WriteMapStart(length int) { + e.encLen(simpleVdMap, length) +} + +func (e *simpleEncDriverIO) WriteArrayEmpty() { + + e.w.writen1(simpleVdArray) +} + +func (e *simpleEncDriverIO) WriteMapEmpty() { + + e.w.writen1(simpleVdMap) +} + +func (e *simpleEncDriverIO) EncodeString(v string) { + if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == "" { + e.EncodeNil() + return + } + if e.h.StringToRaw { + e.encLen(simpleVdByteArray, len(v)) + } else { + e.encLen(simpleVdString, len(v)) + } + e.w.writestr(v) +} + +func (e *simpleEncDriverIO) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) } + +func (e *simpleEncDriverIO) EncodeStringBytesRaw(v []byte) { + + e.encLen(simpleVdByteArray, len(v)) + e.w.writeb(v) +} + +func (e *simpleEncDriverIO) EncodeBytes(v []byte) { + if v == nil { + e.writeNilBytes() + return + } + e.EncodeStringBytesRaw(v) +} + +func (e *simpleEncDriverIO) encodeNilBytes() { + b := byte(simpleVdNil) + if e.h.NilCollectionToZeroLength { + b = simpleVdArray + } + e.w.writen1(b) +} + +func (e *simpleEncDriverIO) writeNilOr(v byte) { + if !e.h.NilCollectionToZeroLength { + v = simpleVdNil + } + e.w.writen1(v) +} + +func (e *simpleEncDriverIO) writeNilArray() { + e.writeNilOr(simpleVdArray) +} + +func (e *simpleEncDriverIO) writeNilMap() { + e.writeNilOr(simpleVdMap) +} + +func (e *simpleEncDriverIO) writeNilBytes() { + e.writeNilOr(simpleVdByteArray) +} + +func (e *simpleEncDriverIO) EncodeTime(t time.Time) { + + if t.IsZero() { + e.EncodeNil() + return + } + v, err := t.MarshalBinary() + halt.onerror(err) + e.w.writen2(simpleVdTime, uint8(len(v))) + e.w.writeb(v) +} + +func (d *simpleDecDriverIO) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *simpleDecDriverIO) advanceNil() (null bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return +} + +func (d *simpleDecDriverIO) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdNil: + d.bdRead = false + return valueTypeNil + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + return valueTypeBytes + case simpleVdString, simpleVdString + 1, + simpleVdString + 2, 
simpleVdString + 3, simpleVdString + 4: + return valueTypeString + case simpleVdArray, simpleVdArray + 1, + simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + return valueTypeArray + case simpleVdMap, simpleVdMap + 1, + simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + return valueTypeMap + } + return valueTypeUnset +} + +func (d *simpleDecDriverIO) TryNil() bool { + return d.advanceNil() +} + +func (d *simpleDecDriverIO) decFloat() (f float64, ok bool) { + ok = true + switch d.bd { + case simpleVdFloat32: + f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4()))) + case simpleVdFloat64: + f = math.Float64frombits(bigen.Uint64(d.r.readn8())) + default: + ok = false + } + return +} + +func (d *simpleDecDriverIO) decInteger() (ui uint64, neg, ok bool) { + ok = true + switch d.bd { + case simpleVdPosInt: + ui = uint64(d.r.readn1()) + case simpleVdPosInt + 1: + ui = uint64(bigen.Uint16(d.r.readn2())) + case simpleVdPosInt + 2: + ui = uint64(bigen.Uint32(d.r.readn4())) + case simpleVdPosInt + 3: + ui = uint64(bigen.Uint64(d.r.readn8())) + case simpleVdNegInt: + ui = uint64(d.r.readn1()) + neg = true + case simpleVdNegInt + 1: + ui = uint64(bigen.Uint16(d.r.readn2())) + neg = true + case simpleVdNegInt + 2: + ui = uint64(bigen.Uint32(d.r.readn4())) + neg = true + case simpleVdNegInt + 3: + ui = uint64(bigen.Uint64(d.r.readn8())) + neg = true + default: + ok = false + + } + + return +} + +func (d *simpleDecDriverIO) DecodeInt64() (i int64) { + if d.advanceNil() { + return + } + v1, v2, v3 := d.decInteger() + i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, false) + d.bdRead = false + return +} + +func (d *simpleDecDriverIO) DecodeUint64() (ui uint64) { + if d.advanceNil() { + return + } + ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger()) + d.bdRead = false + return +} + +func (d *simpleDecDriverIO) DecodeFloat64() (f float64) { + if d.advanceNil() { + return + } + v1, v2 := d.decFloat() + f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, false) + d.bdRead = false + return +} + +func (d *simpleDecDriverIO) DecodeBool() (b bool) { + if d.advanceNil() { + return + } + if d.bd == simpleVdFalse { + } else if d.bd == simpleVdTrue { + b = true + } else { + halt.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *simpleDecDriverIO) ReadMapStart() (length int) { + if d.advanceNil() { + return containerLenNil + } + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriverIO) ReadArrayStart() (length int) { + if d.advanceNil() { + return containerLenNil + } + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriverIO) uint2Len(ui uint64) int { + if chkOvf.Uint(ui, intBitsize) { + halt.errorf("overflow integer: %v", ui) + } + return int(ui) +} + +func (d *simpleDecDriverIO) decLen() int { + switch d.bd & 7 { + case 0: + return 0 + case 1: + return int(d.r.readn1()) + case 2: + return int(bigen.Uint16(d.r.readn2())) + case 3: + return d.uint2Len(uint64(bigen.Uint32(d.r.readn4()))) + case 4: + return d.uint2Len(bigen.Uint64(d.r.readn8())) + } + halt.errorf("cannot read length: bd%%8 must be in range 0..4. 
Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriverIO) DecodeStringAsBytes() ([]byte, dBytesAttachState) { + return d.DecodeBytes() +} + +func (d *simpleDecDriverIO) DecodeBytes() (bs []byte, state dBytesAttachState) { + if d.advanceNil() { + return + } + var cond bool + + if d.bd >= simpleVdArray && d.bd <= simpleVdArray+4 { + slen := d.ReadArrayStart() + bs, cond = usableByteSlice(d.d.buf, slen) + for i := 0; i < len(bs); i++ { + bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8)) + } + for i := len(bs); i < slen; i++ { + bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8))) + } + if cond { + d.d.buf = bs + } + state = dBytesAttachBuffer + + return + } + + clen := d.decLen() + d.bdRead = false + bs, cond = d.r.readxb(uint(clen)) + state = d.d.attachState(cond) + return +} + +func (d *simpleDecDriverIO) DecodeTime() (t time.Time) { + if d.advanceNil() { + return + } + if d.bd != simpleVdTime { + halt.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd) + } + d.bdRead = false + clen := uint(d.r.readn1()) + b := d.r.readx(clen) + halt.onerror((&t).UnmarshalBinary(b)) + return +} + +func (d *simpleDecDriverIO) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) { + xbs, _, _, ok := d.decodeExtV(ext != nil, xtag) + if !ok { + return + } + if ext == SelfExt { + sideDecode(d.h, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv, xbs, basetype, true) }) + } else { + ext.ReadExt(rv, xbs) + } +} + +func (d *simpleDecDriverIO) DecodeRawExt(re *RawExt) { + xbs, realxtag, state, ok := d.decodeExtV(false, 0) + if !ok { + return + } + re.Tag = uint64(realxtag) + re.setData(xbs, state >= dBytesAttachViewZerocopy) +} + +func (d *simpleDecDriverIO) decodeExtV(verifyTag bool, xtagIn uint64) (xbs []byte, xtag byte, bstate dBytesAttachState, ok bool) { + if xtagIn > 0xff { + halt.errorf("ext: tag must be <= 0xff; got: %v", xtagIn) + } + if d.advanceNil() { + return + } + tag := uint8(xtagIn) + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + halt.errorf("wrong extension tag. Got %b. 
Expecting: %v", xtag, tag) + } + xbs, ok = d.r.readxb(uint(l)) + bstate = d.d.attachState(ok) + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs, bstate = d.DecodeBytes() + default: + halt.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd) + } + d.bdRead = false + ok = true + return +} + +func (d *simpleDecDriverIO) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.naked() + var decodeFurther bool + + switch d.bd { + case simpleVdNil: + n.v = valueTypeNil + case simpleVdFalse: + n.v = valueTypeBool + n.b = false + case simpleVdTrue: + n.v = valueTypeBool + n.b = true + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt64() + } else { + n.v = valueTypeUint + n.u = d.DecodeUint64() + } + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + n.v = valueTypeInt + n.i = d.DecodeInt64() + case simpleVdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat64() + case simpleVdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat64() + case simpleVdTime: + n.v = valueTypeTime + n.t = d.DecodeTime() + case simpleVdString, simpleVdString + 1, + simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + n.v = valueTypeString + n.s = d.d.detach2Str(d.DecodeStringAsBytes()) + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(uint(l)) + + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, + simpleVdArray + 3, simpleVdArray + 4: + n.v = valueTypeArray + decodeFurther = true + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + n.v = valueTypeMap + decodeFurther = true + default: + halt.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd) + } + + if !decodeFurther { + d.bdRead = false + } +} + +func (d *simpleDecDriverIO) nextValueBytes() (v []byte) { + if !d.bdRead { + d.readNextBd() + } + d.r.startRecording() + d.nextValueBytesBdReadR() + v = d.r.stopRecording() + d.bdRead = false + return +} + +func (d *simpleDecDriverIO) nextValueBytesBdReadR() { + c := d.bd + + var length uint + + switch c { + case simpleVdNil, simpleVdFalse, simpleVdTrue, simpleVdString, simpleVdByteArray: + + case simpleVdPosInt, simpleVdNegInt: + d.r.readn1() + case simpleVdPosInt + 1, simpleVdNegInt + 1: + d.r.skip(2) + case simpleVdPosInt + 2, simpleVdNegInt + 2, simpleVdFloat32: + d.r.skip(4) + case simpleVdPosInt + 3, simpleVdNegInt + 3, simpleVdFloat64: + d.r.skip(8) + case simpleVdTime: + c = d.r.readn1() + d.r.skip(uint(c)) + + default: + switch c & 7 { + case 0: + length = 0 + case 1: + b := d.r.readn1() + length = uint(b) + case 2: + x := d.r.readn2() + length = uint(bigen.Uint16(x)) + case 3: + x := d.r.readn4() + length = uint(bigen.Uint32(x)) + case 4: + x := d.r.readn8() + length = uint(bigen.Uint64(x)) + } + + bExt := c >= simpleVdExt && c <= simpleVdExt+7 + bStr := c >= simpleVdString && c <= simpleVdString+7 + bByteArray := c >= simpleVdByteArray && c <= simpleVdByteArray+7 + bArray := c >= simpleVdArray && c <= simpleVdArray+7 + bMap := c >= simpleVdMap && c <= simpleVdMap+7 + + if !(bExt || bStr || bByteArray || bArray || bMap) { + 
halt.errorf("cannot infer value - %s 0x%x", msgBadDesc, c) + } + + if bExt { + d.r.readn1() + } + + if length == 0 { + break + } + + if bArray { + for i := uint(0); i < length; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + } + } else if bMap { + for i := uint(0); i < length; i++ { + d.readNextBd() + d.nextValueBytesBdReadR() + d.readNextBd() + d.nextValueBytesBdReadR() + } + } else { + d.r.skip(length) + } + } + return +} + +func (d *simpleEncDriverIO) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) { + callMake(&d.w) + d.h = hh.(*SimpleHandle) + d.e = shared + if shared.bytes { + fp = simpleFpEncBytes + } else { + fp = simpleFpEncIO + } + + d.init2(enc) + return +} + +func (e *simpleEncDriverIO) writeBytesAsis(b []byte) { e.w.writeb(b) } + +func (e *simpleEncDriverIO) writerEnd() { e.w.end() } + +func (e *simpleEncDriverIO) resetOutBytes(out *[]byte) { + e.w.resetBytes(*out, out) +} + +func (e *simpleEncDriverIO) resetOutIO(out io.Writer) { + e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist) +} + +func (d *simpleDecDriverIO) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) { + callMake(&d.r) + d.h = hh.(*SimpleHandle) + d.d = shared + if shared.bytes { + fp = simpleFpDecBytes + } else { + fp = simpleFpDecIO + } + + d.init2(dec) + return +} + +func (d *simpleDecDriverIO) NumBytesRead() int { + return int(d.r.numread()) +} + +func (d *simpleDecDriverIO) resetInBytes(in []byte) { + d.r.resetBytes(in) +} + +func (d *simpleDecDriverIO) resetInIO(r io.Reader) { + d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist) +} + +func (d *simpleDecDriverIO) descBd() string { + return sprintf("%v (%s)", d.bd, simpledesc(d.bd)) +} + +func (d *simpleDecDriverIO) DecodeFloat32() (f float32) { + return float32(chkOvf.Float32V(d.DecodeFloat64())) +} diff --git a/vendor/github.com/ugorji/go/codec/simple.notfastpath.mono.generated.go b/vendor/github.com/ugorji/go/codec/simple.notfastpath.mono.generated.go new file mode 100644 index 000000000..04edb7438 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/simple.notfastpath.mono.generated.go @@ -0,0 +1,52 @@ +//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath) + +// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "reflect" +) + +type fastpathESimpleBytes struct { + rt reflect.Type + encfn func(*encoderSimpleBytes, *encFnInfo, reflect.Value) +} +type fastpathDSimpleBytes struct { + rt reflect.Type + decfn func(*decoderSimpleBytes, *decFnInfo, reflect.Value) +} +type fastpathEsSimpleBytes [0]fastpathESimpleBytes +type fastpathDsSimpleBytes [0]fastpathDSimpleBytes + +func (helperEncDriverSimpleBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderSimpleBytes) bool { + return false +} +func (helperDecDriverSimpleBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderSimpleBytes) bool { + return false +} + +func (helperEncDriverSimpleBytes) fastpathEList() (v *fastpathEsSimpleBytes) { return } +func (helperDecDriverSimpleBytes) fastpathDList() (v *fastpathDsSimpleBytes) { return } + +type fastpathESimpleIO struct { + rt reflect.Type + encfn func(*encoderSimpleIO, *encFnInfo, reflect.Value) +} +type fastpathDSimpleIO struct { + rt reflect.Type + decfn func(*decoderSimpleIO, *decFnInfo, reflect.Value) +} +type fastpathEsSimpleIO [0]fastpathESimpleIO +type fastpathDsSimpleIO [0]fastpathDSimpleIO + +func (helperEncDriverSimpleIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderSimpleIO) bool { + return false +} +func (helperDecDriverSimpleIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderSimpleIO) bool { + return false +} + +func (helperEncDriverSimpleIO) fastpathEList() (v *fastpathEsSimpleIO) { return } +func (helperDecDriverSimpleIO) fastpathDList() (v *fastpathDsSimpleIO) { return } diff --git a/vendor/github.com/ugorji/go/codec/sort-slice.generated.go b/vendor/github.com/ugorji/go/codec/sort-slice.generated.go deleted file mode 100644 index a755a02af..000000000 --- a/vendor/github.com/ugorji/go/codec/sort-slice.generated.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// Code generated from sort-slice.go.tmpl - DO NOT EDIT. 
- -package codec - -import ( - "bytes" - "reflect" - "time" -) - -type stringSlice []string - -func (p stringSlice) Len() int { return len(p) } -func (p stringSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p stringSlice) Less(i, j int) bool { - return p[uint(i)] < p[uint(j)] -} - -type uint8Slice []uint8 - -func (p uint8Slice) Len() int { return len(p) } -func (p uint8Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p uint8Slice) Less(i, j int) bool { - return p[uint(i)] < p[uint(j)] -} - -type uint64Slice []uint64 - -func (p uint64Slice) Len() int { return len(p) } -func (p uint64Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p uint64Slice) Less(i, j int) bool { - return p[uint(i)] < p[uint(j)] -} - -type intSlice []int - -func (p intSlice) Len() int { return len(p) } -func (p intSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p intSlice) Less(i, j int) bool { - return p[uint(i)] < p[uint(j)] -} - -type int32Slice []int32 - -func (p int32Slice) Len() int { return len(p) } -func (p int32Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p int32Slice) Less(i, j int) bool { - return p[uint(i)] < p[uint(j)] -} - -type stringRv struct { - v string - r reflect.Value -} -type stringRvSlice []stringRv - -func (p stringRvSlice) Len() int { return len(p) } -func (p stringRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p stringRvSlice) Less(i, j int) bool { - return p[uint(i)].v < p[uint(j)].v -} - -type stringIntf struct { - v string - i interface{} -} -type stringIntfSlice []stringIntf - -func (p stringIntfSlice) Len() int { return len(p) } -func (p stringIntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p stringIntfSlice) Less(i, j int) bool { - return p[uint(i)].v < p[uint(j)].v -} - -type float64Rv struct { - v float64 - r reflect.Value -} -type float64RvSlice []float64Rv - -func (p float64RvSlice) Len() int { return len(p) } -func (p float64RvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p float64RvSlice) Less(i, j int) bool { - return p[uint(i)].v < p[uint(j)].v || isNaN64(p[uint(i)].v) && !isNaN64(p[uint(j)].v) -} - -type uint64Rv struct { - v uint64 - r reflect.Value -} -type uint64RvSlice []uint64Rv - -func (p uint64RvSlice) Len() int { return len(p) } -func (p uint64RvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p uint64RvSlice) Less(i, j int) bool { - return p[uint(i)].v < p[uint(j)].v -} - -type int64Rv struct { - v int64 - r reflect.Value -} -type int64RvSlice []int64Rv - -func (p int64RvSlice) Len() int { return len(p) } -func (p int64RvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p int64RvSlice) Less(i, j int) bool { - return p[uint(i)].v < p[uint(j)].v -} - -type timeRv struct { - v time.Time - r reflect.Value -} -type timeRvSlice []timeRv - -func (p timeRvSlice) Len() int { return len(p) } -func (p timeRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p timeRvSlice) Less(i, j int) bool { - return p[uint(i)].v.Before(p[uint(j)].v) -} - -type bytesRv struct { - v []byte - r reflect.Value -} -type bytesRvSlice []bytesRv - -func (p bytesRvSlice) Len() int { return len(p) } -func (p bytesRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p bytesRvSlice) Less(i, j int) bool { - return 
bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 -} - -type bytesIntf struct { - v []byte - i interface{} -} -type bytesIntfSlice []bytesIntf - -func (p bytesIntfSlice) Len() int { return len(p) } -func (p bytesIntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p bytesIntfSlice) Less(i, j int) bool { - return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 -} diff --git a/vendor/github.com/ugorji/go/codec/sort-slice.go.tmpl b/vendor/github.com/ugorji/go/codec/sort-slice.go.tmpl deleted file mode 100644 index 98209603e..000000000 --- a/vendor/github.com/ugorji/go/codec/sort-slice.go.tmpl +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// Code generated from sort-slice.go.tmpl - DO NOT EDIT. - -{{/* -xxxSlice -xxxIntf -xxxIntfSlice -xxxRv -xxxRvSlice - -I'm now going to create them for -- sortables -- sortablesplus - -With the parameters passed in sortables or sortablesplus, -'time, 'bytes' are special, and correspond to time.Time and []byte respectively. -*/}} - -package codec - -import ( - "time" - "reflect" - "bytes" -) - -{{/* func init() { _ = time.Unix } */}} - -{{define "T"}} -func (p {{ .Type }}) Len() int { return len(p) } -func (p {{ .Type }}) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } -func (p {{ .Type }}) Less(i, j int) bool { - {{ if eq .Kind "bool" }} return !p[uint(i)]{{.V}} && p[uint(j)]{{.V}} - {{ else if eq .Kind "float32" }} return p[uint(i)]{{.V}} < p[uint(j)]{{.V}} || isNaN32(p[uint(i)]{{.V}}) && !isNaN32(p[uint(j)]{{.V}}) - {{ else if eq .Kind "float64" }} return p[uint(i)]{{.V}} < p[uint(j)]{{.V}} || isNaN64(p[uint(i)]{{.V}}) && !isNaN64(p[uint(j)]{{.V}}) - {{ else if eq .Kind "time" }} return p[uint(i)]{{.V}}.Before(p[uint(j)]{{.V}}) - {{ else if eq .Kind "bytes" }} return bytes.Compare(p[uint(i)]{{.V}}, p[uint(j)]{{.V}}) == -1 - {{ else }} return p[uint(i)]{{.V}} < p[uint(j)]{{.V}} - {{ end -}} -} -{{end}} - -{{range $i, $v := sortables }}{{ $t := tshort $v }} -type {{ $v }}Slice []{{ $t }} -{{template "T" args "Kind" $v "Type" (print $v "Slice") "V" ""}} -{{end}} - -{{range $i, $v := sortablesplus }}{{ $t := tshort $v }} - -type {{ $v }}Rv struct { - v {{ $t }} - r reflect.Value -} -type {{ $v }}RvSlice []{{ $v }}Rv -{{template "T" args "Kind" $v "Type" (print $v "RvSlice") "V" ".v"}} - -{{if eq $v "bytes" "string" -}} -type {{ $v }}Intf struct { - v {{ $t }} - i interface{} -} -type {{ $v }}IntfSlice []{{ $v }}Intf -{{template "T" args "Kind" $v "Type" (print $v "IntfSlice") "V" ".v"}} -{{end}} - -{{end}} diff --git a/vendor/github.com/ugorji/go/codec/writer.go b/vendor/github.com/ugorji/go/codec/writer.go index b6e4813f8..fcfce6109 100644 --- a/vendor/github.com/ugorji/go/codec/writer.go +++ b/vendor/github.com/ugorji/go/codec/writer.go @@ -3,10 +3,14 @@ package codec -import "io" +import ( + "io" +) + +const maxConsecutiveEmptyWrites = 16 // 2 is sufficient, 16 is enough, 64 is optimal // encWriter abstracts writing to a byte array or to an io.Writer. -type encWriter interface { +type encWriterI interface { writeb([]byte) writestr(string) writeqstr(string) // write string wrapped in quotes ie "..." 
@@ -17,7 +21,11 @@ type encWriter interface { writen4([4]byte) writen8([8]byte) + // isBytes() bool end() + + resetIO(w io.Writer, bufsize int, blist *bytesFreeList) + resetBytes(in []byte, out *[]byte) } // --------------------------------------------- @@ -32,16 +40,18 @@ type bufioEncWriter struct { b [16]byte // scratch buffer and padding (cache-aligned) } -func (z *bufioEncWriter) reset(w io.Writer, bufsize int, blist *bytesFreelist) { +// MARKER: use setByteAt/byteAt to elide the bounds-checks +// when we are sure that we don't go beyond the bounds. + +func (z *bufioEncWriter) resetBytes(in []byte, out *[]byte) { + halt.errorStr("resetBytes is unsupported by bufioEncWriter") +} + +func (z *bufioEncWriter) resetIO(w io.Writer, bufsize int, blist *bytesFreeList) { z.w = w z.n = 0 - if bufsize <= 0 { - bufsize = defEncByteBufSize - } - // bufsize must be >= 8, to accomodate writen methods (where n <= 8) - if bufsize <= 8 { - bufsize = 8 - } + // use minimum bufsize of 16, matching the array z.b and accomodating writen methods (where n <= 8) + bufsize = max(16, bufsize) // max(byteBufSize, bufsize) if cap(z.buf) < bufsize { if len(z.buf) > 0 && &z.buf[0] != &z.b[0] { blist.put(z.buf) @@ -56,17 +66,19 @@ func (z *bufioEncWriter) reset(w io.Writer, bufsize int, blist *bytesFreelist) { } func (z *bufioEncWriter) flushErr() (err error) { - n, err := z.w.Write(z.buf[:z.n]) - z.n -= n - if z.n > 0 { - if err == nil { - err = io.ErrShortWrite + var n int + for i := maxConsecutiveEmptyReads; i > 0; i-- { + n, err = z.w.Write(z.buf[:z.n]) + z.n -= n + if z.n == 0 || err != nil { + return } + // at this point: z.n > 0 && err == nil if n > 0 { copy(z.buf, z.buf[n:z.n+n]) } } - return err + return io.ErrShortWrite // OR io.ErrNoProgress: not enough (or no) data written } func (z *bufioEncWriter) flush() { @@ -131,6 +143,7 @@ func (z *bufioEncWriter) writen1(b1 byte) { // z.buf[z.n] = b1 z.n++ } + func (z *bufioEncWriter) writen2(b1, b2 byte) { if 2 > len(z.buf)-z.n { z.flush() @@ -169,8 +182,14 @@ func (z *bufioEncWriter) endErr() (err error) { return } +func (z *bufioEncWriter) end() { + halt.onerror(z.endErr()) +} + // --------------------------------------------- +var bytesEncAppenderDefOut = []byte{} + // bytesEncAppender implements encWriter and can write to an byte slice. type bytesEncAppender struct { b []byte @@ -203,122 +222,18 @@ func (z *bytesEncAppender) writen4(b [4]byte) { func (z *bytesEncAppender) writen8(b [8]byte) { z.b = append(z.b, b[:]...) - // z.b = append(z.b, b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]) // prevents inlining encWr.writen4 + // z.b = append(z.b, b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]) } -func (z *bytesEncAppender) endErr() error { +func (z *bytesEncAppender) end() { *(z.out) = z.b - return nil } -func (z *bytesEncAppender) reset(in []byte, out *[]byte) { + +func (z *bytesEncAppender) resetBytes(in []byte, out *[]byte) { z.b = in[:0] z.out = out } -// -------------------------------------------------- - -type encWr struct { - wb bytesEncAppender - wf *bufioEncWriter - - bytes bool // encoding to []byte - - // MARKER: these fields below should belong directly in Encoder. - // we pack them here for space efficiency and cache-line optimization. - - js bool // is json encoder? - be bool // is binary encoder? - - c containerState - - calls uint16 - seq uint16 // sequencer (e.g. 
used by binc for symbols, etc) +func (z *bytesEncAppender) resetIO(w io.Writer, bufsize int, blist *bytesFreeList) { + halt.errorStr("resetIO is unsupported by bytesEncAppender") } - -// MARKER: manually inline bytesEncAppender.writenx/writeqstr methods, -// as calling them causes encWr.writenx/writeqstr methods to not be inlined (cost > 80). -// -// i.e. e.g. instead of writing z.wb.writen2(b1, b2), use z.wb.b = append(z.wb.b, b1, b2) - -func (z *encWr) writeb(s []byte) { - if z.bytes { - z.wb.writeb(s) - } else { - z.wf.writeb(s) - } -} -func (z *encWr) writestr(s string) { - if z.bytes { - z.wb.writestr(s) - } else { - z.wf.writestr(s) - } -} - -// MARKER: Add WriteStr to be called directly by generated code without a genHelper forwarding function. -// Go's inlining model adds cost for forwarding functions, preventing inlining (cost goes above 80 budget). - -func (z *encWr) WriteStr(s string) { - if z.bytes { - z.wb.writestr(s) - } else { - z.wf.writestr(s) - } -} - -func (z *encWr) writen1(b1 byte) { - if z.bytes { - z.wb.writen1(b1) - } else { - z.wf.writen1(b1) - } -} - -func (z *encWr) writen2(b1, b2 byte) { - if z.bytes { - // MARKER: z.wb.writen2(b1, b2) - z.wb.b = append(z.wb.b, b1, b2) - } else { - z.wf.writen2(b1, b2) - } -} - -func (z *encWr) writen4(b [4]byte) { - if z.bytes { - // MARKER: z.wb.writen4(b1, b2, b3, b4) - z.wb.b = append(z.wb.b, b[:]...) - // z.wb.writen4(b) - } else { - z.wf.writen4(b) - } -} -func (z *encWr) writen8(b [8]byte) { - if z.bytes { - // z.wb.b = append(z.wb.b, b[:]...) - z.wb.writen8(b) - } else { - z.wf.writen8(b) - } -} - -func (z *encWr) writeqstr(s string) { - if z.bytes { - // MARKER: z.wb.writeqstr(s) - z.wb.b = append(append(append(z.wb.b, '"'), s...), '"') - } else { - z.wf.writeqstr(s) - } -} - -func (z *encWr) endErr() error { - if z.bytes { - return z.wb.endErr() - } - return z.wf.endErr() -} - -func (z *encWr) end() { - halt.onerror(z.endErr()) -} - -var _ encWriter = (*encWr)(nil) diff --git a/vendor/github.com/uptrace/bun/CHANGELOG.md b/vendor/github.com/uptrace/bun/CHANGELOG.md index 8d59c5c4a..059721b36 100644 --- a/vendor/github.com/uptrace/bun/CHANGELOG.md +++ b/vendor/github.com/uptrace/bun/CHANGELOG.md @@ -1,3 +1,52 @@ +## [1.2.14](https://github.com/uptrace/bun/compare/v1.2.13...v1.2.14) (2025-06-16) + + +### Bug Fixes + +* restore q.limit check ([07d32c1](https://github.com/uptrace/bun/commit/07d32c1662015a398322fdbc0dc34c5f0d10ce44)) + + + +## [1.2.13](https://github.com/uptrace/bun/compare/v1.2.12...v1.2.13) (2025-06-11) + + +### Bug Fixes + +* **query:** scanAndCount without model ([07fb7ec](https://github.com/uptrace/bun/commit/07fb7ec540979d0625cfeb771a0679c5982c6e2a)), closes [#1209](https://github.com/uptrace/bun/issues/1209) +* sort fk constraints before appending ([c87fa90](https://github.com/uptrace/bun/commit/c87fa903c56743e24a2cb677e8e96fd5c802fba5)) +* use slices sort ([8555900](https://github.com/uptrace/bun/commit/8555900ad840d9b6e73c8655af4f1b6766bc943b)) + + + +## [1.2.12](https://github.com/uptrace/bun/compare/v1.2.11...v1.2.12) (2025-06-05) + + +### Bug Fixes + +* **automigrate:** append SQL to separate []byte slices ([f44a349](https://github.com/uptrace/bun/commit/f44a349ec61b09f9f0240a923e121cbaa3ab1d14)) +* **gh-1160:** add WithExcludeForeignKeys option ([63141cb](https://github.com/uptrace/bun/commit/63141cb6c9a6d0d2abf4b41eac5b1c6078884326)), closes [#1160](https://github.com/uptrace/bun/issues/1160) +* iss-824 to allow mssql to support non unicode strings 
([0565763](https://github.com/uptrace/bun/commit/056576355a0a7ff75f616cedb5d81144f6657a6a)) +* **migrations:** skip template rendering if no data + fix tests ([4055827](https://github.com/uptrace/bun/commit/4055827e1af4f0b7e13879d393c1131ab497d962)) +* **pgdriver:** rename channelOverflowHandler to ChannelOverflowHandler for public API ([65760a9](https://github.com/uptrace/bun/commit/65760a9e648a1ae379982e5d8737d6d864f6a8e3)) +* relation join data race ([37971d7](https://github.com/uptrace/bun/commit/37971d7f83042ab83e52be1c122083f8a98a1efa)) +* report BIGSERIAL ~ BIGINT in pgdialect ([ad7356a](https://github.com/uptrace/bun/commit/ad7356a772324950cf866b86d23771fc53f83505)) +* skip automigrator test early ([5b22710](https://github.com/uptrace/bun/commit/5b22710f0b4d980ebec38fcd306bf459dc1eb615)) +* start sequence with last+1 ([7fbf34a](https://github.com/uptrace/bun/commit/7fbf34a69ff249c72af522331a4f6116f240630a)) + + +### Features + +* add support for netip.Addr and netip.Prefix ([63ccc8f](https://github.com/uptrace/bun/commit/63ccc8f530092c3dfc71179b94a43db452fa54ec)) +* exclude tables using LIKE pattern ([5351f7e](https://github.com/uptrace/bun/commit/5351f7ed4fe53662386e697cc551ba54487da018)) +* **migrations:** support Go templates in SQL migrations ([d92e29e](https://github.com/uptrace/bun/commit/d92e29e459ae2804ad48e1b4f6a8147211a47a57)) +* **pg:** allow user config buffer size of pg's connect ([e2f2650](https://github.com/uptrace/bun/commit/e2f2650950d13442d45694b7cd186b77b4e8e0bb)), closes [#1201](https://github.com/uptrace/bun/issues/1201) +* **pgdriver:** add option for tracing ([80c5e3c](https://github.com/uptrace/bun/commit/80c5e3c684c410dfc02170cfb8671bb8b1db2e35)), closes [#1150](https://github.com/uptrace/bun/issues/1150) +* **pgdriver:** add overflow handler to listener channel ([6f0e3a1](https://github.com/uptrace/bun/commit/6f0e3a1d33de5a61625d22ba6464bfe5da404a11)) +* set notnull=true for autoincrement columns ([1bd5dd7](https://github.com/uptrace/bun/commit/1bd5dd73ce943235a403c5896b6e70401b194093)) +* support changing column type to SERIAL ([136b480](https://github.com/uptrace/bun/commit/136b480e6835dd9a12b4925f57225fb73d0aa7ae)) + + + ## [1.2.11](https://github.com/uptrace/bun/compare/v1.2.10...v1.2.11) (2025-03-05) diff --git a/vendor/github.com/uptrace/bun/Makefile b/vendor/github.com/uptrace/bun/Makefile index 255d0f7ee..0bbfb5653 100644 --- a/vendor/github.com/uptrace/bun/Makefile +++ b/vendor/github.com/uptrace/bun/Makefile @@ -6,6 +6,7 @@ test: echo "go test in $${dir}"; \ (cd "$${dir}" && \ go test && \ + env RACETEST=1 go test -race && \ env GOOS=linux GOARCH=386 TZ= go test && \ go vet); \ done diff --git a/vendor/github.com/uptrace/bun/dialect/pgdialect/alter_table.go b/vendor/github.com/uptrace/bun/dialect/pgdialect/alter_table.go index d20f8c069..4a2cc8864 100644 --- a/vendor/github.com/uptrace/bun/dialect/pgdialect/alter_table.go +++ b/vendor/github.com/uptrace/bun/dialect/pgdialect/alter_table.go @@ -1,6 +1,7 @@ package pgdialect import ( + "context" "fmt" "strings" @@ -57,6 +58,11 @@ func (m *migrator) AppendSQL(b []byte, operation interface{}) (_ []byte, err err case *migrate.DropUniqueConstraintOp: b, err = m.dropConstraint(fmter, appendAlterTable(b, change.TableName), change.Unique.Name) case *migrate.ChangeColumnTypeOp: + // If column changes to SERIAL, create sequence first. 
+ // https://gist.github.com/oleglomako/185df689706c5499612a0d54d3ffe856 + if !change.From.GetIsAutoIncrement() && change.To.GetIsAutoIncrement() { + change.To, b, err = m.createDefaultSequence(fmter, b, change) + } b, err = m.changeColumnType(fmter, appendAlterTable(b, change.TableName), change) case *migrate.AddForeignKeyOp: b, err = m.addForeignKey(fmter, appendAlterTable(b, change.TableName()), change) @@ -187,6 +193,39 @@ func (m *migrator) addForeignKey(fmter schema.Formatter, b []byte, add *migrate. return b, nil } +// createDefaultSequence creates a SEQUENCE to back a serial column. +// Having a backing sequence is necessary to change column type to SERIAL. +// The updated Column's default is set to "nextval" of the new sequence. +func (m *migrator) createDefaultSequence(_ schema.Formatter, b []byte, op *migrate.ChangeColumnTypeOp) (_ sqlschema.Column, _ []byte, err error) { + var last int + if err = m.db.NewSelect().Table(op.TableName). + ColumnExpr("MAX(?)", op.Column).Scan(context.TODO(), &last); err != nil { + return nil, b, err + } + seq := op.TableName + "_" + op.Column + "_seq" + fqn := op.TableName + "." + op.Column + + // A sequence that is OWNED BY a table will be dropped + // if the table is dropped with CASCADE action. + b = append(b, "CREATE SEQUENCE "...) + b = append(b, seq...) + b = append(b, " START WITH "...) + b = append(b, fmt.Sprint(last+1)...) // start with next value + b = append(b, " OWNED BY "...) + b = append(b, fqn...) + b = append(b, ";\n"...) + + return &Column{ + Name: op.To.GetName(), + SQLType: op.To.GetSQLType(), + VarcharLen: op.To.GetVarcharLen(), + DefaultValue: fmt.Sprintf("nextval('%s'::regclass)", seq), + IsNullable: op.To.GetIsNullable(), + IsAutoIncrement: op.To.GetIsAutoIncrement(), + IsIdentity: op.To.GetIsIdentity(), + }, b, nil +} + func (m *migrator) changeColumnType(fmter schema.Formatter, b []byte, colDef *migrate.ChangeColumnTypeOp) (_ []byte, err error) { // alterColumn never re-assigns err, so there is no need to check for err != nil after calling it var i int diff --git a/vendor/github.com/uptrace/bun/dialect/pgdialect/inspector.go b/vendor/github.com/uptrace/bun/dialect/pgdialect/inspector.go index 040df439c..ea5269ac2 100644 --- a/vendor/github.com/uptrace/bun/dialect/pgdialect/inspector.go +++ b/vendor/github.com/uptrace/bun/dialect/pgdialect/inspector.go @@ -5,7 +5,6 @@ import ( "strings" "github.com/uptrace/bun" - "github.com/uptrace/bun/internal/ordered" "github.com/uptrace/bun/migrate/sqlschema" ) @@ -34,13 +33,12 @@ func newInspector(db *bun.DB, options ...sqlschema.InspectorOption) *Inspector { func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) { dbSchema := Schema{ - Tables: ordered.NewMap[string, sqlschema.Table](), ForeignKeys: make(map[sqlschema.ForeignKey]string), } exclude := in.ExcludeTables if len(exclude) == 0 { - // Avoid getting NOT IN (NULL) if bun.In() is called with an empty slice. + // Avoid getting NOT LIKE ALL (ARRAY[NULL]) if bun.In() is called with an empty slice. 
exclude = []string{""} } @@ -61,7 +59,7 @@ func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) { return dbSchema, err } - colDefs := ordered.NewMap[string, sqlschema.Column]() + var colDefs []sqlschema.Column uniqueGroups := make(map[string][]string) for _, c := range columns { @@ -72,7 +70,7 @@ func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) { def = strings.ToLower(def) } - colDefs.Store(c.Name, &Column{ + colDefs = append(colDefs, &Column{ Name: c.Name, SQLType: c.DataType, VarcharLen: c.VarcharLen, @@ -103,7 +101,7 @@ func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) { } } - dbSchema.Tables.Store(table.Name, &Table{ + dbSchema.Tables = append(dbSchema.Tables, &Table{ Schema: table.Schema, Name: table.Name, Columns: colDefs, @@ -113,10 +111,14 @@ func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) { } for _, fk := range fks { - dbSchema.ForeignKeys[sqlschema.ForeignKey{ + dbFK := sqlschema.ForeignKey{ From: sqlschema.NewColumnReference(fk.SourceTable, fk.SourceColumns...), To: sqlschema.NewColumnReference(fk.TargetTable, fk.TargetColumns...), - }] = fk.ConstraintName + } + if _, exclude := in.ExcludeForeignKeys[dbFK]; exclude { + continue + } + dbSchema.ForeignKeys[dbFK] = fk.ConstraintName } return dbSchema, nil } @@ -185,7 +187,7 @@ FROM information_schema.tables "t" WHERE table_type = 'BASE TABLE' AND "t".table_schema = ? AND "t".table_schema NOT LIKE 'pg_%' - AND "table_name" NOT IN (?) + AND "table_name" NOT LIKE ALL (ARRAY[?]) ORDER BY "t".table_schema, "t".table_name ` @@ -291,7 +293,8 @@ WHERE co.contype = 'f' AND co.conrelid IN (SELECT oid FROM pg_class WHERE relkind = 'r') AND ARRAY_POSITION(co.conkey, sc.attnum) = ARRAY_POSITION(co.confkey, tc.attnum) AND ss.nspname = ? - AND s.relname NOT IN (?) AND "t".relname NOT IN (?) 
+ AND s.relname NOT LIKE ALL (ARRAY[?]) + AND "t".relname NOT LIKE ALL (ARRAY[?]) GROUP BY "constraint_name", "schema_name", "table_name", target_schema, target_table ` ) diff --git a/vendor/github.com/uptrace/bun/dialect/pgdialect/sqltype.go b/vendor/github.com/uptrace/bun/dialect/pgdialect/sqltype.go index 121a3d691..5f35a29ec 100644 --- a/vendor/github.com/uptrace/bun/dialect/pgdialect/sqltype.go +++ b/vendor/github.com/uptrace/bun/dialect/pgdialect/sqltype.go @@ -127,6 +127,9 @@ var ( char = newAliases(pgTypeChar, pgTypeCharacter) varchar = newAliases(pgTypeVarchar, pgTypeCharacterVarying) timestampTz = newAliases(sqltype.Timestamp, pgTypeTimestampTz, pgTypeTimestampWithTz) + bigint = newAliases(sqltype.BigInt, pgTypeBigSerial) + integer = newAliases(sqltype.Integer, pgTypeSerial) + smallint = newAliases(sqltype.SmallInt, pgTypeSmallSerial) ) func (d *Dialect) CompareType(col1, col2 sqlschema.Column) bool { @@ -143,6 +146,10 @@ func (d *Dialect) CompareType(col1, col2 sqlschema.Column) bool { return checkVarcharLen(col1, col2, d.DefaultVarcharLen()) case timestampTz.IsAlias(typ1) && timestampTz.IsAlias(typ2): return true + case bigint.IsAlias(typ1) && bigint.IsAlias(typ2), + integer.IsAlias(typ1) && integer.IsAlias(typ2), + smallint.IsAlias(typ1) && smallint.IsAlias(typ2): + return true } return false } diff --git a/vendor/github.com/uptrace/bun/dialect/pgdialect/version.go b/vendor/github.com/uptrace/bun/dialect/pgdialect/version.go index d646f564f..c774ccc50 100644 --- a/vendor/github.com/uptrace/bun/dialect/pgdialect/version.go +++ b/vendor/github.com/uptrace/bun/dialect/pgdialect/version.go @@ -2,5 +2,5 @@ package pgdialect // Version is the current release version. func Version() string { - return "1.2.11" + return "1.2.14" } diff --git a/vendor/github.com/uptrace/bun/dialect/sqlitedialect/version.go b/vendor/github.com/uptrace/bun/dialect/sqlitedialect/version.go index d03bb5e9d..8e82dcb5d 100644 --- a/vendor/github.com/uptrace/bun/dialect/sqlitedialect/version.go +++ b/vendor/github.com/uptrace/bun/dialect/sqlitedialect/version.go @@ -2,5 +2,5 @@ package sqlitedialect // Version is the current release version. func Version() string { - return "1.2.11" + return "1.2.14" } diff --git a/vendor/github.com/uptrace/bun/migrate/auto.go b/vendor/github.com/uptrace/bun/migrate/auto.go index 8656902ca..51868d50d 100644 --- a/vendor/github.com/uptrace/bun/migrate/auto.go +++ b/vendor/github.com/uptrace/bun/migrate/auto.go @@ -17,17 +17,21 @@ import ( type AutoMigratorOption func(m *AutoMigrator) -// WithModel adds a bun.Model to the scope of migrations. +// WithModel adds a bun.Model to the migration scope. func WithModel(models ...interface{}) AutoMigratorOption { return func(m *AutoMigrator) { m.includeModels = append(m.includeModels, models...) } } -// WithExcludeTable tells the AutoMigrator to ignore a table in the database. +// WithExcludeTable tells AutoMigrator to exclude database tables from the migration scope. // This prevents AutoMigrator from dropping tables which may exist in the schema // but which are not used by the application. // +// Expressions may make use of the wildcards supported by the SQL LIKE operator: +// - % as a wildcard +// - _ as a single character +// // Do not exclude tables included via WithModel, as BunModelInspector ignores this setting. 
func WithExcludeTable(tables ...string) AutoMigratorOption { return func(m *AutoMigrator) { @@ -35,7 +39,17 @@ func WithExcludeTable(tables ...string) AutoMigratorOption { } } -// WithSchemaName changes the default database schema to migrate objects in. +// WithExcludeForeignKeys tells AutoMigrator to exclude a foreign key constraint +// from the migration scope. This prevents AutoMigrator from dropping foreign keys +// that are defined manually via CreateTableQuery.ForeignKey(). +func WithExcludeForeignKeys(fks ...sqlschema.ForeignKey) AutoMigratorOption { + return func(m *AutoMigrator) { + m.excludeForeignKeys = append(m.excludeForeignKeys, fks...) + } +} + +// WithSchemaName sets the database schema to migrate objects in. +// By default, dialects' default schema is used. func WithSchemaName(schemaName string) AutoMigratorOption { return func(m *AutoMigrator) { m.schemaName = schemaName @@ -82,7 +96,7 @@ func WithMigrationsDirectoryAuto(directory string) AutoMigratorOption { // database schema automatically. // // Usage: -// 1. Generate migrations and apply them au once with AutoMigrator.Migrate(). +// 1. Generate migrations and apply them at once with AutoMigrator.Migrate(). // 2. Create up- and down-SQL migration files and apply migrations using Migrator.Migrate(). // // While both methods produce complete, reversible migrations (with entries in the database @@ -124,8 +138,8 @@ type AutoMigrator struct { // includeModels define the migration scope. includeModels []interface{} - // excludeTables are excluded from database inspection. - excludeTables []string + excludeTables []string // excludeTables are excluded from database inspection. + excludeForeignKeys []sqlschema.ForeignKey // excludeForeignKeys are excluded from database inspection. // diffOpts are passed to detector constructor. diffOpts []diffOption @@ -150,7 +164,11 @@ func NewAutoMigrator(db *bun.DB, opts ...AutoMigratorOption) (*AutoMigrator, err } am.excludeTables = append(am.excludeTables, am.table, am.locksTable) - dbInspector, err := sqlschema.NewInspector(db, sqlschema.WithSchemaName(am.schemaName), sqlschema.WithExcludeTables(am.excludeTables...)) + dbInspector, err := sqlschema.NewInspector(db, + sqlschema.WithSchemaName(am.schemaName), + sqlschema.WithExcludeTables(am.excludeTables...), + sqlschema.WithExcludeForeignKeys(am.excludeForeignKeys...), + ) if err != nil { return nil, err } @@ -252,12 +270,12 @@ func (am *AutoMigrator) createSQLMigrations(ctx context.Context, transactional b migrations := NewMigrations(am.migrationsOpts...) migrations.Add(Migration{ Name: name, - Up: changes.Up(am.dbMigrator), - Down: changes.Down(am.dbMigrator), + Up: wrapMigrationFunc(changes.Up(am.dbMigrator)), + Down: wrapMigrationFunc(changes.Down(am.dbMigrator)), Comment: "Changes detected by bun.AutoMigrator", }) - // Append .tx.up.sql or .up.sql to migration name, dependin if it should be transactional.
fname := func(direction string) string { return name + map[bool]string{true: ".tx.", false: "."}[transactional] + direction + ".sql" } @@ -336,7 +354,7 @@ func (c *changeset) apply(ctx context.Context, db *bun.DB, m sqlschema.Migrator) } for _, op := range c.operations { - if _, isComment := op.(*comment); isComment { + if _, skip := op.(*Unimplemented); skip { continue } @@ -359,17 +377,22 @@ func (c *changeset) WriteTo(w io.Writer, m sqlschema.Migrator) error { b := internal.MakeQueryBytes() for _, op := range c.operations { - if c, isComment := op.(*comment); isComment { + if comment, isComment := op.(*Unimplemented); isComment { b = append(b, "/*\n"...) - b = append(b, *c...) + b = append(b, *comment...) b = append(b, "\n*/"...) continue } - b, err = m.AppendSQL(b, op) + // Append each query separately, merge later. + // Dialects assume that the []byte only holds + // the contents of a single query and may be misled. + queryBytes := internal.MakeQueryBytes() + queryBytes, err = m.AppendSQL(queryBytes, op) if err != nil { return fmt.Errorf("write changeset: %w", err) } + b = append(b, queryBytes...) b = append(b, ";\n"...) } if _, err := w.Write(b); err != nil { @@ -409,7 +432,7 @@ func (c *changeset) ResolveDependencies() error { } // visit iterates over c.operations until it finds all operations that depend on the current one - // or runs into cirtular dependency, in which case it will return an error. + // or runs into circular dependency, in which case it will return an error. visit = func(op Operation) error { switch status[op] { case visited: diff --git a/vendor/github.com/uptrace/bun/migrate/diff.go b/vendor/github.com/uptrace/bun/migrate/diff.go index e05d54b7d..d12c11cb5 100644 --- a/vendor/github.com/uptrace/bun/migrate/diff.go +++ b/vendor/github.com/uptrace/bun/migrate/diff.go @@ -1,6 +1,7 @@ package migrate import ( + "github.com/uptrace/bun/internal/ordered" "github.com/uptrace/bun/migrate/sqlschema" ) @@ -22,8 +23,8 @@ func diff(got, want sqlschema.Database, opts ...diffOption) *changeset { } func (d *detector) detectChanges() *changeset { - currentTables := d.current.GetTables() - targetTables := d.target.GetTables() + currentTables := toOrderedMap(d.current.GetTables()) + targetTables := toOrderedMap(d.target.GetTables()) RenameCreate: for _, wantPair := range targetTables.Pairs() { @@ -99,10 +100,19 @@ RenameCreate: return &d.changes } -// detechColumnChanges finds renamed columns and, if checkType == true, columns with changed type. +// toOrderedMap transforms a slice of objects to an ordered map, using return of GetName() as key. +func toOrderedMap[V interface{ GetName() string }](named []V) *ordered.Map[string, V] { + m := ordered.NewMap[string, V]() + for _, v := range named { + m.Store(v.GetName(), v) + } + return m +} + +// detectColumnChanges finds renamed columns and, if checkType == true, columns with changed type. func (d *detector) detectColumnChanges(current, target sqlschema.Table, checkType bool) { - currentColumns := current.GetColumns() - targetColumns := target.GetColumns() + currentColumns := toOrderedMap(current.GetColumns()) + targetColumns := toOrderedMap(target.GetColumns()) ChangeRename: for _, tPair := range targetColumns.Pairs() { @@ -265,7 +275,7 @@ type detector struct { // cmpType determines column type equivalence. // Default is direct comparison with '==' operator, which is inaccurate // due to the existence of dialect-specific type aliases. The caller - // should pass a concrete InspectorDialect.EquuivalentType for robust comparison. 
+ // should pass a concrete InspectorDialect.EquivalentType for robust comparison. cmpType CompareTypeFunc } @@ -283,7 +293,7 @@ func (d detector) equalColumns(col1, col2 sqlschema.Column) bool { } func (d detector) makeTargetColDef(current, target sqlschema.Column) sqlschema.Column { - // Avoid unneccessary type-change migrations if the types are equivalent. + // Avoid unnecessary type-change migrations if the types are equivalent. if d.cmpType(current, target) { target = &sqlschema.BaseColumn{ Name: target.GetName(), @@ -311,8 +321,7 @@ func equalSignatures(t1, t2 sqlschema.Table, eq CompareTypeFunc) bool { // signature is a set of column definitions, which allows "relation/name-agnostic" comparison between them; // meaning that two columns are considered equal if their types are the same. type signature struct { - - // underlying stores the number of occurences for each unique column type. + // underlying stores the number of occurrences for each unique column type. // It helps to account for the fact that a table might have multiple columns that have the same type. underlying map[sqlschema.BaseColumn]int @@ -330,7 +339,7 @@ func newSignature(t sqlschema.Table, eq CompareTypeFunc) signature { // scan iterates over table's field and counts occurrences of each unique column definition. func (s *signature) scan(t sqlschema.Table) { - for _, icol := range t.GetColumns().Values() { + for _, icol := range t.GetColumns() { scanCol := icol.(*sqlschema.BaseColumn) // This is slightly more expensive than if the columns could be compared directly // and we always did s.underlying[col]++, but we get type-equivalence in return. @@ -368,7 +377,7 @@ func (s *signature) Equals(other signature) bool { } // refMap is a utility for tracking superficial changes in foreign keys, -// which do not require any modificiation in the database. +// which do not require any modification in the database. // Modern SQL dialects automatically updated foreign key constraints whenever // a column or a table is renamed. Detector can use refMap to ignore any // differences in foreign keys which were caused by renamed column/table. 
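The new foreign-key exclusion plumbing (the WithExcludeForeignKeys AutoMigrator option above, together with the matching sqlschema inspector option further below) can be exercised roughly as follows. This is a hedged sketch: the orders/users tables and their columns are invented, and `db` is assumed to be an already configured `*bun.DB`.

```go
package migrations

import (
	"log"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
	"github.com/uptrace/bun/migrate/sqlschema"
)

// newAutoMigrator keeps a hand-written constraint out of the migration scope
// so that AutoMigrator does not generate a DROP CONSTRAINT for it.
// Table and column names are placeholders.
func newAutoMigrator(db *bun.DB) *migrate.AutoMigrator {
	manualFK := sqlschema.ForeignKey{
		From: sqlschema.NewColumnReference("orders", "user_id"),
		To:   sqlschema.NewColumnReference("users", "id"),
	}

	am, err := migrate.NewAutoMigrator(db,
		migrate.WithExcludeForeignKeys(manualFK),
	)
	if err != nil {
		log.Fatal(err)
	}
	return am
}
```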
diff --git a/vendor/github.com/uptrace/bun/migrate/migration.go b/vendor/github.com/uptrace/bun/migrate/migration.go index 3f4076d2b..4d60a5858 100644 --- a/vendor/github.com/uptrace/bun/migrate/migration.go +++ b/vendor/github.com/uptrace/bun/migrate/migration.go @@ -9,6 +9,7 @@ import ( "io/fs" "sort" "strings" + "text/template" "time" "github.com/uptrace/bun" @@ -23,8 +24,8 @@ type Migration struct { GroupID int64 MigratedAt time.Time `bun:",notnull,nullzero,default:current_timestamp"` - Up MigrationFunc `bun:"-"` - Down MigrationFunc `bun:"-"` + Up internalMigrationFunc `bun:"-"` + Down internalMigrationFunc `bun:"-"` } func (m Migration) String() string { @@ -35,23 +36,57 @@ func (m Migration) IsApplied() bool { return m.ID > 0 } +type internalMigrationFunc func(ctx context.Context, db *bun.DB, templateData any) error + type MigrationFunc func(ctx context.Context, db *bun.DB) error -func NewSQLMigrationFunc(fsys fs.FS, name string) MigrationFunc { - return func(ctx context.Context, db *bun.DB) error { +func NewSQLMigrationFunc(fsys fs.FS, name string) internalMigrationFunc { + return func(ctx context.Context, db *bun.DB, templateData any) error { f, err := fsys.Open(name) if err != nil { return err } isTx := strings.HasSuffix(name, ".tx.up.sql") || strings.HasSuffix(name, ".tx.down.sql") - return Exec(ctx, db, f, isTx) + return Exec(ctx, db, f, templateData, isTx) } } +func wrapMigrationFunc(fn MigrationFunc) internalMigrationFunc { + return func(ctx context.Context, db *bun.DB, templateData any) error { + return fn(ctx, db) + } +} + +func renderTemplate(contents []byte, templateData any) (*bytes.Buffer, error) { + tmpl, err := template.New("migration").Parse(string(contents)) + if err != nil { + return nil, fmt.Errorf("failed to parse template: %w", err) + } + + var rendered bytes.Buffer + if err := tmpl.Execute(&rendered, templateData); err != nil { + return nil, fmt.Errorf("failed to execute template: %w", err) + } + + return &rendered, nil +} + // Exec reads and executes the SQL migration in the f. 
-func Exec(ctx context.Context, db *bun.DB, f io.Reader, isTx bool) error { - scanner := bufio.NewScanner(f) +func Exec(ctx context.Context, db *bun.DB, f io.Reader, templateData any, isTx bool) error { + contents, err := io.ReadAll(f) + if err != nil { + return err + } + var reader io.Reader = bytes.NewReader(contents) + if templateData != nil { + buf, err := renderTemplate(contents, templateData) + if err != nil { + return err + } + reader = buf + } + scanner := bufio.NewScanner(reader) var queries []string var query []byte diff --git a/vendor/github.com/uptrace/bun/migrate/migrations.go b/vendor/github.com/uptrace/bun/migrate/migrations.go index 1a7ea5668..a22e615cb 100644 --- a/vendor/github.com/uptrace/bun/migrate/migrations.go +++ b/vendor/github.com/uptrace/bun/migrate/migrations.go @@ -58,8 +58,8 @@ func (m *Migrations) Register(up, down MigrationFunc) error { m.Add(Migration{ Name: name, Comment: comment, - Up: up, - Down: down, + Up: wrapMigrationFunc(up), + Down: wrapMigrationFunc(down), }) return nil diff --git a/vendor/github.com/uptrace/bun/migrate/migrator.go b/vendor/github.com/uptrace/bun/migrate/migrator.go index d5a72aec0..a325c3993 100644 --- a/vendor/github.com/uptrace/bun/migrate/migrator.go +++ b/vendor/github.com/uptrace/bun/migrate/migrator.go @@ -41,6 +41,12 @@ func WithMarkAppliedOnSuccess(enabled bool) MigratorOption { } } +func WithTemplateData(data any) MigratorOption { + return func(m *Migrator) { + m.templateData = data + } +} + type Migrator struct { db *bun.DB migrations *Migrations @@ -50,6 +56,8 @@ type Migrator struct { table string locksTable string markAppliedOnSuccess bool + + templateData any } func NewMigrator(db *bun.DB, migrations *Migrations, opts ...MigratorOption) *Migrator { @@ -168,7 +176,7 @@ func (m *Migrator) Migrate(ctx context.Context, opts ...MigrationOption) (*Migra group.Migrations = migrations[:i+1] if !cfg.nop && migration.Up != nil { - if err := migration.Up(ctx, m.db); err != nil { + if err := migration.Up(ctx, m.db, m.templateData); err != nil { return group, err } } @@ -207,7 +215,7 @@ func (m *Migrator) Rollback(ctx context.Context, opts ...MigrationOption) (*Migr } if !cfg.nop && migration.Down != nil { - if err := migration.Down(ctx, m.db); err != nil { + if err := migration.Down(ctx, m.db, m.templateData); err != nil { return lastGroup, err } } diff --git a/vendor/github.com/uptrace/bun/migrate/operations.go b/vendor/github.com/uptrace/bun/migrate/operations.go index 7b749c5a0..cede23d73 100644 --- a/vendor/github.com/uptrace/bun/migrate/operations.go +++ b/vendor/github.com/uptrace/bun/migrate/operations.go @@ -17,7 +17,7 @@ import ( // about the applied change. // // Some operations might be irreversible due to technical limitations. Returning -// a *comment from GetReverse() will add an explanatory note to the generate migation file. +// a *comment from GetReverse() will add an explanatory note to the generate migration file. // // To declare dependency on another Operation, operations should implement // { DependsOn(Operation) bool } interface, which Changeset will use to resolve dependencies. @@ -56,7 +56,7 @@ func (op *DropTableOp) DependsOn(another Operation) bool { // GetReverse for a DropTable returns a no-op migration. Logically, CreateTable is the reverse, // but DropTable does not have the table's definition to create one. 
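The migration.go and migrator.go changes above run SQL migration files through Go's text/template before execution, with data supplied via the new WithTemplateData option. The sketch below shows one plausible wiring; the embedded migrations/ directory, the `{{ .Schema }}` placeholder, and the "Schema" key are all assumptions for illustration.

```go
package migrations

import (
	"context"
	"embed"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

// Assumes *.sql files exist under ./migrations, e.g. containing
//   CREATE INDEX IF NOT EXISTS notes_created_idx ON {{ .Schema }}.notes (created_at);
//
//go:embed migrations/*.sql
var sqlMigrations embed.FS

// Run discovers the embedded migrations and applies them, rendering each file
// with the provided template data first. The "Schema" key is illustrative.
func Run(ctx context.Context, db *bun.DB) error {
	m := migrate.NewMigrations()
	if err := m.Discover(sqlMigrations); err != nil {
		return err
	}

	migrator := migrate.NewMigrator(db, m,
		migrate.WithTemplateData(map[string]any{"Schema": "app"}),
	)
	if err := migrator.Init(ctx); err != nil { // creates the bookkeeping tables
		return err
	}
	_, err := migrator.Migrate(ctx)
	return err
}
```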
func (op *DropTableOp) GetReverse() Operation { - c := comment(fmt.Sprintf("WARNING: \"DROP TABLE %s\" cannot be reversed automatically because table definition is not available", op.TableName)) + c := Unimplemented(fmt.Sprintf("WARNING: \"DROP TABLE %s\" cannot be reversed automatically because table definition is not available", op.TableName)) return &c } @@ -224,7 +224,6 @@ func (op *AddUniqueConstraintOp) DependsOn(another Operation) bool { default: return false } - } // DropUniqueConstraintOp drops a UNIQUE constraint. @@ -326,15 +325,15 @@ func (op *ChangePrimaryKeyOp) GetReverse() Operation { } } -// comment denotes an Operation that cannot be executed. +// Unimplemented denotes an Operation that cannot be executed. // // Operations, which cannot be reversed due to current technical limitations, -// may return &comment with a helpful message from their GetReverse() method. +// may have their GetReverse() return &Unimplemented with a helpful message. // -// Chnagelog should skip it when applying operations or output as log message, -// and write it as an SQL comment when creating migration files. -type comment string +// When applying operations, changelog should skip it or output as a log message, +// and write it as an SQL comment when creating migration files. +type Unimplemented string -var _ Operation = (*comment)(nil) +var _ Operation = (*Unimplemented)(nil) -func (c *comment) GetReverse() Operation { return c } +func (reason *Unimplemented) GetReverse() Operation { return reason } diff --git a/vendor/github.com/uptrace/bun/migrate/sqlschema/database.go b/vendor/github.com/uptrace/bun/migrate/sqlschema/database.go index eb7476c54..3741f0c5d 100644 --- a/vendor/github.com/uptrace/bun/migrate/sqlschema/database.go +++ b/vendor/github.com/uptrace/bun/migrate/sqlschema/database.go @@ -4,12 +4,11 @@ import ( "slices" "strings" - "github.com/uptrace/bun/internal/ordered" "github.com/uptrace/bun/schema" ) type Database interface { - GetTables() *ordered.Map[string, Table] + GetTables() []Table GetForeignKeys() map[ForeignKey]string } var _ Database = (*BaseDatabase)(nil) // Dialects and only dialects can use it to implement the Database interface. // Other packages must use the Database interface. type BaseDatabase struct { - Tables *ordered.Map[string, Table] + Tables []Table ForeignKeys map[ForeignKey]string } -func (ds BaseDatabase) GetTables() *ordered.Map[string, Table] { +func (ds BaseDatabase) GetTables() []Table { return ds.Tables } diff --git a/vendor/github.com/uptrace/bun/migrate/sqlschema/inspector.go b/vendor/github.com/uptrace/bun/migrate/sqlschema/inspector.go index 19d1dc469..d7333e8a9 100644 --- a/vendor/github.com/uptrace/bun/migrate/sqlschema/inspector.go +++ b/vendor/github.com/uptrace/bun/migrate/sqlschema/inspector.go @@ -7,7 +7,6 @@ import ( "strings" "github.com/uptrace/bun" - "github.com/uptrace/bun/internal/ordered" "github.com/uptrace/bun/schema" ) @@ -30,12 +29,23 @@ type InspectorDialect interface { // InspectorConfig controls the scope of migration by limiting the objects Inspector should return. // Inspectors SHOULD use the configuration directly instead of copying it, or MAY choose to embed it, // to make sure options are always applied correctly. +// +// ExcludeTables and ExcludeForeignKeys are intended for database inspectors, +// to compensate for the fact that model structs may not wholly reflect the +// state of the database schema.
+// Database inspectors MUST respect these exclusions to prevent relations +// from being dropped unintentionally. type InspectorConfig struct { // SchemaName limits inspection to tables in a particular schema. SchemaName string - // ExcludeTables from inspection. + // ExcludeTables from inspection. Patterns MAY make use of wildcards + // like % and _ and dialects MUST acknowledge that by using them + // with the SQL LIKE operator. ExcludeTables []string + + // ExcludeForeignKeys from inspection. + ExcludeForeignKeys map[ForeignKey]string } // Inspector reads schema state. @@ -49,13 +59,26 @@ func WithSchemaName(schemaName string) InspectorOption { } } -// WithExcludeTables works in append-only mode, i.e. tables cannot be re-included. +// WithExcludeTables forces inspector to exclude tables from the reported schema state. +// It works in append-only mode, i.e. tables cannot be re-included. +// +// Patterns MAY make use of % and _ wildcards, as if writing a LIKE clause in SQL. func WithExcludeTables(tables ...string) InspectorOption { return func(cfg *InspectorConfig) { cfg.ExcludeTables = append(cfg.ExcludeTables, tables...) } } +// WithExcludeForeignKeys forces inspector to exclude foreign keys +// from the reported schema state. +func WithExcludeForeignKeys(fks ...ForeignKey) InspectorOption { + return func(cfg *InspectorConfig) { + for _, fk := range fks { + cfg.ExcludeForeignKeys[fk] = "" + } + } +} + // NewInspector creates a new database inspector, if the dialect supports it. func NewInspector(db *bun.DB, options ...InspectorOption) (Inspector, error) { dialect, ok := (db.Dialect()).(InspectorDialect) @@ -78,6 +101,9 @@ func NewBunModelInspector(tables *schema.Tables, options ...InspectorOption) *Bu type InspectorOption func(*InspectorConfig) func ApplyInspectorOptions(cfg *InspectorConfig, options ...InspectorOption) { + if cfg.ExcludeForeignKeys == nil { + cfg.ExcludeForeignKeys = make(map[ForeignKey]string) + } for _, opt := range options { opt(cfg) } @@ -90,6 +116,10 @@ type inspector struct { // BunModelInspector creates the current project state from the passed bun.Models. // Do not recycle BunModelInspector for different sets of models, as older models will not be de-registerred before the next run. +// +// BunModelInspector does not know the database's dialect, so it does not +// assume any default schema name. Always specify the target schema name via +// the WithSchemaName option to receive meaningful results.
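A short, hypothetical sketch of driving the database inspector directly with the options above; it also reflects the move from ordered maps to slices, since GetTables and GetColumns now return []Table and []Column. The schema name and the exclusion pattern are placeholders.

```go
package inspect

import (
	"context"
	"fmt"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate/sqlschema"
)

// DumpSchema prints the inspected tables and their columns.
// db is assumed to be an already configured *bun.DB whose dialect
// implements sqlschema.InspectorDialect (e.g. pgdialect).
func DumpSchema(ctx context.Context, db *bun.DB) error {
	inspector, err := sqlschema.NewInspector(db,
		sqlschema.WithSchemaName("public"),            // placeholder schema
		sqlschema.WithExcludeTables("bun_migration%"), // LIKE-style pattern
	)
	if err != nil {
		return err
	}

	state, err := inspector.Inspect(ctx)
	if err != nil {
		return err
	}

	// GetTables and GetColumns now return slices rather than ordered maps.
	for _, table := range state.GetTables() {
		fmt.Printf("table %s.%s\n", table.GetSchema(), table.GetName())
		for _, col := range table.GetColumns() {
			fmt.Printf("  column %s\n", col.GetName())
		}
	}
	return nil
}
```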
type BunModelInspector struct { InspectorConfig tables *schema.Tables @@ -102,21 +132,21 @@ func (bmi *BunModelInspector) Inspect(ctx context.Context) (Database, error) { BaseDatabase: BaseDatabase{ ForeignKeys: make(map[ForeignKey]string), }, - Tables: ordered.NewMap[string, Table](), } for _, t := range bmi.tables.All() { if t.Schema != bmi.SchemaName { continue } - columns := ordered.NewMap[string, Column]() + var columns []Column for _, f := range t.Fields { sqlType, length, err := parseLen(f.CreateTableSQLType) if err != nil { return nil, fmt.Errorf("parse length in %q: %w", f.CreateTableSQLType, err) } - columns.Store(f.Name, &BaseColumn{ + + columns = append(columns, &BaseColumn{ Name: f.Name, SQLType: strings.ToLower(sqlType), // TODO(dyma): maybe this is not necessary after Column.Eq() VarcharLen: length, @@ -162,7 +192,7 @@ func (bmi *BunModelInspector) Inspect(ctx context.Context) (Database, error) { // produces // schema.Table{ Schema: "favourite", Name: "favourite.books" } tableName := strings.TrimPrefix(t.Name, t.Schema+".") - state.Tables.Store(tableName, &BunTable{ + state.Tables = append(state.Tables, &BunTable{ BaseTable: BaseTable{ Schema: t.Schema, Name: tableName, @@ -212,7 +242,7 @@ func parseLen(typ string) (string, int, error) { } // exprOrLiteral converts string to lowercase, if it does not contain a string literal 'lit' -// and trims the surrounding '' otherwise. +// and trims the surrounding ” otherwise. // Use it to ensure that user-defined default values in the models are always comparable // to those returned by the database inspector, regardless of the case convention in individual drivers. func exprOrLiteral(s string) string { @@ -226,10 +256,10 @@ func exprOrLiteral(s string) string { type BunModelSchema struct { BaseDatabase - Tables *ordered.Map[string, Table] + Tables []Table } -func (ms BunModelSchema) GetTables() *ordered.Map[string, Table] { +func (ms BunModelSchema) GetTables() []Table { return ms.Tables } diff --git a/vendor/github.com/uptrace/bun/migrate/sqlschema/table.go b/vendor/github.com/uptrace/bun/migrate/sqlschema/table.go index ec9b77f69..5e48d9adf 100644 --- a/vendor/github.com/uptrace/bun/migrate/sqlschema/table.go +++ b/vendor/github.com/uptrace/bun/migrate/sqlschema/table.go @@ -1,13 +1,9 @@ package sqlschema -import ( - "github.com/uptrace/bun/internal/ordered" -) - type Table interface { GetSchema() string GetName() string - GetColumns() *ordered.Map[string, Column] + GetColumns() []Column GetPrimaryKey() *PrimaryKey GetUniqueConstraints() []Unique } @@ -23,7 +19,7 @@ type BaseTable struct { Name string // ColumnDefinitions map each column name to the column definition. - Columns *ordered.Map[string, Column] + Columns []Column // PrimaryKey holds the primary key definition. // A nil value means that no primary key is defined for the table. 
@@ -47,7 +43,7 @@ func (td *BaseTable) GetName() string { return td.Name } -func (td *BaseTable) GetColumns() *ordered.Map[string, Column] { +func (td *BaseTable) GetColumns() []Column { return td.Columns } diff --git a/vendor/github.com/uptrace/bun/model.go b/vendor/github.com/uptrace/bun/model.go index 6254fc3ed..26133853e 100644 --- a/vendor/github.com/uptrace/bun/model.go +++ b/vendor/github.com/uptrace/bun/model.go @@ -39,6 +39,7 @@ type TableModel interface { getJoin(string) *relationJoin getJoins() []relationJoin addJoin(relationJoin) *relationJoin + clone() TableModel rootValue() reflect.Value parentIndex() []int diff --git a/vendor/github.com/uptrace/bun/model_map.go b/vendor/github.com/uptrace/bun/model_map.go index d7342576f..be3548382 100644 --- a/vendor/github.com/uptrace/bun/model_map.go +++ b/vendor/github.com/uptrace/bun/model_map.go @@ -5,7 +5,7 @@ import ( "context" "database/sql" "reflect" - "sort" + "slices" "github.com/uptrace/bun/schema" ) @@ -121,7 +121,7 @@ func (m *mapModel) appendColumnsValues(fmter schema.Formatter, b []byte) []byte for k := range m.m { keys = append(keys, k) } - sort.Strings(keys) + slices.Sort(keys) b = append(b, " ("...) @@ -157,7 +157,7 @@ func (m *mapModel) appendSet(fmter schema.Formatter, b []byte) []byte { for k := range m.m { keys = append(keys, k) } - sort.Strings(keys) + slices.Sort(keys) isTemplate := fmter.IsNop() for i, k := range keys { diff --git a/vendor/github.com/uptrace/bun/model_map_slice.go b/vendor/github.com/uptrace/bun/model_map_slice.go index 8e4a22f6b..472b68d2d 100644 --- a/vendor/github.com/uptrace/bun/model_map_slice.go +++ b/vendor/github.com/uptrace/bun/model_map_slice.go @@ -4,7 +4,7 @@ import ( "context" "database/sql" "errors" - "sort" + "slices" "github.com/uptrace/bun/dialect/feature" "github.com/uptrace/bun/schema" @@ -155,7 +155,7 @@ func (m *mapSliceModel) initKeys() error { keys = append(keys, k) } - sort.Strings(keys) + slices.Sort(keys) m.keys = keys return nil diff --git a/vendor/github.com/uptrace/bun/model_table_has_many.go b/vendor/github.com/uptrace/bun/model_table_has_many.go index dd74a774c..a3ff6a824 100644 --- a/vendor/github.com/uptrace/bun/model_table_has_many.go +++ b/vendor/github.com/uptrace/bun/model_table_has_many.go @@ -130,6 +130,16 @@ func (m *hasManyModel) parkStruct() error { return nil } +func (m *hasManyModel) clone() TableModel { + return &hasManyModel{ + sliceTableModel: m.sliceTableModel.clone().(*sliceTableModel), + baseTable: m.baseTable, + rel: m.rel, + baseValues: m.baseValues, + structKey: m.structKey, + } +} + func baseValues(model TableModel, fields []*schema.Field) map[internal.MapKey][]reflect.Value { fieldIndex := model.Relation().Field.Index m := make(map[internal.MapKey][]reflect.Value) diff --git a/vendor/github.com/uptrace/bun/model_table_m2m.go b/vendor/github.com/uptrace/bun/model_table_m2m.go index 1a6b1b46a..dfc4783c7 100644 --- a/vendor/github.com/uptrace/bun/model_table_m2m.go +++ b/vendor/github.com/uptrace/bun/model_table_m2m.go @@ -130,3 +130,13 @@ func (m *m2mModel) parkStruct() error { return nil } + +func (m *m2mModel) clone() TableModel { + return &m2mModel{ + sliceTableModel: m.sliceTableModel.clone().(*sliceTableModel), + baseTable: m.baseTable, + rel: m.rel, + baseValues: m.baseValues, + structKey: m.structKey, + } +} diff --git a/vendor/github.com/uptrace/bun/model_table_slice.go b/vendor/github.com/uptrace/bun/model_table_slice.go index 67b421460..b10319f2b 100644 --- a/vendor/github.com/uptrace/bun/model_table_slice.go +++ 
b/vendor/github.com/uptrace/bun/model_table_slice.go @@ -124,3 +124,13 @@ func (m *sliceTableModel) updateSoftDeleteField(tm time.Time) error { } return nil } + +func (m *sliceTableModel) clone() TableModel { + return &sliceTableModel{ + structTableModel: *m.structTableModel.clone().(*structTableModel), + slice: m.slice, + sliceLen: m.sliceLen, + sliceOfPtr: m.sliceOfPtr, + nextElem: m.nextElem, + } +} diff --git a/vendor/github.com/uptrace/bun/model_table_struct.go b/vendor/github.com/uptrace/bun/model_table_struct.go index a8860908e..345d0f5e7 100644 --- a/vendor/github.com/uptrace/bun/model_table_struct.go +++ b/vendor/github.com/uptrace/bun/model_table_struct.go @@ -337,6 +337,23 @@ func (m *structTableModel) AppendNamedArg( return m.table.AppendNamedArg(fmter, b, name, m.strct) } +func (m *structTableModel) clone() TableModel { + return &structTableModel{ + db: m.db, + table: m.table, + rel: m.rel, + joins: append([]relationJoin{}, m.joins...), + dest: m.dest, + root: m.root, + index: append([]int{}, m.index...), + strct: m.strct, + structInited: m.structInited, + structInitErr: m.structInitErr, + columns: append([]string{}, m.columns...), + scanIndex: m.scanIndex, + } +} + // sqlite3 sometimes does not unquote columns. func unquote(s string) string { if s == "" { diff --git a/vendor/github.com/uptrace/bun/package.json b/vendor/github.com/uptrace/bun/package.json index cb1c8d237..740a9c1d5 100644 --- a/vendor/github.com/uptrace/bun/package.json +++ b/vendor/github.com/uptrace/bun/package.json @@ -1,6 +1,6 @@ { "name": "gobun", - "version": "1.2.11", + "version": "1.2.14", "main": "index.js", "repository": "git@github.com:uptrace/bun.git", "author": "Vladimir Mihailenco ", diff --git a/vendor/github.com/uptrace/bun/query_delete.go b/vendor/github.com/uptrace/bun/query_delete.go index bbeb2b4b4..d93eeec00 100644 --- a/vendor/github.com/uptrace/bun/query_delete.go +++ b/vendor/github.com/uptrace/bun/query_delete.go @@ -384,12 +384,13 @@ func (q *DeleteQuery) afterDeleteHook(ctx context.Context) error { return nil } +// String returns the generated SQL query string. The DeleteQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. func (q *DeleteQuery) String() string { buf, err := q.AppendQuery(q.db.Formatter(), nil) if err != nil { panic(err) } - return string(buf) } diff --git a/vendor/github.com/uptrace/bun/query_insert.go b/vendor/github.com/uptrace/bun/query_insert.go index 5b5b78dcf..2705923ee 100644 --- a/vendor/github.com/uptrace/bun/query_insert.go +++ b/vendor/github.com/uptrace/bun/query_insert.go @@ -694,11 +694,12 @@ func (q *InsertQuery) tryLastInsertID(res sql.Result, dest []interface{}) error return nil } +// String returns the generated SQL query string. The InsertQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. func (q *InsertQuery) String() string { buf, err := q.AppendQuery(q.db.Formatter(), nil) if err != nil { panic(err) } - return string(buf) } diff --git a/vendor/github.com/uptrace/bun/query_merge.go b/vendor/github.com/uptrace/bun/query_merge.go index 67dc43974..dd89ce9c9 100644 --- a/vendor/github.com/uptrace/bun/query_merge.go +++ b/vendor/github.com/uptrace/bun/query_merge.go @@ -281,12 +281,13 @@ func (q *MergeQuery) scanOrExec( return res, nil } +// String returns the generated SQL query string. 
The MergeQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. func (q *MergeQuery) String() string { buf, err := q.AppendQuery(q.db.Formatter(), nil) if err != nil { panic(err) } - return string(buf) } diff --git a/vendor/github.com/uptrace/bun/query_raw.go b/vendor/github.com/uptrace/bun/query_raw.go index bfc0d3050..b7dd59089 100644 --- a/vendor/github.com/uptrace/bun/query_raw.go +++ b/vendor/github.com/uptrace/bun/query_raw.go @@ -96,11 +96,12 @@ func (q *RawQuery) Operation() string { return "SELECT" } +// String returns the generated SQL query string. The RawQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. func (q *RawQuery) String() string { buf, err := q.AppendQuery(q.db.Formatter(), nil) if err != nil { panic(err) } - return string(buf) } diff --git a/vendor/github.com/uptrace/bun/query_select.go b/vendor/github.com/uptrace/bun/query_select.go index db7f42df1..0deb8dc05 100644 --- a/vendor/github.com/uptrace/bun/query_select.go +++ b/vendor/github.com/uptrace/bun/query_select.go @@ -979,20 +979,25 @@ func (q *SelectQuery) scanAndCountConcurrently( var mu sync.Mutex var firstErr error + // FIXME: clone should not be needed, because the query is not modified here + // and should not be implicitly modified by the Bun lib. countQuery := q.Clone() - wg.Add(1) - go func() { - defer wg.Done() + // Don't scan results if the user explicitly set Limit(-1). + if q.limit >= 0 { + wg.Add(1) + go func() { + defer wg.Done() - if err := q.Scan(ctx, dest...); err != nil { - mu.Lock() - if firstErr == nil { - firstErr = err + if err := q.Scan(ctx, dest...); err != nil { + mu.Lock() + if firstErr == nil { + firstErr = err + } + mu.Unlock() } - mu.Unlock() - } - }() + }() + } wg.Add(1) go func() { @@ -1016,6 +1021,7 @@ func (q *SelectQuery) scanAndCountConcurrently( func (q *SelectQuery) scanAndCountSeq(ctx context.Context, dest ...interface{}) (int, error) { var firstErr error + // Don't scan results if the user explicitly set Limit(-1). if q.limit >= 0 { firstErr = q.Scan(ctx, dest...) } @@ -1086,12 +1092,13 @@ func (q *SelectQuery) whereExists(ctx context.Context) (bool, error) { return n == 1, nil } +// String returns the generated SQL query string. The SelectQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. 
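As a usage note for the scanAndCount changes above: when the caller sets a negative limit, the scan is skipped entirely, so Limit(-1) becomes a cheap way to request only the total count. A hedged sketch with a placeholder Note model:

```go
package notes

import (
	"context"

	"github.com/uptrace/bun"
)

// Note is a placeholder model used only for this example.
type Note struct {
	bun.BaseModel `bun:"table:notes"`

	ID   int64  `bun:",pk,autoincrement"`
	Text string `bun:"text"`
}

// CountOnly returns the number of notes without scanning any rows:
// with Limit(-1), ScanAndCount runs only the COUNT query.
func CountOnly(ctx context.Context, db *bun.DB) (int, error) {
	var notes []Note
	return db.NewSelect().Model(&notes).Limit(-1).ScanAndCount(ctx)
}
```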
func (q *SelectQuery) String() string { buf, err := q.AppendQuery(q.db.Formatter(), nil) if err != nil { panic(err) } - return string(buf) } @@ -1120,13 +1127,17 @@ func (q *SelectQuery) Clone() *SelectQuery { } } + var tableModel TableModel + if q.tableModel != nil { + tableModel = q.tableModel.clone() + } clone := &SelectQuery{ whereBaseQuery: whereBaseQuery{ baseQuery: baseQuery{ db: q.db, table: q.table, model: q.model, - tableModel: q.tableModel, + tableModel: tableModel, with: make([]withQuery, len(q.with)), tables: cloneArgs(q.tables), columns: cloneArgs(q.columns), diff --git a/vendor/github.com/uptrace/bun/query_table_create.go b/vendor/github.com/uptrace/bun/query_table_create.go index 0ae56bf78..3e1474ed7 100644 --- a/vendor/github.com/uptrace/bun/query_table_create.go +++ b/vendor/github.com/uptrace/bun/query_table_create.go @@ -5,7 +5,7 @@ import ( "context" "database/sql" "fmt" - "sort" + "slices" "strconv" "strings" @@ -276,7 +276,7 @@ func (q *CreateTableQuery) appendUniqueConstraints(fmter schema.Formatter, b []b for key := range unique { keys = append(keys, key) } - sort.Strings(keys) + slices.Sort(keys) for _, key := range keys { if key == "" { @@ -308,8 +308,16 @@ func (q *CreateTableQuery) appendUniqueConstraint( // appendFKConstraintsRel appends a FOREIGN KEY clause for each of the model's existing relations. func (q *CreateTableQuery) appendFKConstraintsRel(fmter schema.Formatter, b []byte) (_ []byte, err error) { - for _, rel := range q.tableModel.Table().Relations { - if rel.References() { + relations := q.tableModel.Table().Relations + + keys := make([]string, 0, len(relations)) + for key := range relations { + keys = append(keys, key) + } + slices.Sort(keys) + + for _, key := range keys { + if rel := relations[key]; rel.References() { b, err = q.appendFK(fmter, b, schema.QueryWithArgs{ Query: "(?) REFERENCES ? (?) ? ?", Args: []interface{}{ @@ -400,11 +408,12 @@ func (q *CreateTableQuery) afterCreateTableHook(ctx context.Context) error { return nil } +// String returns the generated SQL query string. The CreateTableQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. func (q *CreateTableQuery) String() string { buf, err := q.AppendQuery(q.db.Formatter(), nil) if err != nil { panic(err) } - return string(buf) } diff --git a/vendor/github.com/uptrace/bun/query_table_drop.go b/vendor/github.com/uptrace/bun/query_table_drop.go index 78d964d7a..70d69ce64 100644 --- a/vendor/github.com/uptrace/bun/query_table_drop.go +++ b/vendor/github.com/uptrace/bun/query_table_drop.go @@ -165,11 +165,12 @@ func (q *DropTableQuery) afterDropTableHook(ctx context.Context) error { return nil } +// String returns the generated SQL query string. The DropTableQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. func (q *DropTableQuery) String() string { buf, err := q.AppendQuery(q.db.Formatter(), nil) if err != nil { panic(err) } - return string(buf) } diff --git a/vendor/github.com/uptrace/bun/query_update.go b/vendor/github.com/uptrace/bun/query_update.go index beef8851e..2814474d5 100644 --- a/vendor/github.com/uptrace/bun/query_update.go +++ b/vendor/github.com/uptrace/bun/query_update.go @@ -635,12 +635,13 @@ func (q *UpdateQuery) hasTableAlias(fmter schema.Formatter) bool { return fmter.HasFeature(feature.UpdateMultiTable | feature.UpdateTableAlias) } +// String returns the generated SQL query string. 
The UpdateQuery instance must not be +// modified during query generation to ensure multiple calls to String() return identical results. func (q *UpdateQuery) String() string { buf, err := q.AppendQuery(q.db.Formatter(), nil) if err != nil { panic(err) } - return string(buf) } diff --git a/vendor/github.com/uptrace/bun/schema/scan.go b/vendor/github.com/uptrace/bun/schema/scan.go index 9db46cd6f..0c4f8a824 100644 --- a/vendor/github.com/uptrace/bun/schema/scan.go +++ b/vendor/github.com/uptrace/bun/schema/scan.go @@ -5,6 +5,7 @@ import ( "database/sql" "fmt" "net" + "net/netip" "reflect" "strconv" "strings" @@ -102,6 +103,10 @@ func scanner(typ reflect.Type) ScannerFunc { return scanIP case ipNetType: return scanIPNet + case netipAddrType: + return scanNetIpAddr + case netipPrefixType: + return scanNetIpPrefix case jsonRawMessageType: return scanBytes } @@ -413,6 +418,48 @@ func scanIPNet(dest reflect.Value, src interface{}) error { return nil } +func scanNetIpAddr(dest reflect.Value, src interface{}) error { + if src == nil { + return scanNull(dest) + } + + b, err := toBytes(src) + if err != nil { + return err + } + + val, _ := netip.ParseAddr(internal.String(b)) + if !val.IsValid() { + return fmt.Errorf("bun: invalid ip: %q", b) + } + + ptr := dest.Addr().Interface().(*netip.Addr) + *ptr = val + + return nil +} + +func scanNetIpPrefix(dest reflect.Value, src interface{}) error { + if src == nil { + return scanNull(dest) + } + + b, err := toBytes(src) + if err != nil { + return err + } + + val, _ := netip.ParsePrefix(internal.String(b)) + if !val.IsValid() { + return fmt.Errorf("bun: invalid prefix: %q", b) + } + + ptr := dest.Addr().Interface().(*netip.Prefix) + *ptr = val + + return nil +} + func addrScanner(fn ScannerFunc) ScannerFunc { return func(dest reflect.Value, src interface{}) error { if !dest.CanAddr() { diff --git a/vendor/github.com/uptrace/bun/schema/table.go b/vendor/github.com/uptrace/bun/schema/table.go index 93313597b..44d5e5719 100644 --- a/vendor/github.com/uptrace/bun/schema/table.go +++ b/vendor/github.com/uptrace/bun/schema/table.go @@ -1,10 +1,11 @@ package schema import ( + "cmp" "database/sql" "fmt" "reflect" - "sort" + "slices" "strings" "time" @@ -299,15 +300,14 @@ func (t *Table) processFields(typ reflect.Type) { } func sortFieldsByStruct(fields []*Field) { - sort.Slice(fields, func(i, j int) bool { - left, right := fields[i], fields[j] + slices.SortFunc(fields, func(left, right *Field) int { for k := 0; k < len(left.Index) && k < len(right.Index); k++ { - if left.Index[k] != right.Index[k] { - return left.Index[k] < right.Index[k] + if res := cmp.Compare(left.Index[k], right.Index[k]); res != 0 { + return res } } // NOTE: should not reach - return true + return 0 }) } @@ -538,6 +538,7 @@ func (t *Table) newField(sf reflect.StructField, tag tagparser.Tag) *Field { } if tag.HasOption("autoincrement") { field.AutoIncrement = true + field.NotNull = true field.NullZero = true } if tag.HasOption("identity") { diff --git a/vendor/github.com/uptrace/bun/version.go b/vendor/github.com/uptrace/bun/version.go index a7973efeb..f1a7efe4a 100644 --- a/vendor/github.com/uptrace/bun/version.go +++ b/vendor/github.com/uptrace/bun/version.go @@ -2,5 +2,5 @@ package bun // Version is the current release version. 
func Version() string { - return "1.2.11" + return "1.2.14" } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/doc.go index 2b5e78686..fabf952c4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/doc.go @@ -5,6 +5,18 @@ // // The metric events produced are: // +// go.memory.used By Memory used by the Go runtime. +// go.memory.limit By Go runtime memory limit configured by the user, if a limit exists. +// go.memory.allocated By Memory allocated to the heap by the application. +// go.memory.allocations {allocation} Count of allocations to the heap by the application. +// go.memory.gc.goal By Heap size target for the end of the GC cycle. +// go.goroutine.count {goroutine} Count of live goroutines. +// go.processor.limit {thread} The number of OS threads that can execute user-level Go code simultaneously. +// go.config.gogc % Heap size target percentage configured by the user, otherwise 100. +// +// When the OTEL_GO_X_DEPRECATED_RUNTIME_METRICS environment variable is set to +// true, the following deprecated metrics are produced: +// // runtime.go.cgo.calls - Number of cgo calls made by the current process // runtime.go.gc.count - Number of completed garbage collection cycles // runtime.go.gc.pause_ns (ns) Amount of nanoseconds in GC stop-the-world pauses @@ -19,16 +31,4 @@ // runtime.go.mem.heap_sys (bytes) Bytes of heap memory obtained from the OS // runtime.go.mem.live_objects - Number of live objects is the number of cumulative Mallocs - Frees // runtime.uptime (ms) Milliseconds since application was initialized -// -// When the OTEL_GO_X_DEPRECATED_RUNTIME_METRICS environment variable is set to -// false, the metrics produced are: -// -// go.memory.used By Memory used by the Go runtime. -// go.memory.limit By Go runtime memory limit configured by the user, if a limit exists. -// go.memory.allocated By Memory allocated to the heap by the application. -// go.memory.allocations {allocation} Count of allocations to the heap by the application. -// go.memory.gc.goal By Heap size target for the end of the GC cycle. -// go.goroutine.count {goroutine} Count of live goroutines. -// go.processor.limit {thread} The number of OS threads that can execute user-level Go code simultaneously. -// go.config.gogc % Heap size target percentage configured by the user, otherwise 100. package runtime // import "go.opentelemetry.io/contrib/instrumentation/runtime" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/internal/x/README.md b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/internal/x/README.md index a2367651a..00170e1a6 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/internal/x/README.md +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/internal/x/README.md @@ -13,22 +13,13 @@ change in backwards incompatible ways as feedback is applied. ### Include Deprecated Metrics -Once new experimental runtime metrics are added, they will be produced -**in addition to** the existing runtime metrics. Users that migrate right away -can disable the old runtime metrics: - -```console -export OTEL_GO_X_DEPRECATED_RUNTIME_METRICS=false -``` - -In a later release, the deprecated runtime metrics will stop being produced by -default. 
To temporarily re-enable the deprecated metrics: +To temporarily re-enable the deprecated metrics: ```console export OTEL_GO_X_DEPRECATED_RUNTIME_METRICS=true ``` -After two additional releases, the deprecated runtime metrics will be removed, +Eventually, the deprecated runtime metrics will be removed, and setting the environment variable will no longer have any effect. The value set must be the case-insensitive string of `"true"` to enable the diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/internal/x/x.go b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/internal/x/x.go index 7ffb473ad..95a05d599 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/internal/x/x.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/internal/x/x.go @@ -9,17 +9,17 @@ package x // import "go.opentelemetry.io/contrib/instrumentation/runtime/interna import ( "os" - "strings" + "strconv" ) // DeprecatedRuntimeMetrics is an experimental feature flag that defines if the deprecated // runtime metrics should be produced. During development of the new // conventions, it is enabled by default. // -// To disable this feature set the OTEL_GO_X_DEPRECATED_RUNTIME_METRICS environment variable -// to the case-insensitive string value of "false" (i.e. "False" and "FALSE" +// To enable this feature set the OTEL_GO_X_DEPRECATED_RUNTIME_METRICS environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" // will also enable this). -var DeprecatedRuntimeMetrics = newFeature("DEPRECATED_RUNTIME_METRICS", true) +var DeprecatedRuntimeMetrics = newFeature("DEPRECATED_RUNTIME_METRICS", false) // BoolFeature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. @@ -43,11 +43,11 @@ func (f BoolFeature) Key() string { return f.key } // Enabled returns if the feature is enabled. 
func (f BoolFeature) Enabled() bool { v := os.Getenv(f.key) - if strings.ToLower(v) == "false" { - return false + + val, err := strconv.ParseBool(v) + if err != nil { + return f.defaultVal } - if strings.ToLower(v) == "true" { - return true - } - return f.defaultVal + + return val } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/runtime.go b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/runtime.go index d0ffe2764..fec833b57 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/runtime.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/runtime.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/semconv/v1.34.0/goconv" "go.opentelemetry.io/contrib/instrumentation/runtime/internal/deprecatedruntime" "go.opentelemetry.io/contrib/instrumentation/runtime/internal/x" @@ -43,78 +44,48 @@ func Start(opts ...Option) error { metric.WithInstrumentationVersion(Version()), ) if x.DeprecatedRuntimeMetrics.Enabled() { - return deprecatedruntime.Start(meter, c.MinimumReadMemStatsInterval) + if err := deprecatedruntime.Start(meter, c.MinimumReadMemStatsInterval); err != nil { + return err + } } - memoryUsedInstrument, err := meter.Int64ObservableUpDownCounter( - "go.memory.used", - metric.WithUnit("By"), - metric.WithDescription("Memory used by the Go runtime."), - ) + memoryUsed, err := goconv.NewMemoryUsed(meter) if err != nil { return err } - memoryLimitInstrument, err := meter.Int64ObservableUpDownCounter( - "go.memory.limit", - metric.WithUnit("By"), - metric.WithDescription("Go runtime memory limit configured by the user, if a limit exists."), - ) + memoryLimit, err := goconv.NewMemoryLimit(meter) if err != nil { return err } - memoryAllocatedInstrument, err := meter.Int64ObservableCounter( - "go.memory.allocated", - metric.WithUnit("By"), - metric.WithDescription("Memory allocated to the heap by the application."), - ) + memoryAllocated, err := goconv.NewMemoryAllocated(meter) if err != nil { return err } - memoryAllocationsInstrument, err := meter.Int64ObservableCounter( - "go.memory.allocations", - metric.WithUnit("{allocation}"), - metric.WithDescription("Count of allocations to the heap by the application."), - ) + memoryAllocations, err := goconv.NewMemoryAllocations(meter) if err != nil { return err } - memoryGCGoalInstrument, err := meter.Int64ObservableUpDownCounter( - "go.memory.gc.goal", - metric.WithUnit("By"), - metric.WithDescription("Heap size target for the end of the GC cycle."), - ) + memoryGCGoal, err := goconv.NewMemoryGCGoal(meter) if err != nil { return err } - goroutineCountInstrument, err := meter.Int64ObservableUpDownCounter( - "go.goroutine.count", - metric.WithUnit("{goroutine}"), - metric.WithDescription("Count of live goroutines."), - ) + goroutineCount, err := goconv.NewGoroutineCount(meter) if err != nil { return err } - processorLimitInstrument, err := meter.Int64ObservableUpDownCounter( - "go.processor.limit", - metric.WithUnit("{thread}"), - metric.WithDescription("The number of OS threads that can execute user-level Go code simultaneously."), - ) + processorLimit, err := goconv.NewProcessorLimit(meter) if err != nil { return err } - gogcConfigInstrument, err := meter.Int64ObservableUpDownCounter( - "go.config.gogc", - metric.WithUnit("%"), - metric.WithDescription("Heap size target percentage configured by the user, otherwise 100."), - ) + configGogc, err := goconv.NewConfigGogc(meter) if err != nil { return err } 
otherMemoryOpt := metric.WithAttributeSet( - attribute.NewSet(attribute.String("go.memory.type", "other")), + attribute.NewSet(memoryUsed.AttrMemoryType(goconv.MemoryTypeOther)), ) stackMemoryOpt := metric.WithAttributeSet( - attribute.NewSet(attribute.String("go.memory.type", "stack")), + attribute.NewSet(memoryUsed.AttrMemoryType(goconv.MemoryTypeStack)), ) collector := newCollector(c.MinimumReadMemStatsInterval, runtimeMetrics) var lock sync.Mutex @@ -124,30 +95,30 @@ func Start(opts ...Option) error { defer lock.Unlock() collector.refresh() stackMemory := collector.getInt(goHeapMemory) - o.ObserveInt64(memoryUsedInstrument, stackMemory, stackMemoryOpt) + o.ObserveInt64(memoryUsed.Inst(), stackMemory, stackMemoryOpt) totalMemory := collector.getInt(goTotalMemory) - collector.getInt(goMemoryReleased) otherMemory := totalMemory - stackMemory - o.ObserveInt64(memoryUsedInstrument, otherMemory, otherMemoryOpt) + o.ObserveInt64(memoryUsed.Inst(), otherMemory, otherMemoryOpt) // Only observe the limit metric if a limit exists if limit := collector.getInt(goMemoryLimit); limit != math.MaxInt64 { - o.ObserveInt64(memoryLimitInstrument, limit) + o.ObserveInt64(memoryLimit.Inst(), limit) } - o.ObserveInt64(memoryAllocatedInstrument, collector.getInt(goMemoryAllocated)) - o.ObserveInt64(memoryAllocationsInstrument, collector.getInt(goMemoryAllocations)) - o.ObserveInt64(memoryGCGoalInstrument, collector.getInt(goMemoryGoal)) - o.ObserveInt64(goroutineCountInstrument, collector.getInt(goGoroutines)) - o.ObserveInt64(processorLimitInstrument, collector.getInt(goMaxProcs)) - o.ObserveInt64(gogcConfigInstrument, collector.getInt(goConfigGC)) + o.ObserveInt64(memoryAllocated.Inst(), collector.getInt(goMemoryAllocated)) + o.ObserveInt64(memoryAllocations.Inst(), collector.getInt(goMemoryAllocations)) + o.ObserveInt64(memoryGCGoal.Inst(), collector.getInt(goMemoryGoal)) + o.ObserveInt64(goroutineCount.Inst(), collector.getInt(goGoroutines)) + o.ObserveInt64(processorLimit.Inst(), collector.getInt(goMaxProcs)) + o.ObserveInt64(configGogc.Inst(), collector.getInt(goConfigGC)) return nil }, - memoryUsedInstrument, - memoryLimitInstrument, - memoryAllocatedInstrument, - memoryAllocationsInstrument, - memoryGCGoalInstrument, - goroutineCountInstrument, - processorLimitInstrument, - gogcConfigInstrument, + memoryUsed.Inst(), + memoryLimit.Inst(), + memoryAllocated.Inst(), + memoryAllocations.Inst(), + memoryGCGoal.Inst(), + goroutineCount.Inst(), + processorLimit.Inst(), + configGogc.Inst(), ) if err != nil { return err diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go index 4161ec624..2d1da2549 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go @@ -5,6 +5,6 @@ package runtime // import "go.opentelemetry.io/contrib/instrumentation/runtime" // Version is the current release version of the runtime instrumentation. 
func Version() string { - return "0.61.0" + return "0.62.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.clomonitor.yml b/vendor/go.opentelemetry.io/otel/.clomonitor.yml new file mode 100644 index 000000000..128d61a22 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.clomonitor.yml @@ -0,0 +1,3 @@ +exemptions: + - check: artifacthub_badge + reason: "Artifact Hub doesn't support Go packages" diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 888e5da80..5f69cc027 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -66,8 +66,6 @@ linters: desc: Do not use cross-module internal packages. - pkg: go.opentelemetry.io/otel/internal/internaltest desc: Do not use cross-module internal packages. - - pkg: go.opentelemetry.io/otel/internal/matchers - desc: Do not use cross-module internal packages. otlp-internal: files: - '!**/exporters/otlp/internal/**/*.go' @@ -190,6 +188,10 @@ linters: - legacy - std-error-handling rules: + - linters: + - revive + path: schema/v.*/types/.* + text: avoid meaningless package names # TODO: Having appropriate comments for exported objects helps development, # even for objects in internal packages. Appropriate comments for all # exported objects should be added and this exclusion removed. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 648e4abab..4acc75701 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,61 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.37.0/0.59.0/0.13.0] 2025-06-25 + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.33.0` package. + The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0.`(#6799) +- The `go.opentelemetry.io/otel/semconv/v1.34.0` package. + The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. (#6812) +- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) +- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) +- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log`. (#6825) +- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825) +- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when it doesn't match one of the pre-defined values in the unit suffix map. (#6839) + +### Changed + +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#6829) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. (#6832) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. 
(#6834) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. (#6835) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836) +- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. (#6864) +- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898) + +### Fixed + +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6710) +- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822) +- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914) + +### Removed + +- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770) + +## [0.12.2] 2025-05-22 + +### Fixed + +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. (#6804) +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804) +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. (#6804) + +## [0.12.1] 2025-05-21 + +### Fixes + +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800) +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800) +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800) + ## [1.36.0/0.58.0/0.12.0] 2025-05-20 ### Added @@ -3288,7 +3343,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD +[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 +[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 +[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 [1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0 [1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 [1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 1902dac05..f9ddc281f 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -109,10 +109,9 @@ A PR is considered **ready to merge** when: This is not enforced through automation, but needs to be validated by the maintainer merging. - * The qualified approvals need to be from [Approver]s/[Maintainer]s - affiliated with different companies. Two qualified approvals from - [Approver]s or [Maintainer]s affiliated with the same company counts as a - single qualified approval. + * At least one of the qualified approvals need to be from an + [Approver]/[Maintainer] affiliated with a different company than the author + of the PR. * PRs introducing changes that have already been discussed and consensus reached only need one qualified approval. The discussion and resolution needs to be linked to the PR. @@ -650,11 +649,11 @@ should be canceled. ### Maintainers -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [David Ashpole](https://github.com/dashpole), Google -- [Robert Pająk](https://github.com/pellared), Splunk -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics -- [Tyler Yahn](https://github.com/MrAlias), Splunk +- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832)) +- [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70)) +- [Robert Pająk](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2)) +- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA)) +- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A)) ### Emeritus diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 62a56f4d3..4fa423ca0 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -293,7 +293,7 @@ semconv-generate: $(SEMCONVKIT) --param tag=$(TAG) \ go \ /home/weaver/target - $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + $(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)" .PHONY: gorelease gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index b60078812..5fa1b75c6 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -7,6 +7,7 @@ [![OpenSSF 
Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/opentelemetry-go.svg)](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go) +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go.svg?type=shield&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 7c1a9119d..1ddcdef03 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -112,6 +112,29 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. +### Sign the Release Artifact + +To ensure we comply with CNCF best practices, we need to sign the release artifact. +The tarball attached to the GitHub release needs to be signed with your GPG key. + +Follow [these steps] to sign the release artifact and upload it to GitHub. +You can use [this script] to verify the contents of the tarball before signing it. + +Be sure to use the correct GPG key when signing the release artifact. + +```terminal +gpg --local-user --armor --detach-sign opentelemetry-go-.tar.gz +``` + +You can verify the signature with: + +```terminal +gpg --verify opentelemetry-go-.tar.gz.asc opentelemetry-go-.tar.gz +``` + +[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases +[this script]: https://github.com/MrAlias/attest-sh + ## Post-Release ### Contrib Repository diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index 51fb76b30..935bd4876 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. 
-FROM python:3.13.3-slim-bullseye@sha256:9e3f9243e06fd68eb9519074b49878eda20ad39a855fac51aaffb741de20726e AS python -FROM otel/weaver:v0.15.0@sha256:1cf1c72eaed57dad813c2e359133b8a15bd4facf305aae5b13bdca6d3eccff56 AS weaver +FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python +FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go index 05abd92ee..1add3f333 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go @@ -5,6 +5,7 @@ package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o import ( "context" + "errors" "fmt" "time" @@ -192,7 +193,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context ) if c.exportTimeout > 0 { - ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { ctx, cancel = context.WithCancel(parent) } @@ -228,6 +229,8 @@ func retryable(err error) (bool, time.Duration) { func retryableGRPCStatus(s *status.Status) (bool, time.Duration) { switch s.Code() { + // Follows the retryable error codes defined in + // https://opentelemetry.io/docs/specs/otlp/#failures case codes.Canceled, codes.DeadlineExceeded, codes.Aborted, diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go index 896c3a303..fa5946774 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go @@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go index 954597340..42d186f31 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go @@ -5,5 +5,5 @@ package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o // Version is the current release version of the OpenTelemetry OTLP over gRPC logs exporter in use. 
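The switch to `context.WithTimeoutCause` and `context.Cause` above is what backs the changelog item about retries showing the error cause for context timeouts: the export timeout now carries a descriptive cause that surfaces instead of a bare deadline error. A self-contained sketch of that standard-library pattern (illustration only, not part of the vendored diff):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	// Same shape as the exporters' exportContext: attach a cause to the timeout.
	ctx, cancel := context.WithTimeoutCause(
		context.Background(),
		10*time.Millisecond,
		errors.New("exporter export timeout"),
	)
	defer cancel()

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context deadline exceeded
	fmt.Println(context.Cause(ctx)) // exporter export timeout
}
```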
func Version() string { - return "0.12.2" + return "0.13.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go index bd9a750a1..a0a9dc133 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go @@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go index 9702a4c0b..c8e9c8867 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go @@ -5,5 +5,5 @@ package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use. func Version() string { - return "0.12.2" + return "0.13.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go index e0fa0570a..82a4c2c2a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go @@ -5,6 +5,7 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme import ( "context" + "errors" "time" "google.golang.org/genproto/googleapis/rpc/errdetails" @@ -149,7 +150,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context ) if c.exportTimeout > 0 { - ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { ctx, cancel = context.WithCancel(parent) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go index cb77ae6a9..758d1ea32 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go @@ -105,12 +105,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config { return cfg } -// cleanPath returns a path with all spaces trimmed and all redundancies -// removed. If urlPath is empty or cleaning it results in an empty string, +// cleanPath returns a path with all spaces trimmed. If urlPath is empty, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { - tmp := path.Clean(strings.TrimSpace(urlPath)) - if tmp == "." { + tmp := strings.TrimSpace(urlPath) + if tmp == "" || tmp == "." 
{ return defaultPath } if !path.IsAbs(tmp) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go index 37cc6c519..80691ac3a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go @@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index 58859f2c2..b34d35b0b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. func Version() string { - return "1.36.0" + return "1.37.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go index cfe629a97..ed66bb068 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go @@ -105,12 +105,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config { return cfg } -// cleanPath returns a path with all spaces trimmed and all redundancies -// removed. If urlPath is empty or cleaning it results in an empty string, +// cleanPath returns a path with all spaces trimmed. If urlPath is empty, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { - tmp := path.Clean(strings.TrimSpace(urlPath)) - if tmp == "." { + tmp := strings.TrimSpace(urlPath) + if tmp == "" || tmp == "." 
{ return defaultPath } if !path.IsAbs(tmp) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go index c855bdc93..8a5fa80ea 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go @@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go index 528533321..1175a6575 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go @@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. func Version() string { - return "1.36.0" + return "1.37.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 8409b5f8f..8236c995a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -223,7 +223,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context ) if c.exportTimeout > 0 { - ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { ctx, cancel = context.WithCancel(parent) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 506ca00b6..4f47117a5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -92,12 +92,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config { return cfg } -// cleanPath returns a path with all spaces trimmed and all redundancies -// removed. If urlPath is empty or cleaning it results in an empty string, +// cleanPath returns a path with all spaces trimmed. If urlPath is empty, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { - tmp := path.Clean(strings.TrimSpace(urlPath)) - if tmp == "." { + tmp := strings.TrimSpace(urlPath) + if tmp == "" || tmp == "." 
{ return defaultPath } if !path.IsAbs(tmp) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go index 777e68a7b..259a898ae 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go index c857db056..e415feea6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go @@ -92,12 +92,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config { return cfg } -// cleanPath returns a path with all spaces trimmed and all redundancies -// removed. If urlPath is empty or cleaning it results in an empty string, +// cleanPath returns a path with all spaces trimmed. If urlPath is empty, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { - tmp := path.Clean(strings.TrimSpace(urlPath)) - if tmp == "." { + tmp := strings.TrimSpace(urlPath) + if tmp == "" || tmp == "." { return defaultPath } if !path.IsAbs(tmp) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go index e9d35c7fa..107428fa6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go @@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 5f78bfdfb..ed2ddce71 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.36.0" + return "1.37.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go index ceb2d63e2..521838840 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go @@ -125,9 +125,8 @@ func WithoutCounterSuffixes() Option { }) } -// WithoutScopeInfo configures the Exporter to not export the otel_scope_info metric. 
-// If not specified, the Exporter will create a otel_scope_info metric containing -// the metrics' Instrumentation Scope, and also add labels about Instrumentation Scope to all metric points. +// WithoutScopeInfo configures the Exporter to not export +// labels about Instrumentation Scope to all metric points. func WithoutScopeInfo() Option { return optionFunc(func(cfg config) config { cfg.disableScopeInfo = true @@ -136,7 +135,7 @@ func WithoutScopeInfo() Option { } // WithNamespace configures the Exporter to prefix metric with the given namespace. -// Metadata metrics such as target_info and otel_scope_info are not prefixed since these +// Metadata metrics such as target_info are not prefixed since these // have special behavior based on their name. func WithNamespace(ns string) Option { return optionFunc(func(cfg config) config { diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go index e0959641c..7b44c12c5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go @@ -21,7 +21,6 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" @@ -31,25 +30,20 @@ const ( targetInfoMetricName = "target_info" targetInfoDescription = "Target metadata" - scopeInfoMetricName = "otel_scope_info" - scopeInfoDescription = "Instrumentation Scope metadata" - - scopeNameLabel = "otel_scope_name" - scopeVersionLabel = "otel_scope_version" + scopeLabelPrefix = "otel_scope_" + scopeNameLabel = scopeLabelPrefix + "name" + scopeVersionLabel = scopeLabelPrefix + "version" + scopeSchemaLabel = scopeLabelPrefix + "schema_url" traceIDExemplarKey = "trace_id" spanIDExemplarKey = "span_id" ) -var ( - errScopeInvalid = errors.New("invalid scope") - - metricsPool = sync.Pool{ - New: func() interface{} { - return &metricdata.ResourceMetrics{} - }, - } -) +var metricsPool = sync.Pool{ + New: func() interface{} { + return &metricdata.ResourceMetrics{} + }, +} // Exporter is a Prometheus Exporter that embeds the OTel metric.Reader // interface for easy instantiation with a MeterProvider. @@ -97,8 +91,6 @@ type collector struct { mu sync.Mutex // mu protects all members below from the concurrent access. disableTargetInfo bool targetInfo prometheus.Metric - scopeInfos map[instrumentation.Scope]prometheus.Metric - scopeInfosInvalid map[instrumentation.Scope]struct{} metricFamilies map[string]*dto.MetricFamily resourceKeyVals keyVals } @@ -122,8 +114,6 @@ func New(opts ...Option) (*Exporter, error) { withoutUnits: cfg.withoutUnits, withoutCounterSuffixes: cfg.withoutCounterSuffixes, disableScopeInfo: cfg.disableScopeInfo, - scopeInfos: make(map[instrumentation.Scope]prometheus.Metric), - scopeInfosInvalid: make(map[instrumentation.Scope]struct{}), metricFamilies: make(map[string]*dto.MetricFamily), namespace: cfg.namespace, resourceAttributesFilter: cfg.resourceAttributesFilter, @@ -202,20 +192,15 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } if !c.disableScopeInfo { - scopeInfo, err := c.scopeInfo(scopeMetrics.Scope) - if errors.Is(err, errScopeInvalid) { - // Do not report the same error multiple times. 
- continue - } - if err != nil { - otel.Handle(err) - continue - } + kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel, scopeSchemaLabel) + kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version, scopeMetrics.Scope.SchemaURL) - ch <- scopeInfo - - kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel) - kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version) + attrKeys, attrVals := getAttrs(scopeMetrics.Scope.Attributes) + for i := range attrKeys { + attrKeys[i] = scopeLabelPrefix + attrKeys[i] + } + kv.keys = append(kv.keys, attrKeys...) + kv.vals = append(kv.vals, attrVals...) } kv.keys = append(kv.keys, c.resourceKeyVals.keys...) @@ -259,6 +244,59 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } } +// downscaleExponentialBucket re-aggregates bucket counts when downscaling to a coarser resolution. +func downscaleExponentialBucket(bucket metricdata.ExponentialBucket, scaleDelta int32) metricdata.ExponentialBucket { + if len(bucket.Counts) == 0 || scaleDelta < 1 { + return metricdata.ExponentialBucket{ + Offset: bucket.Offset >> scaleDelta, + Counts: append([]uint64(nil), bucket.Counts...), // copy slice + } + } + + // The new offset is scaled down + newOffset := bucket.Offset >> scaleDelta + + // Pre-calculate the new bucket count to avoid growing slice + // Each group of 2^scaleDelta buckets will merge into one bucket + //nolint:gosec // Length is bounded by slice allocation + lastBucketIdx := bucket.Offset + int32(len(bucket.Counts)) - 1 + lastNewIdx := lastBucketIdx >> scaleDelta + newBucketCount := int(lastNewIdx - newOffset + 1) + + if newBucketCount <= 0 { + return metricdata.ExponentialBucket{ + Offset: newOffset, + Counts: []uint64{}, + } + } + + newCounts := make([]uint64, newBucketCount) + + // Merge buckets according to the scale difference + for i, count := range bucket.Counts { + if count == 0 { + continue + } + + // Calculate which new bucket this count belongs to + //nolint:gosec // Index is bounded by loop iteration + originalIdx := bucket.Offset + int32(i) + newIdx := originalIdx >> scaleDelta + + // Calculate the position in the new counts array + position := newIdx - newOffset + //nolint:gosec // Length is bounded by allocation + if position >= 0 && position < int32(len(newCounts)) { + newCounts[position] += count + } + } + + return metricdata.ExponentialBucket{ + Offset: newOffset, + Counts: newCounts, + } +} + func addExponentialHistogramMetric[N int64 | float64]( ch chan<- prometheus.Metric, histogram metricdata.ExponentialHistogram[N], @@ -273,23 +311,43 @@ func addExponentialHistogramMetric[N int64 | float64]( desc := prometheus.NewDesc(name, m.Description, keys, nil) + // Prometheus native histograms support scales in the range [-4, 8] + scale := dp.Scale + if scale < -4 { + // Reject scales below -4 as they cannot be represented in Prometheus + otel.Handle(fmt.Errorf( + "exponential histogram scale %d is below minimum supported scale -4, skipping data point", + scale)) + continue + } + + // If scale > 8, we need to downscale the buckets to match the clamped scale + positiveBucket := dp.PositiveBucket + negativeBucket := dp.NegativeBucket + if scale > 8 { + scaleDelta := scale - 8 + positiveBucket = downscaleExponentialBucket(dp.PositiveBucket, scaleDelta) + negativeBucket = downscaleExponentialBucket(dp.NegativeBucket, scaleDelta) + scale = 8 + } + // From spec: note that Prometheus Native Histograms buckets are indexed by upper boundary while Exponential Histograms are indexed by lower 
boundary, the result being that the Offset fields are different-by-one. positiveBuckets := make(map[int]int64) - for i, c := range dp.PositiveBucket.Counts { + for i, c := range positiveBucket.Counts { if c > math.MaxInt64 { otel.Handle(fmt.Errorf("positive count %d is too large to be represented as int64", c)) continue } - positiveBuckets[int(dp.PositiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. + positiveBuckets[int(positiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. } negativeBuckets := make(map[int]int64) - for i, c := range dp.NegativeBucket.Counts { + for i, c := range negativeBucket.Counts { if c > math.MaxInt64 { otel.Handle(fmt.Errorf("negative count %d is too large to be represented as int64", c)) continue } - negativeBuckets[int(dp.NegativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. + negativeBuckets[int(negativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. } m, err := prometheus.NewConstNativeHistogram( @@ -299,7 +357,7 @@ func addExponentialHistogramMetric[N int64 | float64]( positiveBuckets, negativeBuckets, dp.ZeroCount, - dp.Scale, + scale, dp.ZeroThreshold, dp.StartTime, values...) @@ -440,15 +498,11 @@ func createInfoMetric(name, description string, res *resource.Resource) (prometh return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) } -func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) { - attrs := make([]attribute.KeyValue, 0, scope.Attributes.Len()+2) // resource attrs + scope name + scope version - attrs = append(attrs, scope.Attributes.ToSlice()...) - attrs = append(attrs, attribute.String(scopeNameLabel, scope.Name)) - attrs = append(attrs, attribute.String(scopeVersionLabel, scope.Version)) - - keys, values := getAttrs(attribute.NewSet(attrs...)) - desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil) - return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) 
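The exponential-histogram changes above clamp scales greater than 8 (the maximum Prometheus native histograms accept) by downscaling: every group of 2^scaleDelta source buckets collapses into one coarser bucket through an index right-shift. A toy, self-contained illustration of that merge with made-up counts (not part of the vendored code):

```go
package main

import "fmt"

func main() {
	// Source buckets at indices 5..9 (offset 5) with arbitrary counts.
	counts := []uint64{1, 2, 3, 4, 5}
	offset := int32(5)
	const scaleDelta = 2 // e.g. clamping scale 10 down to 8

	newOffset := offset >> scaleDelta // 5 >> 2 == 1
	merged := map[int32]uint64{}
	for i, c := range counts {
		// Same index math as downscaleExponentialBucket: shift the
		// original bucket index right by scaleDelta.
		merged[(offset+int32(i))>>scaleDelta] += c
	}
	fmt.Println(newOffset, merged) // 1 map[1:6 2:9]
}
```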
+func unitMapGetOrDefault(unit string) string { + if promUnit, ok := unitSuffixes[unit]; ok { + return promUnit + } + return unit } var unitSuffixes = map[string]string{ @@ -509,7 +563,7 @@ func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { if c.namespace != "" { name = c.namespace + name } - if suffix, ok := unitSuffixes[m.Unit]; ok && !c.withoutUnits && !strings.HasSuffix(name, suffix) { + if suffix := unitMapGetOrDefault(m.Unit); suffix != "" && !c.withoutUnits && !strings.HasSuffix(name, suffix) { name += "_" + suffix } if addCounterSuffix { @@ -556,30 +610,6 @@ func (c *collector) createResourceAttributes(res *resource.Resource) { c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues} } -func (c *collector) scopeInfo(scope instrumentation.Scope) (prometheus.Metric, error) { - c.mu.Lock() - defer c.mu.Unlock() - - scopeInfo, ok := c.scopeInfos[scope] - if ok { - return scopeInfo, nil - } - - if _, ok := c.scopeInfosInvalid[scope]; ok { - return nil, errScopeInvalid - } - - scopeInfo, err := createScopeInfoMetric(scope) - if err != nil { - c.scopeInfosInvalid[scope] = struct{}{} - return nil, fmt.Errorf("cannot create scope info metric: %w", err) - } - - c.scopeInfos[scope] = scopeInfo - - return scopeInfo, nil -} - func (c *collector) validateMetrics(name, description string, metricType *dto.MetricType) (drop bool, help string) { c.mu.Lock() defer c.mu.Unlock() diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/record.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/record.go index 43aba8a5c..681634282 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/record.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutlog/record.go @@ -106,7 +106,7 @@ func (e *Exporter) newRecordJSON(r sdklog.Record) recordJSON { Attributes: make([]keyValue, 0, r.AttributesLen()), - Resource: &res, + Resource: res, Scope: r.InstrumentationScope(), DroppedAttributes: r.DroppedAttributes(), diff --git a/vendor/go.opentelemetry.io/otel/log/logger.go b/vendor/go.opentelemetry.io/otel/log/logger.go index 1205f08e2..99a429a71 100644 --- a/vendor/go.opentelemetry.io/otel/log/logger.go +++ b/vendor/go.opentelemetry.io/otel/log/logger.go @@ -136,5 +136,6 @@ func WithSchemaURL(schemaURL string) LoggerOption { // EnabledParameters represents payload for [Logger]'s Enabled method. type EnabledParameters struct { - Severity Severity + Severity Severity + EventName string } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go index 8cef5dde6..a9d3c439b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go @@ -119,7 +119,9 @@ func newTimeoutExporter(exp Exporter, timeout time.Duration) Exporter { // Export sets the timeout of ctx before calling the Exporter e wraps. func (e *timeoutExporter) Export(ctx context.Context, records []Record) error { - ctx, cancel := context.WithTimeout(ctx, e.timeout) + // This only used by the batch processor, and it takes processor timeout config. + // Thus, the error message points to the processor. So users know they should adjust the processor timeout. 
+ ctx, cancel := context.WithTimeoutCause(ctx, e.timeout, errors.New("processor export timeout")) defer cancel() return e.Exporter.Export(ctx, records) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go b/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go index a39cad9e0..682f2eb2c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go @@ -57,4 +57,5 @@ type FilterProcessor interface { type EnabledParameters struct { InstrumentationScope instrumentation.Scope Severity log.Severity + EventName string } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go index cd3580ec0..1ec8ff883 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go @@ -52,6 +52,7 @@ func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool p := EnabledParameters{ InstrumentationScope: l.instrumentationScope, Severity: param.Severity, + EventName: param.EventName, } // If there are more Processors than FilterProcessors, diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/record.go b/vendor/go.opentelemetry.io/otel/sdk/log/record.go index a13fcac7b..38fd65079 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/record.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/record.go @@ -387,11 +387,8 @@ func (r *Record) SetTraceFlags(flags trace.TraceFlags) { } // Resource returns the entity that collected the log. -func (r *Record) Resource() resource.Resource { - if r.resource == nil { - return *resource.Empty() - } - return *r.resource +func (r *Record) Resource() *resource.Resource { + return r.resource } // InstrumentationScope returns the scope that the Logger was created with. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index ebb9a0463..0a48aed74 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -202,7 +202,7 @@ func (r *PeriodicReader) aggregation( // collectAndExport gather all metric data related to the periodicReader r from // the SDK and exports it with r's exporter. func (r *PeriodicReader) collectAndExport(ctx context.Context) error { - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := context.WithTimeoutCause(ctx, r.timeout, errors.New("reader collect and export timeout")) defer cancel() // TODO (#3047): Use a sync.Pool or persistent pointer instead of allocating rm every Collect. @@ -278,7 +278,7 @@ func (r *PeriodicReader) ForceFlush(ctx context.Context) error { // Prioritize the ctx timeout if it is set. if _, ok := ctx.Deadline(); !ok { var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, r.timeout) + ctx, cancel = context.WithTimeoutCause(ctx, r.timeout, errors.New("reader force flush timeout")) defer cancel() } @@ -311,7 +311,7 @@ func (r *PeriodicReader) Shutdown(ctx context.Context) error { // Prioritize the ctx timeout if it is set. 
if _, ok := ctx.Deadline(); !ok { var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, r.timeout) + ctx, cancel = context.WithTimeoutCause(ctx, r.timeout, errors.New("reader shutdown timeout")) defer cancel() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index 2240c26e9..7bdb699ca 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -121,6 +121,14 @@ func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) { // // This method is safe to call concurrently. func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error { + // Only check if context is already cancelled before starting, not inside or after callback loops. + // If this method returns after executing some callbacks but before running all aggregations, + // internal aggregation state can be corrupted and result in incorrect data returned + // by future produce calls. + if err := ctx.Err(); err != nil { + return err + } + p.Lock() defer p.Unlock() @@ -130,12 +138,6 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) if e := c(ctx); e != nil { err = errors.Join(err, e) } - if err := ctx.Err(); err != nil { - rm.Resource = nil - clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. - rm.ScopeMetrics = rm.ScopeMetrics[:0] - return err - } } for e := p.multiCallbacks.Front(); e != nil; e = e.Next() { // TODO make the callbacks parallel. ( #3034 ) @@ -143,13 +145,6 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) if e := f(ctx); e != nil { err = errors.Join(err, e) } - if err := ctx.Err(); err != nil { - // This means the context expired before we finished running callbacks. - rm.Resource = nil - clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. - rm.ScopeMetrics = rm.ScopeMetrics[:0] - return err - } } rm.Resource = p.resource diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index cda142c7e..0e5adc1a7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. 
func version() string { - return "1.36.0" + return "1.37.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index cf3c88e15..cefe4ab91 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index 5ecd859a5..0d8619715 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type containerIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 813f05624..16a062ad8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 2d0f65498..781903923 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type hostIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 8a48ab4fa..01b4d27a0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type osDescriptionProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 085fe68fd..6712ce80d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 6872cbb4e..6966ed861 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -5,6 +5,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "sync" "sync/atomic" "time" @@ -267,7 +268,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if bsp.o.ExportTimeout > 0 { var cancel 
context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) + ctx, cancel = context.WithTimeoutCause(ctx, bsp.o.ExportTimeout, errors.New("processor export timeout")) defer cancel() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 8f4fc3850..1785a4bbb 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 1af257449..c0217af6b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.36.0" + return "1.37.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md new file mode 100644 index 000000000..02b56115e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md @@ -0,0 +1,4 @@ + +# Migration from v1.33.0 to v1.34.0 + +The `go.opentelemetry.io/otel/semconv/v1.34.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.33.0`. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md new file mode 100644 index 000000000..fab06c975 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.34.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.34.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.34.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go new file mode 100644 index 000000000..5b5666257 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go @@ -0,0 +1,13851 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +import "go.opentelemetry.io/otel/attribute" + +// Namespace: android +const ( + // AndroidAppStateKey is the attribute Key conforming to the "android.app.state" + // semantic conventions. It represents the this attribute represents the state + // of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "created" + // Note: The Android lifecycle states are defined in + // [Activity lifecycle callbacks], and from which the `OS identifiers` are + // derived. + // + // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc + AndroidAppStateKey = attribute.Key("android.app.state") + + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. 
It represents the uniquely + // identifies the framework API revision offered by a version (`os.version`) of + // the android operating system. More information can be found [here]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "33", "32" + // + // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found [here]. +// +// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// Enum values for android.app.state +var ( + // Any time before Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called in the app for the first time. + // + // Stability: development + AndroidAppStateCreated = AndroidAppStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, + // Context.stopService() has been called when the app was in the foreground + // state. + // + // Stability: development + AndroidAppStateBackground = AndroidAppStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called when the app was in either the created + // or background states. + // + // Stability: development + AndroidAppStateForeground = AndroidAppStateKey.String("foreground") +) + +// Namespace: app +const ( + // AppInstallationIDKey is the attribute Key conforming to the + // "app.installation.id" semantic conventions. It represents a unique identifier + // representing the installation of an application on a specific device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" + // Note: Its value SHOULD persist across launches of the same application + // installation, including through application upgrades. + // It SHOULD change if the application is uninstalled or if all applications of + // the vendor are uninstalled. + // Additionally, users might be able to reset this value (e.g. by clearing + // application data). + // If an app is installed multiple times on the same device (e.g. in different + // accounts on Android), each `app.installation.id` SHOULD have a different + // value. + // If multiple OpenTelemetry SDKs are used within the same application, they + // SHOULD use the same value for `app.installation.id`. + // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the + // `app.installation.id`. + // + // For iOS, this value SHOULD be equal to the [vendor identifier]. + // + // For Android, examples of `app.installation.id` implementations include: + // + // - [Firebase Installation ID]. + // - A globally unique UUID which is persisted across sessions in your + // application. + // - [App set ID]. + // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. + // + // More information about Android identifier best practices can be found [here] + // . 
+ // + // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor + // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations + // [App set ID]: https://developer.android.com/identity/app-set-id + // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID + // [here]: https://developer.android.com/training/articles/user-data-ids + AppInstallationIDKey = attribute.Key("app.installation.id") + + // AppScreenCoordinateXKey is the attribute Key conforming to the + // "app.screen.coordinate.x" semantic conventions. It represents the x + // (horizontal) coordinate of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 131 + AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") + + // AppScreenCoordinateYKey is the attribute Key conforming to the + // "app.screen.coordinate.y" semantic conventions. It represents the y + // (vertical) component of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12, 99 + AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") + + // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" + // semantic conventions. It represents an identifier that uniquely + // differentiates this widget from other widgets in the same application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetIDKey = attribute.Key("app.widget.id") + + // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" + // semantic conventions. It represents the name of an application widget. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "submit", "attack", "Clear Cart" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetNameKey = attribute.Key("app.widget.name") +) + +// AppInstallationID returns an attribute KeyValue conforming to the +// "app.installation.id" semantic conventions. It represents a unique identifier +// representing the installation of an application on a specific device. +func AppInstallationID(val string) attribute.KeyValue { + return AppInstallationIDKey.String(val) +} + +// AppScreenCoordinateX returns an attribute KeyValue conforming to the +// "app.screen.coordinate.x" semantic conventions. It represents the x +// (horizontal) coordinate of a screen coordinate, in screen pixels. +func AppScreenCoordinateX(val int) attribute.KeyValue { + return AppScreenCoordinateXKey.Int(val) +} + +// AppScreenCoordinateY returns an attribute KeyValue conforming to the +// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) +// component of a screen coordinate, in screen pixels. +func AppScreenCoordinateY(val int) attribute.KeyValue { + return AppScreenCoordinateYKey.Int(val) +} + +// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" +// semantic conventions. It represents an identifier that uniquely differentiates +// this widget from other widgets in the same application. 
+func AppWidgetID(val string) attribute.KeyValue { + return AppWidgetIDKey.String(val) +} + +// AppWidgetName returns an attribute KeyValue conforming to the +// "app.widget.name" semantic conventions. It represents the name of an +// application widget. +func AppWidgetName(val string) attribute.KeyValue { + return AppWidgetNameKey.String(val) +} + +// Namespace: artifact +const ( + // ArtifactAttestationFilenameKey is the attribute Key conforming to the + // "artifact.attestation.filename" semantic conventions. It represents the + // provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at + // publish time. See the [SLSA Relationship] specification for more information. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0.attestation", + // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", + // "file-name-package.tar.gz.intoto.json1" + // + // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations + ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") + + // ArtifactAttestationHashKey is the attribute Key conforming to the + // "artifact.attestation.hash" semantic conventions. It represents the full + // [hash value (see glossary)], of the built attestation. Some envelopes in the + // [software attestation space] also refer to this as the **digest**. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec + ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") + + // ArtifactAttestationIDKey is the attribute Key conforming to the + // "artifact.attestation.id" semantic conventions. It represents the id of the + // build [software attestation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + // + // [software attestation]: https://slsa.dev/attestation-model + ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") + + // ArtifactFilenameKey is the attribute Key conforming to the + // "artifact.filename" semantic conventions. It represents the human readable + // file name of the artifact, typically generated during build and release + // processes. Often includes the package name and version in the file name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", + // "release-1.tar.gz", "file-name-package.tar.gz" + // Note: This file name can also act as the [Package Name] + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact [can be published] + // for others, but that is not a guarantee. + // + // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model + // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain + ArtifactFilenameKey = attribute.Key("artifact.filename") + + // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" + // semantic conventions. 
It represents the full [hash value (see glossary)], + // often found in checksum.txt on a release of the artifact and used to verify + // package integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. The implementer can then provide the other + // hash values through an additional set of attribute extensions as they + // deem necessary. + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + ArtifactHashKey = attribute.Key("artifact.hash") + + // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" + // semantic conventions. It represents the [Package URL] of the + // [package artifact] provides a standard way to identify and locate the + // packaged artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pkg:github/package-url/purl-spec@1209109710924", + // "pkg:npm/foo@12.12.3" + // + // [Package URL]: https://github.com/package-url/purl-spec + // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model + ArtifactPurlKey = attribute.Key("artifact.purl") + + // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" + // semantic conventions. It represents the version of the artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v0.1.0", "1.2.1", "122691-build" + ArtifactVersionKey = attribute.Key("artifact.version") +) + +// ArtifactAttestationFilename returns an attribute KeyValue conforming to the +// "artifact.attestation.filename" semantic conventions. It represents the +// provenance filename of the built attestation which directly relates to the +// build artifact filename. This filename SHOULD accompany the artifact at +// publish time. See the [SLSA Relationship] specification for more information. +// +// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations +func ArtifactAttestationFilename(val string) attribute.KeyValue { + return ArtifactAttestationFilenameKey.String(val) +} + +// ArtifactAttestationHash returns an attribute KeyValue conforming to the +// "artifact.attestation.hash" semantic conventions. It represents the full +// [hash value (see glossary)], of the built attestation. Some envelopes in the +// [software attestation space] also refer to this as the **digest**. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec +func ArtifactAttestationHash(val string) attribute.KeyValue { + return ArtifactAttestationHashKey.String(val) +} + +// ArtifactAttestationID returns an attribute KeyValue conforming to the +// "artifact.attestation.id" semantic conventions. It represents the id of the +// build [software attestation]. 
+// +// [software attestation]: https://slsa.dev/attestation-model +func ArtifactAttestationID(val string) attribute.KeyValue { + return ArtifactAttestationIDKey.String(val) +} + +// ArtifactFilename returns an attribute KeyValue conforming to the +// "artifact.filename" semantic conventions. It represents the human readable +// file name of the artifact, typically generated during build and release +// processes. Often includes the package name and version in the file name. +func ArtifactFilename(val string) attribute.KeyValue { + return ArtifactFilenameKey.String(val) +} + +// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" +// semantic conventions. It represents the full [hash value (see glossary)], +// often found in checksum.txt on a release of the artifact and used to verify +// package integrity. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +func ArtifactHash(val string) attribute.KeyValue { + return ArtifactHashKey.String(val) +} + +// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" +// semantic conventions. It represents the [Package URL] of the +// [package artifact] provides a standard way to identify and locate the packaged +// artifact. +// +// [Package URL]: https://github.com/package-url/purl-spec +// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model +func ArtifactPurl(val string) attribute.KeyValue { + return ArtifactPurlKey.String(val) +} + +// ArtifactVersion returns an attribute KeyValue conforming to the +// "artifact.version" semantic conventions. It represents the version of the +// artifact. +func ArtifactVersion(val string) attribute.KeyValue { + return ArtifactVersionKey.String(val) +} + +// Namespace: aws +const ( + // AWSBedrockGuardrailIDKey is the attribute Key conforming to the + // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique + // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and + // prevent unwanted behavior from model responses or user messages. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "sgi5gkybzqak" + // + // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html + AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") + + // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the + // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the + // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a + // bank of information that can be queried by models to generate more relevant + // responses and augment prompts. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "XFWUPB9PAW" + // + // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html + AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") + + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the + // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the + // JSON-serialized value of each item in the `AttributeDefinitions` request + // field. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lives", "id" + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the value + // of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }" + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of the + // `Count` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the + // value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "CatsTable" + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }" + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` + // request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value of + // the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "name_to_group" + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the + // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents + // the JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of the + // `Limit` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }" + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value of + // the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, + // ProductReviews" + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.WriteCapacityUnits` request + // parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of + // the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of + // the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of the + // `Segment` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of the + // `Select` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ALL_ATTRIBUTES", "COUNT" + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. 
It represents the number of + // items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys in + // the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "Cats" + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the value + // of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS cluster]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + // + // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container instance]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" + // + // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" + // semantic conventions. It represents the ARN of a running [ECS task]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", + // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + // + // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family name of + // the [ECS task definition] used to create the ECS task. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-family" + // + // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID MUST + // be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", + // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision for + // the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "8", "26" + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") + + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS + // cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") + + // AWSExtendedRequestIDKey is the attribute Key conforming to the + // "aws.extended_request_id" semantic conventions. It represents the AWS + // extended request ID as returned in the response header `x-amz-id-2`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ=" + AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id") + + // AWSKinesisStreamNameKey is the attribute Key conforming to the + // "aws.kinesis.stream_name" semantic conventions. It represents the name of the + // AWS Kinesis [stream] the request refers to. Corresponds to the + // `--stream-name` parameter of the Kinesis [describe-stream] operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-stream-name" + // + // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html + // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html + AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name") + + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked + // ARN as provided on the `Context` passed to the function ( + // `Lambda-Runtime-Invoked-Function-Arn` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias" + // Note: This may be different from `cloud.resource_id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") + + // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the + // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID + // of the [AWS Lambda EvenSource Mapping]. 
An event source is mapped to a lambda + function. Its contents are read by Lambda and used to trigger a function. + This isn't available in the lambda execution context or the lambda runtime + environment. This is going to be populated by the AWS SDK for each language + when that UUID is present. Some of these operations are + Create/Delete/Get/List/Update EventSourceMapping. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7" + // + // [AWS Lambda EvenSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html + AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource + // Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*" + // Note: See the [log group ARN format documentation]. + // + // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of the + // AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/aws/lambda/my-function", "opentelemetry-service" + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each + // write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the + // AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + // Note: See the [log stream ARN format documentation]. One log group can + // contain several log streams, so these ARNs necessarily identify both a log + // group and a log stream. + // + // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) of the + // AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in the + // response headers `x-amzn-requestid`, `x-amzn-request-id` or + // `x-amz-request-id`.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" + AWSRequestIDKey = attribute.Key("aws.request_id") + + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request refers to. + // Corresponds to the `--bucket` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-bucket-name" + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source object + // (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 API]. + // This applies in particular to the following operations: + // + // - [copy-object] + // - [upload-part-copy] + // + // + // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" + // Note: The `delete` attribute is only applicable to the [delete-object] + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 API]. + // + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `key` attribute is applicable to all object-related S3 operations, + // i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [delete-object] + // - [get-object] + // - [head-object] + // - [put-object] + // - [restore-object] + // - [select-object-content] + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [create-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html + // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html + // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html + // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html + // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number of + // the part being uploaded in a multipart-upload operation. This is a positive + // integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the [upload-part] + // and [upload-part-copy] operations. + // The `part_number` attribute corresponds to the `--part-number` parameter of + // the + // [upload-part operation within the S3 API]. + // + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" + // semantic conventions. It represents the upload ID that identifies the + // multipart upload. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" + // Note: The `upload_id` attribute applies to S3 multipart-upload operations and + // corresponds to the `--upload-id` parameter + // of the [S3 API] multipart operations. 
+ // This applies in particular to the following operations: + // + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the + // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN + // of the Secret stored in the Secrets Manager. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters" + AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn") + + // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn" + // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon + // SNS [topic] is a logical access point that acts as a communication channel. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE" + // + // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html + AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn") + + // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url" + // semantic conventions. It represents the URL of the AWS SQS Queue. It's a + // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is + // used to access the queue and perform actions on it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue" + AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url") + + // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the + // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN + // of the AWS Step Functions Activity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting" + AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn") + + // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the + // "aws.step_functions.state_machine.arn" semantic conventions. It represents + // the ARN of the AWS Step Functions State Machine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1" + AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn") +) + +// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the +// "aws.bedrock.guardrail.id" semantic conventions.
It represents the unique +// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and +// prevent unwanted behavior from model responses or user messages. +// +// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html +func AWSBedrockGuardrailID(val string) attribute.KeyValue { + return AWSBedrockGuardrailIDKey.String(val) +} + +// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the +// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of +// information that can be queried by models to generate more relevant responses +// and augment prompts. +// +// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html +func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { + return AWSBedrockKnowledgeBaseIDKey.String(val) +} + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to +// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents +// the JSON-serialized value of each item in the `AttributeDefinitions` request +// field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the +// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value +// of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the +// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the +// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the +// value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of the +// `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to +// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents +// the JSON-serialized value of the `ItemCollectionMetrics` response field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents +// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request +// field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of the +// `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming +// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of +// the `ScannedCount` response parameter. 
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the +// `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value of +// the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an +// [ECS cluster]. +// +// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container instance]. +// +// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS task]. +// +// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task definition] used to create the ECS task. +// +// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" +// semantic conventions. It represents the ID of a running ECS task. The ID MUST +// be extracted from `task.arn`. 
+func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// AWSExtendedRequestID returns an attribute KeyValue conforming to the +// "aws.extended_request_id" semantic conventions. It represents the AWS extended +// request ID as returned in the response header `x-amz-id-2`. +func AWSExtendedRequestID(val string) attribute.KeyValue { + return AWSExtendedRequestIDKey.String(val) +} + +// AWSKinesisStreamName returns an attribute KeyValue conforming to the +// "aws.kinesis.stream_name" semantic conventions. It represents the name of the +// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name` +// parameter of the Kinesis [describe-stream] operation. +// +// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html +// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html +func AWSKinesisStreamName(val string) attribute.KeyValue { + return AWSKinesisStreamNameKey.String(val) +} + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked +// ARN as provided on the `Context` passed to the function ( +// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` +// applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the +// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID +// of the [AWS Lambda EvenSource Mapping]. An event source is mapped to a lambda +// function. Its contents are read by Lambda and used to trigger a function. +// This isn't available in the lambda execution context or the lambda runtime +// environment. This is going to be populated by the AWS SDK for each language +// when that UUID is present. Some of these operations are +// Create/Delete/Get/List/Update EventSourceMapping. +// +// [AWS Lambda EvenSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html +func AWSLambdaResourceMappingID(val string) attribute.KeyValue { + return AWSLambdaResourceMappingIDKey.String(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of the +// AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id" +// semantic conventions. It represents the AWS request ID as returned in the +// response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket" +// semantic conventions. It represents the S3 bucket name the request refers to. +// Corresponds to the `--bucket` parameter of the [S3 API] operations. +// +// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object (in +// the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete" +// semantic conventions. It represents the delete request container that +// specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic +// conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 API] operations. +// +// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the +// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of +// the Secret stored in the Secrets Manager. +func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue { + return AWSSecretsmanagerSecretARNKey.String(val) +} + +// AWSSNSTopicARN returns an attribute KeyValue conforming to the +// "aws.sns.topic.arn" semantic conventions.
It represents the ARN of the AWS SNS +// Topic. An Amazon SNS [topic] is a logical access point that acts as a +// communication channel. +// +// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html +func AWSSNSTopicARN(val string) attribute.KeyValue { + return AWSSNSTopicARNKey.String(val) +} + +// AWSSQSQueueURL returns an attribute KeyValue conforming to the +// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS +// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service +// (SQS) and is used to access the queue and perform actions on it. +func AWSSQSQueueURL(val string) attribute.KeyValue { + return AWSSQSQueueURLKey.String(val) +} + +// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the +// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN +// of the AWS Step Functions Activity. +func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { + return AWSStepFunctionsActivityARNKey.String(val) +} + +// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to +// the "aws.step_functions.state_machine.arn" semantic conventions. It represents +// the ARN of the AWS Step Functions State Machine. +func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { + return AWSStepFunctionsStateMachineARNKey.String(val) +} + +// Enum values for aws.ecs.launchtype +var ( + // ec2 + // Stability: development + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + // Stability: development + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Namespace: az +const ( + // AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic + // conventions. It represents the [Azure Resource Provider Namespace] as + // recognized by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" + // + // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers + AzNamespaceKey = attribute.Key("az.namespace") + + // AzServiceRequestIDKey is the attribute Key conforming to the + // "az.service_request_id" semantic conventions. It represents the unique + // identifier of the service request. It's generated by the Azure service and + // returned with the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00000000-0000-0000-0000-000000000000" + AzServiceRequestIDKey = attribute.Key("az.service_request_id") +) + +// AzNamespace returns an attribute KeyValue conforming to the "az.namespace" +// semantic conventions. It represents the [Azure Resource Provider Namespace] as +// recognized by the client. +// +// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers +func AzNamespace(val string) attribute.KeyValue { + return AzNamespaceKey.String(val) +} + +// AzServiceRequestID returns an attribute KeyValue conforming to the +// "az.service_request_id" semantic conventions. It represents the unique +// identifier of the service request. It's generated by the Azure service and +// returned with the response. 
+func AzServiceRequestID(val string) attribute.KeyValue { + return AzServiceRequestIDKey.String(val) +} + +// Namespace: azure +const ( + // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" + // semantic conventions. It represents the unique identifier of the client + // instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" + AzureClientIDKey = attribute.Key("azure.client.id") + + // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the + // "azure.cosmosdb.connection.mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") + + // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the + // "azure.cosmosdb.consistency.level" semantic conventions. It represents the + // account or request [consistency level]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", + // "Session" + // + // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels + AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") + + // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to + // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It + // represents the list of regions contacted during operation in the order that + // they were contacted. If there is more than one region listed, it indicates + // that the operation was performed on multiple regions i.e. cross-regional + // call. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "North Central US", "Australia East", "Australia Southeast" + // Note: Region name matches the format of `displayName` in [Azure Location API] + // + // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location + AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") + + // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the + // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents + // the number of request units consumed by the operation. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 46.18, 1.0 + AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") + + // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the + // "azure.cosmosdb.request.body.size" semantic conventions. It represents the + // request payload size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") + + // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the + // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents + // the cosmos DB sub status code. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000, 1002 + AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") +) + +// AzureClientID returns an attribute KeyValue conforming to the +// "azure.client.id" semantic conventions. It represents the unique identifier of +// the client instance. +func AzureClientID(val string) attribute.KeyValue { + return AzureClientIDKey.String(val) +} + +// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue +// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic +// conventions. It represents the list of regions contacted during operation in +// the order that they were contacted. If there is more than one region listed, +// it indicates that the operation was performed on multiple regions i.e. +// cross-regional call. +func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { + return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) +} + +// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming +// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It +// represents the number of request units consumed by the operation. +func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { + return AzureCosmosDBOperationRequestChargeKey.Float64(val) +} + +// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the +// "azure.cosmosdb.request.body.size" semantic conventions. It represents the +// request payload size in bytes. +func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { + return AzureCosmosDBRequestBodySizeKey.Int(val) +} + +// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to +// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It +// represents the cosmos DB sub status code. +func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { + return AzureCosmosDBResponseSubStatusCodeKey.Int(val) +} + +// Enum values for azure.cosmosdb.connection.mode +var ( + // Gateway (HTTP) connection. + // Stability: development + AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") + // Direct connection. + // Stability: development + AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") +) + +// Enum values for azure.cosmosdb.consistency.level +var ( + // strong + // Stability: development + AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") + // bounded_staleness + // Stability: development + AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") + // session + // Stability: development + AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") + // eventual + // Stability: development + AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") + // consistent_prefix + // Stability: development + AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") +) + +// Namespace: browser +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.brands`). + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the "browser.language" + // semantic conventions. It represents the preferred language of the user using + // the browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "en", "en-US", "fr", "fr-FR" + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the browser is + // running on a mobile device. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be + // left unset. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" + // semantic conventions. It represents the platform on which the browser is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Windows", "macOS", "Android" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the + // [W3C User-Agent Client Hints specification]. Note that some (but not all) of + // these values can overlap with values in the + // [`os.type` and `os.name` attributes]. However, for consistency, the values in + // the `browser.platform` attribute should capture the exact value that the user + // agent provides. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform + // [`os.type` and `os.name` attributes]: ./os.md + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" +// semantic conventions. It represents the array of brand name and version +// separated by a space. +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred language +// of the user using the browser. +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" +// semantic conventions. It represents a boolean that is true if the browser is +// running on a mobile device. 
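// A minimal, hypothetical usage sketch for the browser.* attributes above: the
// helper constructors are thin wrappers over their attribute.Key, so callers
// attach them directly to a span instead of hand-writing attribute names. The
// tracer name, the context `ctx`, and the semconv version in the import path
// are assumptions, not prescribed by these conventions.
//
//	import (
//		"go.opentelemetry.io/otel"
//		semconv "go.opentelemetry.io/otel/semconv/vX.Y.Z" // match the vendored version
//	)
//
//	ctx, span := otel.Tracer("web-frontend").Start(ctx, "page-load")
//	defer span.End()
//	span.SetAttributes(
//		semconv.BrowserBrands("Chromium 99", "Chrome 99"),
//		semconv.BrowserLanguage("en-US"),
//		semconv.BrowserMobile(false),
//		semconv.BrowserPlatform("macOS"),
//	)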
+func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running. +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// Namespace: cassandra +const ( + // CassandraConsistencyLevelKey is the attribute Key conforming to the + // "cassandra.consistency.level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from [CQL]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html + CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level") + + // CassandraCoordinatorDCKey is the attribute Key conforming to the + // "cassandra.coordinator.dc" semantic conventions. It represents the data + // center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: us-west-2 + CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc") + + // CassandraCoordinatorIDKey is the attribute Key conforming to the + // "cassandra.coordinator.id" semantic conventions. It represents the ID of the + // coordinating node for a query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af + CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id") + + // CassandraPageSizeKey is the attribute Key conforming to the + // "cassandra.page.size" semantic conventions. It represents the fetch size used + // for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 5000 + CassandraPageSizeKey = attribute.Key("cassandra.page.size") + + // CassandraQueryIdempotentKey is the attribute Key conforming to the + // "cassandra.query.idempotent" semantic conventions. It represents the whether + // or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent") + + // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the + // "cassandra.speculative_execution.count" semantic conventions. It represents + // the number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 2 + CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count") +) + +// CassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "cassandra.coordinator.dc" semantic conventions. It represents the data center +// of the coordinating node for a query. +func CassandraCoordinatorDC(val string) attribute.KeyValue { + return CassandraCoordinatorDCKey.String(val) +} + +// CassandraCoordinatorID returns an attribute KeyValue conforming to the +// "cassandra.coordinator.id" semantic conventions. It represents the ID of the +// coordinating node for a query. 
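// A hedged sketch of how the cassandra.* helpers above might be attached to a
// database client span; `span` is assumed to be an active trace.Span, the
// values are hypothetical, and the consistency-level enum value is declared
// further down in this file.
//
//	span.SetAttributes(
//		CassandraConsistencyLevelQuorum,
//		CassandraCoordinatorDC("us-west-2"),
//		CassandraPageSize(5000),
//		CassandraQueryIdempotent(true),
//	)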
+func CassandraCoordinatorID(val string) attribute.KeyValue { + return CassandraCoordinatorIDKey.String(val) +} + +// CassandraPageSize returns an attribute KeyValue conforming to the +// "cassandra.page.size" semantic conventions. It represents the fetch size used +// for paging, i.e. how many rows will be returned at once. +func CassandraPageSize(val int) attribute.KeyValue { + return CassandraPageSizeKey.Int(val) +} + +// CassandraQueryIdempotent returns an attribute KeyValue conforming to the +// "cassandra.query.idempotent" semantic conventions. It represents the whether +// or not the query is idempotent. +func CassandraQueryIdempotent(val bool) attribute.KeyValue { + return CassandraQueryIdempotentKey.Bool(val) +} + +// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to +// the "cassandra.speculative_execution.count" semantic conventions. It +// represents the number of times a query was speculatively executed. Not set or +// `0` if the query was not executed speculatively. +func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return CassandraSpeculativeExecutionCountKey.Int(val) +} + +// Enum values for cassandra.consistency.level +var ( + // all + // Stability: development + CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all") + // each_quorum + // Stability: development + CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum") + // quorum + // Stability: development + CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum") + // local_quorum + // Stability: development + CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum") + // one + // Stability: development + CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one") + // two + // Stability: development + CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two") + // three + // Stability: development + CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three") + // local_one + // Stability: development + CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one") + // any + // Stability: development + CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any") + // serial + // Stability: development + CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial") + // local_serial + // Stability: development + CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial") +) + +// Namespace: cicd +const ( + // CICDPipelineActionNameKey is the attribute Key conforming to the + // "cicd.pipeline.action.name" semantic conventions. It represents the kind of + // action a pipeline run is performing. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BUILD", "RUN", "SYNC" + CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name") + + // CICDPipelineNameKey is the attribute Key conforming to the + // "cicd.pipeline.name" semantic conventions. It represents the human readable + // name of the pipeline within a CI/CD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Build and Test", "Lint", "Deploy Go Project", + // "deploy_to_environment" + CICDPipelineNameKey = attribute.Key("cicd.pipeline.name") + + // CICDPipelineResultKey is the attribute Key conforming to the + // "cicd.pipeline.result" semantic conventions. 
It represents the result of a + // pipeline run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "timeout", "skipped" + CICDPipelineResultKey = attribute.Key("cicd.pipeline.result") + + // CICDPipelineRunIDKey is the attribute Key conforming to the + // "cicd.pipeline.run.id" semantic conventions. It represents the unique + // identifier of a pipeline run within a CI/CD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "120912" + CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id") + + // CICDPipelineRunStateKey is the attribute Key conforming to the + // "cicd.pipeline.run.state" semantic conventions. It represents the pipeline + // run goes through these states during its lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pending", "executing", "finalizing" + CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state") + + // CICDPipelineRunURLFullKey is the attribute Key conforming to the + // "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of + // the pipeline run, providing the complete address in order to locate and + // identify the pipeline run. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full") + + // CICDPipelineTaskNameKey is the attribute Key conforming to the + // "cicd.pipeline.task.name" semantic conventions. It represents the human + // readable name of a task within a pipeline. Task here most closely aligns with + // a [computing process] in a pipeline. Other terms for tasks include commands, + // steps, and procedures. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary" + // + // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) + CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name") + + // CICDPipelineTaskRunIDKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique + // identifier of a task run within a pipeline. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "12097" + CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id") + + // CICDPipelineTaskRunResultKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.result" semantic conventions. It represents the + // result of a task run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "timeout", "skipped" + CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result") + + // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the + // [URL] of the pipeline task run, providing the complete address in order to + // locate and identify the pipeline task run. 
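// A hypothetical sketch combining several cicd.pipeline.* attributes on a
// single span that models one pipeline run; `span` is an assumed active
// trace.Span, the values are made up, and CICDPipelineResultSuccess is the
// enum value declared later in this file.
//
//	span.SetAttributes(
//		CICDPipelineName("Build and Test"),
//		CICDPipelineRunID("120912"),
//		CICDPipelineRunURLFull("https://ci.example.org/runs/120912"),
//		CICDPipelineResultSuccess,
//	)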
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") + + // CICDPipelineTaskTypeKey is the attribute Key conforming to the + // "cicd.pipeline.task.type" semantic conventions. It represents the type of the + // task within a pipeline. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "build", "test", "deploy" + CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") + + // CICDSystemComponentKey is the attribute Key conforming to the + // "cicd.system.component" semantic conventions. It represents the name of a + // component of the CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "controller", "scheduler", "agent" + CICDSystemComponentKey = attribute.Key("cicd.system.component") + + // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" + // semantic conventions. It represents the unique identifier of a worker within + // a CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "abc123", "10.0.1.2", "controller" + CICDWorkerIDKey = attribute.Key("cicd.worker.id") + + // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" + // semantic conventions. It represents the name of a worker within a CICD + // system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "agent-abc", "controller", "Ubuntu LTS" + CICDWorkerNameKey = attribute.Key("cicd.worker.name") + + // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" + // semantic conventions. It represents the state of a CICD worker / agent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle", "busy", "down" + CICDWorkerStateKey = attribute.Key("cicd.worker.state") + + // CICDWorkerURLFullKey is the attribute Key conforming to the + // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the + // worker, providing the complete address in order to locate and identify the + // worker. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://cicd.example.org/worker/abc123" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") +) + +// CICDPipelineName returns an attribute KeyValue conforming to the +// "cicd.pipeline.name" semantic conventions. It represents the human readable +// name of the pipeline within a CI/CD system. +func CICDPipelineName(val string) attribute.KeyValue { + return CICDPipelineNameKey.String(val) +} + +// CICDPipelineRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.id" semantic conventions. It represents the unique +// identifier of a pipeline run within a CI/CD system. +func CICDPipelineRunID(val string) attribute.KeyValue { + return CICDPipelineRunIDKey.String(val) +} + +// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.url.full" semantic conventions. 
It represents the [URL] of +// the pipeline run, providing the complete address in order to locate and +// identify the pipeline run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineRunURLFull(val string) attribute.KeyValue { + return CICDPipelineRunURLFullKey.String(val) +} + +// CICDPipelineTaskName returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.name" semantic conventions. It represents the human +// readable name of a task within a pipeline. Task here most closely aligns with +// a [computing process] in a pipeline. Other terms for tasks include commands, +// steps, and procedures. +// +// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) +func CICDPipelineTaskName(val string) attribute.KeyValue { + return CICDPipelineTaskNameKey.String(val) +} + +// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique +// identifier of a task run within a pipeline. +func CICDPipelineTaskRunID(val string) attribute.KeyValue { + return CICDPipelineTaskRunIDKey.String(val) +} + +// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the +// [URL] of the pipeline task run, providing the complete address in order to +// locate and identify the pipeline task run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { + return CICDPipelineTaskRunURLFullKey.String(val) +} + +// CICDSystemComponent returns an attribute KeyValue conforming to the +// "cicd.system.component" semantic conventions. It represents the name of a +// component of the CICD system. +func CICDSystemComponent(val string) attribute.KeyValue { + return CICDSystemComponentKey.String(val) +} + +// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id" +// semantic conventions. It represents the unique identifier of a worker within a +// CICD system. +func CICDWorkerID(val string) attribute.KeyValue { + return CICDWorkerIDKey.String(val) +} + +// CICDWorkerName returns an attribute KeyValue conforming to the +// "cicd.worker.name" semantic conventions. It represents the name of a worker +// within a CICD system. +func CICDWorkerName(val string) attribute.KeyValue { + return CICDWorkerNameKey.String(val) +} + +// CICDWorkerURLFull returns an attribute KeyValue conforming to the +// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the +// worker, providing the complete address in order to locate and identify the +// worker. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDWorkerURLFull(val string) attribute.KeyValue { + return CICDWorkerURLFullKey.String(val) +} + +// Enum values for cicd.pipeline.action.name +var ( + // The pipeline run is executing a build. + // Stability: development + CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD") + // The pipeline run is executing. + // Stability: development + CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN") + // The pipeline run is executing a sync. + // Stability: development + CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC") +) + +// Enum values for cicd.pipeline.result +var ( + // The pipeline run finished successfully. + // Stability: development + CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") + // The pipeline run did not finish successfully, eg. 
due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the pipeline run. + // Stability: development + CICDPipelineResultFailure = CICDPipelineResultKey.String("failure") + // The pipeline run failed due to an error in the CICD system, eg. due to the + // worker being killed. + // Stability: development + CICDPipelineResultError = CICDPipelineResultKey.String("error") + // A timeout caused the pipeline run to be interrupted. + // Stability: development + CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout") + // The pipeline run was cancelled, eg. by a user manually cancelling the + // pipeline run. + // Stability: development + CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation") + // The pipeline run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineResultSkip = CICDPipelineResultKey.String("skip") +) + +// Enum values for cicd.pipeline.run.state +var ( + // The run pending state spans from the event triggering the pipeline run until + // the execution of the run starts (eg. time spent in a queue, provisioning + // agents, creating run resources). + // + // Stability: development + CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending") + // The executing state spans the execution of any run tasks (eg. build, test). + // Stability: development + CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing") + // The finalizing state spans from when the run has finished executing (eg. + // cleanup of run resources). + // Stability: development + CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing") +) + +// Enum values for cicd.pipeline.task.run.result +var ( + // The task run finished successfully. + // Stability: development + CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success") + // The task run did not finish successfully, eg. due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the task run. + // Stability: development + CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure") + // The task run failed due to an error in the CICD system, eg. due to the worker + // being killed. + // Stability: development + CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error") + // A timeout caused the task run to be interrupted. + // Stability: development + CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout") + // The task run was cancelled, eg. by a user manually cancelling the task run. + // Stability: development + CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation") + // The task run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip") +) + +// Enum values for cicd.pipeline.task.type +var ( + // build + // Stability: development + CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build") + // test + // Stability: development + CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test") + // deploy + // Stability: development + CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy") +) + +// Enum values for cicd.worker.state +var ( + // The worker is not performing work for the CICD system. 
It is available to the + // CICD system to perform work on (online / idle). + // Stability: development + CICDWorkerStateAvailable = CICDWorkerStateKey.String("available") + // The worker is performing work for the CICD system. + // Stability: development + CICDWorkerStateBusy = CICDWorkerStateKey.String("busy") + // The worker is not available to the CICD system (disconnected / down). + // Stability: development + CICDWorkerStateOffline = CICDWorkerStateKey.String("offline") +) + +// Namespace: client +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the server side, and when communicating through an + // intermediary, `client.address` SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" semantic + // conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, `client.port` SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the "client.address" +// semantic conventions. It represents the client address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// Namespace: cloud +const ( + // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id" + // semantic conventions. It represents the cloud account ID the resource is + // assigned to. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "111111111111", "opentelemetry" + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to increase + // availability. Availability zone represents the zone where the resource is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-east-1c" + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic + // conventions. It represents the geographical region within a cloud provider. + // When associated with a resource, this attribute specifies the region where + // the resource operates. When calling services or APIs deployed on a cloud, + // this attribute identifies the region where the called destination is + // deployed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1", "us-east-1" + // Note: Refer to your provider's docs to see the available regions, for example + // [Alibaba Cloud regions], [AWS regions], [Azure regions], + // [Google Cloud regions], or [Tencent Cloud regions]. + // + // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm + // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/ + // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/ + // [Google Cloud regions]: https://cloud.google.com/about/locations + // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091 + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id" + // semantic conventions. It represents the cloud provider-specific native + // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a + // [fully qualified resource ID] on Azure, a [full resource name] on GCP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function", + // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID", + // "/subscriptions//resourceGroups/ + // /providers/Microsoft.Web/sites//functions/" + // Note: On some cloud providers, it may not be possible to determine the full + // ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute + // and they apply: + // + // - **AWS Lambda:** The function [ARN]. + // Take care not to use the "invoked ARN" directly but replace any + // [alias suffix] + // with the resolved function version, as the same runtime instance may be + // invocable with + // multiple different aliases. + // - **GCP:** The [URI of the resource] + // - **Azure:** The [Fully Qualified Resource ID] of the invoked function, + // *not* the function app, having the form + // + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/` + // . + // This means that a span attribute MUST be used, as an Azure function app + // can host multiple functions that would usually share + // a TracerProvider. 
+ // + // + // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id + // [full resource name]: https://google.aip.dev/122#full-resource-names + // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html + // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names + // [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id + CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the "cloud.region" +// semantic conventions. It represents the geographical region within a cloud +// provider. When associated with a resource, this attribute specifies the region +// where the resource operates. When calling services or APIs deployed on a +// cloud, this attribute identifies the region where the called destination is +// deployed. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name] +// on GCP). 
+// +// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html +// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id +// [full resource name]: https://google.aip.dev/122#full-resource-names +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Enum values for cloud.platform +var ( + // Alibaba Cloud Elastic Compute Service + // Stability: development + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + // Stability: development + CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + // Stability: development + CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + // Stability: development + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + // Stability: development + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + // Stability: development + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + // Stability: development + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + // Stability: development + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + // Stability: development + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + // Stability: development + CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + // Stability: development + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + // Stability: development + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + // Stability: development + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + // Stability: development + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + // Stability: development + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + // Stability: development + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + // Stability: development + CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + // Stability: development + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + // Stability: development + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + // Stability: development + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + // Stability: development + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + // Stability: development + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + // Stability: development + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + // Stability: 
development + CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + // Stability: development + CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift") + // Compute on Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute") + // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke") + // Tencent Cloud Cloud Virtual Machine (CVM) + // Stability: development + CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + // Stability: development + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + // Stability: development + CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf") +) + +// Enum values for cloud.provider +var ( + // Alibaba Cloud + // Stability: development + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + // Stability: development + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + // Stability: development + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + // Stability: development + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + // Stability: development + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + // Stability: development + CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud") + // Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud") + // Tencent Cloud + // Stability: development + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// Namespace: cloudevents +const ( + // CloudEventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the [event_id] + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001" + // + // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id + CloudEventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudEventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the [source] + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123", + // "my-service" + // + // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 + CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudEventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents specification] which the event uses. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + // + // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion + CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudEventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the [subject] + // of the event in the context of the event producer (identified by source). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: mynewfile.jpg + // + // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject + CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudEventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the [event_type] + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2" + // + // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type + CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudEventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the [event_id] +// uniquely identifies the event. +// +// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id +func CloudEventsEventID(val string) attribute.KeyValue { + return CloudEventsEventIDKey.String(val) +} + +// CloudEventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the [source] +// identifies the context in which an event happened. +// +// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 +func CloudEventsEventSource(val string) attribute.KeyValue { + return CloudEventsEventSourceKey.String(val) +} + +// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the +// "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents specification] which the event uses. +// +// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion +func CloudEventsEventSpecVersion(val string) attribute.KeyValue { + return CloudEventsEventSpecVersionKey.String(val) +} + +// CloudEventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the [subject] +// of the event in the context of the event producer (identified by source). +// +// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject +func CloudEventsEventSubject(val string) attribute.KeyValue { + return CloudEventsEventSubjectKey.String(val) +} + +// CloudEventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the [event_type] +// contains a value describing the type of event related to the originating +// occurrence. 
+// +// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type +func CloudEventsEventType(val string) attribute.KeyValue { + return CloudEventsEventTypeKey.String(val) +} + +// Namespace: cloudfoundry +const ( + // CloudFoundryAppIDKey is the attribute Key conforming to the + // "cloudfoundry.app.id" semantic conventions. It represents the guid of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_id`. This is the same value as + // reported by `cf app --guid`. + CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id") + + // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.app.instance.id" semantic conventions. It represents the index + // of the application instance. 0 when just one instance is active. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0", "1" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the application instance index for applications + // deployed on the runtime. + // + // Application instrumentation should use the value from environment + // variable `CF_INSTANCE_INDEX`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") + + // CloudFoundryAppNameKey is the attribute Key conforming to the + // "cloudfoundry.app.name" semantic conventions. It represents the name of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-app-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_name`. This is the same value + // as reported by `cf apps`. + CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name") + + // CloudFoundryOrgIDKey is the attribute Key conforming to the + // "cloudfoundry.org.id" semantic conventions. It represents the guid of the + // CloudFoundry org the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_id`. This is the same value as + // reported by `cf org --guid`. + CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") + + // CloudFoundryOrgNameKey is the attribute Key conforming to the + // "cloudfoundry.org.name" semantic conventions. It represents the name of the + // CloudFoundry organization the app is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_name`. This is the same value as + // reported by `cf orgs`. + CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") + + // CloudFoundryProcessIDKey is the attribute Key conforming to the + // "cloudfoundry.process.id" semantic conventions. It represents the UID + // identifying the process. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to + // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. + // For system components, this could be the actual PID. + CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") + + // CloudFoundryProcessTypeKey is the attribute Key conforming to the + // "cloudfoundry.process.type" semantic conventions. It represents the type of + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "web" + // Note: CloudFoundry applications can consist of multiple jobs. Usually the + // main process will be of type `web`. There can be additional background + // tasks or side-cars with different process types. + CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") + + // CloudFoundrySpaceIDKey is the attribute Key conforming to the + // "cloudfoundry.space.id" semantic conventions. It represents the guid of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_id`. This is the same value as + // reported by `cf space --guid`. + CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") + + // CloudFoundrySpaceNameKey is the attribute Key conforming to the + // "cloudfoundry.space.name" semantic conventions. It represents the name of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-space-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_name`. This is the same value as + // reported by `cf spaces`. + CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") + + // CloudFoundrySystemIDKey is the attribute Key conforming to the + // "cloudfoundry.system.id" semantic conventions. It represents a guid or + // another name describing the event source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cf/gorouter" + // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the component name, e.g. "gorouter", for + // CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.id` should be set to + // `spec.deployment/spec.name`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") + + // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid + // describing the concrete instance of the event source. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the vm id for CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.instance.id` should be set to `spec.id`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") +) + +// CloudFoundryAppID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.id" semantic conventions. It represents the guid of the +// application. +func CloudFoundryAppID(val string) attribute.KeyValue { + return CloudFoundryAppIDKey.String(val) +} + +// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.instance.id" semantic conventions. It represents the index +// of the application instance. 0 when just one instance is active. +func CloudFoundryAppInstanceID(val string) attribute.KeyValue { + return CloudFoundryAppInstanceIDKey.String(val) +} + +// CloudFoundryAppName returns an attribute KeyValue conforming to the +// "cloudfoundry.app.name" semantic conventions. It represents the name of the +// application. +func CloudFoundryAppName(val string) attribute.KeyValue { + return CloudFoundryAppNameKey.String(val) +} + +// CloudFoundryOrgID returns an attribute KeyValue conforming to the +// "cloudfoundry.org.id" semantic conventions. It represents the guid of the +// CloudFoundry org the application is running in. +func CloudFoundryOrgID(val string) attribute.KeyValue { + return CloudFoundryOrgIDKey.String(val) +} + +// CloudFoundryOrgName returns an attribute KeyValue conforming to the +// "cloudfoundry.org.name" semantic conventions. It represents the name of the +// CloudFoundry organization the app is running in. +func CloudFoundryOrgName(val string) attribute.KeyValue { + return CloudFoundryOrgNameKey.String(val) +} + +// CloudFoundryProcessID returns an attribute KeyValue conforming to the +// "cloudfoundry.process.id" semantic conventions. It represents the UID +// identifying the process. +func CloudFoundryProcessID(val string) attribute.KeyValue { + return CloudFoundryProcessIDKey.String(val) +} + +// CloudFoundryProcessType returns an attribute KeyValue conforming to the +// "cloudfoundry.process.type" semantic conventions. It represents the type of +// process. +func CloudFoundryProcessType(val string) attribute.KeyValue { + return CloudFoundryProcessTypeKey.String(val) +} + +// CloudFoundrySpaceID returns an attribute KeyValue conforming to the +// "cloudfoundry.space.id" semantic conventions. It represents the guid of the +// CloudFoundry space the application is running in. +func CloudFoundrySpaceID(val string) attribute.KeyValue { + return CloudFoundrySpaceIDKey.String(val) +} + +// CloudFoundrySpaceName returns an attribute KeyValue conforming to the +// "cloudfoundry.space.name" semantic conventions. It represents the name of the +// CloudFoundry space the application is running in. 
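// Unlike request-scoped span attributes, the cloud.* and cloudfoundry.*
// attributes above typically describe the telemetry-producing entity, so they
// are usually set once on the SDK resource at startup. A hedged sketch using
// the OTel SDK resource package; the concrete values and the semconv version
// in the import path are assumptions.
//
//	import (
//		"go.opentelemetry.io/otel/sdk/resource"
//		semconv "go.opentelemetry.io/otel/semconv/vX.Y.Z" // match the vendored version
//	)
//
//	res := resource.NewWithAttributes(
//		semconv.SchemaURL,
//		semconv.CloudProviderAWS,
//		semconv.CloudPlatformAWSECS,
//		semconv.CloudRegion("us-east-1"),
//		semconv.CloudAccountID("111111111111"),
//		semconv.CloudFoundryAppName("my-app-name"),
//	)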
+func CloudFoundrySpaceName(val string) attribute.KeyValue { + return CloudFoundrySpaceNameKey.String(val) +} + +// CloudFoundrySystemID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.id" semantic conventions. It represents a guid or another +// name describing the event source. +func CloudFoundrySystemID(val string) attribute.KeyValue { + return CloudFoundrySystemIDKey.String(val) +} + +// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid +// describing the concrete instance of the event source. +func CloudFoundrySystemInstanceID(val string) attribute.KeyValue { + return CloudFoundrySystemInstanceIDKey.String(val) +} + +// Namespace: code +const ( + // CodeColumnNumberKey is the attribute Key conforming to the + // "code.column.number" semantic conventions. It represents the column number in + // `code.file.path` best representing the operation. It SHOULD point within the + // code unit named in `code.function.name`. This attribute MUST NOT be used on + // the Profile signal since the data is already captured in 'message Line'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeColumnNumberKey = attribute.Key("code.column.number") + + // CodeFilePathKey is the attribute Key conforming to the "code.file.path" + // semantic conventions. It represents the source code file name that identifies + // the code unit as uniquely as possible (preferably an absolute file path). + // This attribute MUST NOT be used on the Profile signal since the data is + // already captured in 'message Function'. This constraint is imposed to prevent + // redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: /usr/local/MyApplication/content_root/app/index.php + CodeFilePathKey = attribute.Key("code.file.path") + + // CodeFunctionNameKey is the attribute Key conforming to the + // "code.function.name" semantic conventions. It represents the method or + // function fully-qualified name without arguments. The value should fit the + // natural representation of the language runtime, which is also likely the same + // used within `code.stacktrace` attribute value. This attribute MUST NOT be + // used on the Profile signal since the data is already captured in 'message + // Function'. This constraint is imposed to prevent redundancy and maintain data + // integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "com.example.MyHttpService.serveRequest", + // "GuzzleHttp\Client::transfer", "fopen" + // Note: Values and format depends on each language runtime, thus it is + // impossible to provide an exhaustive list of examples. + // The values are usually the same (or prefixes of) the ones found in native + // stack trace representation stored in + // `code.stacktrace` without information on arguments. 
+ // + // Examples: + // + // - Java method: `com.example.MyHttpService.serveRequest` + // - Java anonymous class method: `com.mycompany.Main$1.myMethod` + // - Java lambda method: + // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` + // - PHP function: `GuzzleHttp\Client::transfer` + // - Go function: `github.com/my/repo/pkg.foo.func5` + // - Elixir: `OpenTelemetry.Ctx.new` + // - Erlang: `opentelemetry_ctx:new` + // - Rust: `playground::my_module::my_cool_func` + // - C function: `fopen` + CodeFunctionNameKey = attribute.Key("code.function.name") + + // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" + // semantic conventions. It represents the line number in `code.file.path` best + // representing the operation. It SHOULD point within the code unit named in + // `code.function.name`. This attribute MUST NOT be used on the Profile signal + // since the data is already captured in 'message Line'. This constraint is + // imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeLineNumberKey = attribute.Key("code.line.number") + + // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" + // semantic conventions. It represents a stacktrace as a string in the natural + // representation for the language runtime. The representation is identical to + // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile + // signal since the data is already captured in 'message Location'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + // + // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumnNumber returns an attribute KeyValue conforming to the +// "code.column.number" semantic conventions. It represents the column number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +func CodeColumnNumber(val int) attribute.KeyValue { + return CodeColumnNumberKey.Int(val) +} + +// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" +// semantic conventions. It represents the source code file name that identifies +// the code unit as uniquely as possible (preferably an absolute file path). This +// attribute MUST NOT be used on the Profile signal since the data is already +// captured in 'message Function'. This constraint is imposed to prevent +// redundancy and maintain data integrity. +func CodeFilePath(val string) attribute.KeyValue { + return CodeFilePathKey.String(val) +} + +// CodeFunctionName returns an attribute KeyValue conforming to the +// "code.function.name" semantic conventions. It represents the method or +// function fully-qualified name without arguments. The value should fit the +// natural representation of the language runtime, which is also likely the same +// used within `code.stacktrace` attribute value. 
This attribute MUST NOT be used +// on the Profile signal since the data is already captured in 'message +// Function'. This constraint is imposed to prevent redundancy and maintain data +// integrity. +func CodeFunctionName(val string) attribute.KeyValue { + return CodeFunctionNameKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the +// "code.line.number" semantic conventions. It represents the line number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a string +// in the natural representation for the language runtime. The representation is +// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the +// Profile signal since the data is already captured in 'message Location'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +// +// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// Namespace: container +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used to + // run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol" + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol", "--config", "config.yaml" + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full command + // run by the container as a single string representing the full command. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol --config config.yaml" + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCSIPluginNameKey is the attribute Key conforming to the + // "container.csi.plugin.name" semantic conventions. It represents the name of + // the CSI ([Container Storage Interface]) plugin used by the volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pd.csi.storage.gke.io" + // Note: This can sometimes be referred to as a "driver" in CSI implementations. + // This should represent the `name` field of the GetPluginInfo RPC. 
+ // + // [Container Storage Interface]: https://github.com/container-storage-interface/spec + ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name") + + // ContainerCSIVolumeIDKey is the attribute Key conforming to the + // "container.csi.volume.id" semantic conventions. It represents the unique + // volume ID returned by the CSI ([Container Storage Interface]) plugin. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk" + // Note: This can sometimes be referred to as a "volume handle" in CSI + // implementations. This should represent the `Volume.volume_id` field in CSI + // spec. + // + // [Container Storage Interface]: https://github.com/container-storage-interface/spec + ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id") + + // ContainerIDKey is the attribute Key conforming to the "container.id" semantic + // conventions. It represents the container ID. Usually a UUID, as for example + // used to [identify Docker containers]. The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a3bf90e006b2" + // + // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime specific + // image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f" + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect [API] + // endpoint. + // K8s defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"` + // . + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + // + // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of the + // image the container was built on. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gcr.io/opentelemetry/operator" + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. It represents the repo + // digests of the container image as provided by the container runtime. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", + // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + // Note: [Docker] and [CRI] report those under the `RepoDigests` field. + // + // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect + // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238 + ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") + + // ContainerImageTagsKey is the attribute Key conforming to the + // "container.image.tags" semantic conventions. It represents the container + // image tags. An example can be found in [Docker Image Inspect]. Should be only + // the `` section of the full name for example from + // `registry.example.com/my-org/my-image:`. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v1.27.1", "3.5.7-0" + // + // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect + ContainerImageTagsKey = attribute.Key("container.image.tags") + + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-autoconf" + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container runtime + // managing this container. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "docker", "containerd", "rkt" + ContainerRuntimeKey = attribute.Key("container.runtime") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) run by the +// container. +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. It represents the full command +// run by the container as a single string representing the full command. +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerCSIPluginName returns an attribute KeyValue conforming to the +// "container.csi.plugin.name" semantic conventions. It represents the name of +// the CSI ([Container Storage Interface]) plugin used by the volume. 
+// +// [Container Storage Interface]: https://github.com/container-storage-interface/spec +func ContainerCSIPluginName(val string) attribute.KeyValue { + return ContainerCSIPluginNameKey.String(val) +} + +// ContainerCSIVolumeID returns an attribute KeyValue conforming to the +// "container.csi.volume.id" semantic conventions. It represents the unique +// volume ID returned by the CSI ([Container Storage Interface]) plugin. +// +// [Container Storage Interface]: https://github.com/container-storage-interface/spec +func ContainerCSIVolumeID(val string) attribute.KeyValue { + return ContainerCSIVolumeIDKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the "container.id" +// semantic conventions. It represents the container ID. Usually a UUID, as for +// example used to [identify Docker containers]. The UUID might be abbreviated. +// +// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime specific +// image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageRepoDigests returns an attribute KeyValue conforming to the +// "container.image.repo_digests" semantic conventions. It represents the repo +// digests of the container image as provided by the container runtime. +func ContainerImageRepoDigests(val ...string) attribute.KeyValue { + return ContainerImageRepoDigestsKey.StringSlice(val) +} + +// ContainerImageTags returns an attribute KeyValue conforming to the +// "container.image.tags" semantic conventions. It represents the container image +// tags. An example can be found in [Docker Image Inspect]. Should be only the +// `` section of the full name for example from +// `registry.example.com/my-org/my-image:`. +// +// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect +func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerName returns an attribute KeyValue conforming to the "container.name" +// semantic conventions. It represents the container name used by container +// runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container runtime +// managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// Namespace: cpu +const ( + // CPULogicalNumberKey is the attribute Key conforming to the + // "cpu.logical_number" semantic conventions. It represents the logical CPU + // number [0..n-1]. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + CPULogicalNumberKey = attribute.Key("cpu.logical_number") + + // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic + // conventions. It represents the mode of the CPU. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "user", "system" + CPUModeKey = attribute.Key("cpu.mode") +) + +// CPULogicalNumber returns an attribute KeyValue conforming to the +// "cpu.logical_number" semantic conventions. It represents the logical CPU +// number [0..n-1]. +func CPULogicalNumber(val int) attribute.KeyValue { + return CPULogicalNumberKey.Int(val) +} + +// Enum values for cpu.mode +var ( + // user + // Stability: development + CPUModeUser = CPUModeKey.String("user") + // system + // Stability: development + CPUModeSystem = CPUModeKey.String("system") + // nice + // Stability: development + CPUModeNice = CPUModeKey.String("nice") + // idle + // Stability: development + CPUModeIdle = CPUModeKey.String("idle") + // iowait + // Stability: development + CPUModeIOWait = CPUModeKey.String("iowait") + // interrupt + // Stability: development + CPUModeInterrupt = CPUModeKey.String("interrupt") + // steal + // Stability: development + CPUModeSteal = CPUModeKey.String("steal") + // kernel + // Stability: development + CPUModeKernel = CPUModeKey.String("kernel") +) + +// Namespace: db +const ( + // DBClientConnectionPoolNameKey is the attribute Key conforming to the + // "db.client.connection.pool.name" semantic conventions. It represents the name + // of the connection pool; unique within the instrumented application. In case + // the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes `server.address`, `server.port`, and + // `db.namespace`, formatted as `server.address:server.port/db.namespace`. + // Instrumentations that generate connection pool name following different + // patterns SHOULD document it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myDataSource" + DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") + + // DBClientConnectionStateKey is the attribute Key conforming to the + // "db.client.connection.state" semantic conventions. It represents the state of + // a connection in the pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle" + DBClientConnectionStateKey = attribute.Key("db.client.connection.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "public.users", "customers" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The collection name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple collections + // in non-batch operations. + // + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used. 
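// Editor's illustrative sketch, not part of the generated semconv file: the
// db.client.connection.* attributes documented above are usually recorded on
// connection pool metrics rather than spans. The gauge variable, the pool
// accessor, and the metric import reflect assumed OTel Go metric API usage:
//
//	import "go.opentelemetry.io/otel/metric"
//
//	usedConns.Record(ctx, int64(pool.InUse()), // usedConns, pool: placeholders
//		metric.WithAttributes(
//			DBClientConnectionPoolNameKey.String("myDataSource"),
//			DBClientConnectionStateKey.String("used"),
//		))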
+ DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic + // conventions. It represents the name of the database, fully qualified within + // the server address and port. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "customers", "test.users" + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated from the most general to the most specific namespace component, + // using `|` as a separator between the components. Any missing components (and + // their associated separators) SHOULD be omitted. + // Semantic conventions for individual database systems SHOULD document what + // `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationBatchSizeKey is the attribute Key conforming to the + // "db.operation.batch.size" semantic conventions. It represents the number of + // queries included in a batch operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so `db.operation.batch.size` SHOULD never be `1`. + DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") + + // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" + // semantic conventions. It represents the name of the operation or command + // being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "findAndModify", "HMSET", "SELECT" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The operation name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple operations + // in non-batch operations. + // + // If spaces can occur in the operation name, multiple consecutive spaces + // SHOULD be normalized to a single space. + // + // For batch operations, if the individual operations are known to have the same + // operation name + // then that operation name SHOULD be used prepended by `BATCH `, + // otherwise `db.operation.name` SHOULD be `BATCH` or some other database + // system specific term if more applicable. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" + // semantic conventions. It represents the low cardinality summary of a database + // query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get + // user by id" + // Note: The query summary describes a class of database queries and is useful + // as a grouping key, especially when analyzing telemetry for database + // calls involving complex queries. + // + // Summary may be available to the instrumentation through + // instrumentation hooks or other means. If it is not available, + // instrumentations + // that support query parsing SHOULD generate a summary following + // [Generating query summary] + // section. 
+ // + // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query + DBQuerySummaryKey = attribute.Key("db.query.summary") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" + // Note: For sanitization see [Sanitization of `db.query.text`]. + // For batch operations, if the individual operations are known to have the same + // query text then that query text SHOULD be used, otherwise all of the + // individual query texts SHOULD be concatenated with separator `; ` or some + // other database system specific separator if more applicable. + // Parameterized query text SHOULD NOT be sanitized. Even though parameterized + // query text can potentially have sensitive data, by using a parameterized + // query the user is giving a strong signal that any sensitive data will be + // passed as parameter values, and the benefit to observability of capturing the + // static part of the query text by default outweighs the risk. + // + // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext + DBQueryTextKey = attribute.Key("db.query.text") + + // DBResponseReturnedRowsKey is the attribute Key conforming to the + // "db.response.returned_rows" semantic conventions. It represents the number of + // rows returned by the operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10, 30, 1000 + DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") + + // DBResponseStatusCodeKey is the attribute Key conforming to the + // "db.response.status_code" semantic conventions. It represents the database + // response status code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "102", "ORA-17002", "08P01", "404" + // Note: The status code returned by the database. Usually it represents an + // error code, but may also represent partial success, warning, or differentiate + // between various types of successful outcomes. + // Semantic conventions for individual database systems SHOULD document what + // `db.response.status_code` means in the context of that system. + DBResponseStatusCodeKey = attribute.Key("db.response.status_code") + + // DBStoredProcedureNameKey is the attribute Key conforming to the + // "db.stored_procedure.name" semantic conventions. It represents the name of a + // stored procedure within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GetCustomer" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // For batch operations, if the individual operations are known to have the same + // stored procedure name then that stored procedure name SHOULD be used. + DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") + + // DBSystemNameKey is the attribute Key conforming to the "db.system.name" + // semantic conventions. It represents the database management system (DBMS) + // product as identified by the client instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the `db.system.name` is set to `postgresql` based on the instrumentation's + // best knowledge. + DBSystemNameKey = attribute.Key("db.system.name") +) + +// DBClientConnectionPoolName returns an attribute KeyValue conforming to the +// "db.client.connection.pool.name" semantic conventions. It represents the name +// of the connection pool; unique within the instrumented application. In case +// the connection pool implementation doesn't provide a name, instrumentation +// SHOULD use a combination of parameters that would make the name unique, for +// example, combining attributes `server.address`, `server.port`, and +// `db.namespace`, formatted as `server.address:server.port/db.namespace`. +// Instrumentations that generate connection pool name following different +// patterns SHOULD document it. +func DBClientConnectionPoolName(val string) attribute.KeyValue { + return DBClientConnectionPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" +// semantic conventions. It represents the name of the database, fully qualified +// within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationBatchSize returns an attribute KeyValue conforming to the +// "db.operation.batch.size" semantic conventions. It represents the number of +// queries included in a batch operation. +func DBOperationBatchSize(val int) attribute.KeyValue { + return DBOperationBatchSizeKey.Int(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBQuerySummary returns an attribute KeyValue conforming to the +// "db.query.summary" semantic conventions. It represents the low cardinality +// summary of a database query. +func DBQuerySummary(val string) attribute.KeyValue { + return DBQuerySummaryKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" +// semantic conventions. It represents the database query being executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// DBResponseReturnedRows returns an attribute KeyValue conforming to the +// "db.response.returned_rows" semantic conventions. It represents the number of +// rows returned by the operation. +func DBResponseReturnedRows(val int) attribute.KeyValue { + return DBResponseReturnedRowsKey.Int(val) +} + +// DBResponseStatusCode returns an attribute KeyValue conforming to the +// "db.response.status_code" semantic conventions. It represents the database +// response status code. 
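// Editor's illustrative sketch, not part of the generated semconv file: on a
// database client span the helpers above are passed straight to
// trace.Span.SetAttributes. The span variable is a placeholder, and the literal
// values reuse the examples documented above:
//
//	span.SetAttributes(
//		DBOperationName("SELECT"),
//		DBCollectionName("public.users"),
//		DBQueryText("SELECT * FROM wuser_table where username = ?"),
//		DBQuerySummary("SELECT wuser_table"),
//	)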
+func DBResponseStatusCode(val string) attribute.KeyValue { + return DBResponseStatusCodeKey.String(val) +} + +// DBStoredProcedureName returns an attribute KeyValue conforming to the +// "db.stored_procedure.name" semantic conventions. It represents the name of a +// stored procedure within the database. +func DBStoredProcedureName(val string) attribute.KeyValue { + return DBStoredProcedureNameKey.String(val) +} + +// Enum values for db.client.connection.state +var ( + // idle + // Stability: development + DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") + // used + // Stability: development + DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") +) + +// Enum values for db.system.name +var ( + // Some other SQL database. Fallback only. + // Stability: development + DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") + // [Adabas (Adaptable Database System)] + // Stability: development + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") + // [Actian Ingres] + // Stability: development + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") + // [Amazon DynamoDB] + // Stability: development + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") + // [Amazon Redshift] + // Stability: development + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") + // [Azure Cosmos DB] + // Stability: development + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") + // [InterSystems Caché] + // Stability: development + // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") + // [Apache Cassandra] + // Stability: development + // + // [Apache Cassandra]: https://cassandra.apache.org/ + DBSystemNameCassandra = DBSystemNameKey.String("cassandra") + // [ClickHouse] + // Stability: development + // + // [ClickHouse]: https://clickhouse.com/ + DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") + // [CockroachDB] + // Stability: development + // + // [CockroachDB]: https://www.cockroachlabs.com/ + DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") + // [Couchbase] + // Stability: development + // + // [Couchbase]: https://www.couchbase.com/ + DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") + // [Apache CouchDB] + // Stability: development + // + // [Apache CouchDB]: https://couchdb.apache.org/ + DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") + // [Apache Derby] + // Stability: development + // + // [Apache Derby]: https://db.apache.org/derby/ + DBSystemNameDerby = DBSystemNameKey.String("derby") + // [Elasticsearch] + // Stability: development + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") + // [Firebird] + // Stability: development + // + // [Firebird]: https://www.firebirdsql.org/ + DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") + // [Google Cloud Spanner] + // Stability: development + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + DBSystemNameGCPSpanner = 
DBSystemNameKey.String("gcp.spanner") + // [Apache Geode] + // Stability: development + // + // [Apache Geode]: https://geode.apache.org/ + DBSystemNameGeode = DBSystemNameKey.String("geode") + // [H2 Database] + // Stability: development + // + // [H2 Database]: https://h2database.com/ + DBSystemNameH2database = DBSystemNameKey.String("h2database") + // [Apache HBase] + // Stability: development + // + // [Apache HBase]: https://hbase.apache.org/ + DBSystemNameHBase = DBSystemNameKey.String("hbase") + // [Apache Hive] + // Stability: development + // + // [Apache Hive]: https://hive.apache.org/ + DBSystemNameHive = DBSystemNameKey.String("hive") + // [HyperSQL Database] + // Stability: development + // + // [HyperSQL Database]: https://hsqldb.org/ + DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb") + // [IBM Db2] + // Stability: development + // + // [IBM Db2]: https://www.ibm.com/db2 + DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") + // [IBM Informix] + // Stability: development + // + // [IBM Informix]: https://www.ibm.com/products/informix + DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") + // [IBM Netezza] + // Stability: development + // + // [IBM Netezza]: https://www.ibm.com/products/netezza + DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") + // [InfluxDB] + // Stability: development + // + // [InfluxDB]: https://www.influxdata.com/ + DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") + // [Instant] + // Stability: development + // + // [Instant]: https://www.instantdb.com/ + DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") + // [MariaDB] + // Stability: stable + // + // [MariaDB]: https://mariadb.org/ + DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") + // [Memcached] + // Stability: development + // + // [Memcached]: https://memcached.org/ + DBSystemNameMemcached = DBSystemNameKey.String("memcached") + // [MongoDB] + // Stability: development + // + // [MongoDB]: https://www.mongodb.com/ + DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") + // [Microsoft SQL Server] + // Stability: stable + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") + // [MySQL] + // Stability: stable + // + // [MySQL]: https://www.mysql.com/ + DBSystemNameMySQL = DBSystemNameKey.String("mysql") + // [Neo4j] + // Stability: development + // + // [Neo4j]: https://neo4j.com/ + DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") + // [OpenSearch] + // Stability: development + // + // [OpenSearch]: https://opensearch.org/ + DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") + // [Oracle Database] + // Stability: development + // + // [Oracle Database]: https://www.oracle.com/database/ + DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") + // [PostgreSQL] + // Stability: stable + // + // [PostgreSQL]: https://www.postgresql.org/ + DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") + // [Redis] + // Stability: development + // + // [Redis]: https://redis.io/ + DBSystemNameRedis = DBSystemNameKey.String("redis") + // [SAP HANA] + // Stability: development + // + // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") + // [SAP MaxDB] + // Stability: development + // + // [SAP MaxDB]: https://maxdb.sap.com/ + DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") + // [SQLite] + // Stability: development + // + // 
[SQLite]: https://www.sqlite.org/ + DBSystemNameSQLite = DBSystemNameKey.String("sqlite") + // [Teradata] + // Stability: development + // + // [Teradata]: https://www.teradata.com/ + DBSystemNameTeradata = DBSystemNameKey.String("teradata") + // [Trino] + // Stability: development + // + // [Trino]: https://trino.io/ + DBSystemNameTrino = DBSystemNameKey.String("trino") +) + +// Namespace: deployment +const ( + // DeploymentEnvironmentNameKey is the attribute Key conforming to the + // "deployment.environment.name" semantic conventions. It represents the name of + // the [deployment environment] (aka deployment tier). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "staging", "production" + // Note: `deployment.environment.name` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` resource + // attributes. + // This implies that resources carrying the following attribute combinations + // MUST be + // considered to be identifying the same service: + // + // - `service.name=frontend`, `deployment.environment.name=production` + // - `service.name=frontend`, `deployment.environment.name=staging`. + // + // + // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment + DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") + + // DeploymentIDKey is the attribute Key conforming to the "deployment.id" + // semantic conventions. It represents the id of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1208" + DeploymentIDKey = attribute.Key("deployment.id") + + // DeploymentNameKey is the attribute Key conforming to the "deployment.name" + // semantic conventions. It represents the name of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "deploy my app", "deploy-frontend" + DeploymentNameKey = attribute.Key("deployment.name") + + // DeploymentStatusKey is the attribute Key conforming to the + // "deployment.status" semantic conventions. It represents the status of the + // deployment. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + DeploymentStatusKey = attribute.Key("deployment.status") +) + +// DeploymentEnvironmentName returns an attribute KeyValue conforming to the +// "deployment.environment.name" semantic conventions. It represents the name of +// the [deployment environment] (aka deployment tier). +// +// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment +func DeploymentEnvironmentName(val string) attribute.KeyValue { + return DeploymentEnvironmentNameKey.String(val) +} + +// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" +// semantic conventions. It represents the id of the deployment. +func DeploymentID(val string) attribute.KeyValue { + return DeploymentIDKey.String(val) +} + +// DeploymentName returns an attribute KeyValue conforming to the +// "deployment.name" semantic conventions. It represents the name of the +// deployment. 
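// Editor's illustrative sketch, not part of the generated semconv file:
// deployment attributes normally live on the resource alongside the service.*
// attributes. resource.NewWithAttributes and the example values are assumptions
// about typical OTel Go SDK usage; SchemaURL refers to the package-level schema
// URL constant assumed to be defined elsewhere in this package:
//
//	res := resource.NewWithAttributes(
//		SchemaURL,
//		DeploymentEnvironmentName("production"),
//		DeploymentID("1208"),
//	)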
+func DeploymentName(val string) attribute.KeyValue { + return DeploymentNameKey.String(val) +} + +// Enum values for deployment.status +var ( + // failed + // Stability: development + DeploymentStatusFailed = DeploymentStatusKey.String("failed") + // succeeded + // Stability: development + DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded") +) + +// Namespace: destination +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the destination + // address - domain name if available without reverse DNS lookup; otherwise, IP + // address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the source side, and when communicating through an + // intermediary, `destination.address` SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the "destination.port" + // semantic conventions. It represents the destination port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number. +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Namespace: device +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123456789012345", "01:23:45:67:89:AB" + // Note: Its value SHOULD be identical for all apps on a device and it SHOULD + // NOT change if an app is uninstalled and re-installed. + // However, it might be resettable by the user for all apps on a device. + // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be + // used as values. + // + // More information about Android identifier best practices can be found [here] + // . + // + // > [!WARNING]> This attribute may contain sensitive (PII) information. Caution + // > should be taken when storing personal data or anything which can identify a + // > user. GDPR and data protection laws may apply, + // > ensure you do your own due diligence.> Due to these reasons, this + // > identifier is not recommended for consumer applications and will likely + // > result in rejection from both Google Play and App Store. + // > However, it may be appropriate for specific enterprise scenarios, such as + // > kiosk devices or enterprise-managed devices, with appropriate compliance + // > clearance. 
+ // > Any instrumentation providing this identifier MUST implement it as an + // > opt-in feature.> See [`app.installation.id`]> for a more + // > privacy-preserving alternative. + // + // [here]: https://developer.android.com/training/articles/user-data-ids + // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of the + // device manufacturer. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apple", "Samsung" + // Note: The Android OS provides this field via [Build]. iOS apps SHOULD + // hardcode the value `Apple`. + // + // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone3,4", "SM-G920F" + // Note: It's recommended this value represents a machine-readable version of + // the model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the "device.model.name" + // semantic conventions. It represents the marketing name for the device model. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone 6s Plus", "Samsung Galaxy S6" + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic +// conventions. It represents a unique identifier representing the device. +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer. +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device. +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name for +// the device model. +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// Namespace: disk +const ( + // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction" + // semantic conventions. It represents the disk IO operation direction. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "read" + DiskIODirectionKey = attribute.Key("disk.io.direction") +) + +// Enum values for disk.io.direction +var ( + // read + // Stability: development + DiskIODirectionRead = DiskIODirectionKey.String("read") + // write + // Stability: development + DiskIODirectionWrite = DiskIODirectionKey.String("write") +) + +// Namespace: dns +const ( + // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name" + // semantic conventions. It represents the name being queried. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.example.com", "opentelemetry.io" + // Note: If the name field contains non-printable characters (below 32 or above + // 126), those characters should be represented as escaped base 10 integers + // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, + // and line feeds should be converted to \t, \r, and \n respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Namespace: elasticsearch +const ( + // ElasticsearchNodeNameKey is the attribute Key conforming to the + // "elasticsearch.node.name" semantic conventions. It represents the represents + // the human-readable identifier of the node/instance to which a request was + // routed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "instance-0000000001" + ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name") +) + +// ElasticsearchNodeName returns an attribute KeyValue conforming to the +// "elasticsearch.node.name" semantic conventions. It represents the represents +// the human-readable identifier of the node/instance to which a request was +// routed. +func ElasticsearchNodeName(val string) attribute.KeyValue { + return ElasticsearchNodeNameKey.String(val) +} + +// Namespace: enduser +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic + // conventions. It represents the unique identifier of an end user in the + // system. It maybe a username, email address, or other identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "username" + // Note: Unique identifier of an end user in the system. + // + // > [!Warning] + // > This field contains sensitive (PII) information. + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id" + // semantic conventions. It represents the pseudonymous identifier of an end + // user. This identifier should be a random value that is not directly linked or + // associated with the end user's actual identity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "QdH5CAWJgqVT4rOr0qtumf" + // Note: Pseudonymous identifier of an end user. + // + // > [!Warning] + // > This field contains sensitive (linkable PII) information. + EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. 
It represents the unique identifier of an end user in +// the system. It maybe a username, email address, or other identifier. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserPseudoID returns an attribute KeyValue conforming to the +// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous +// identifier of an end user. This identifier should be a random value that is +// not directly linked or associated with the end user's actual identity. +func EnduserPseudoID(val string) attribute.KeyValue { + return EnduserPseudoIDKey.String(val) +} + +// Namespace: error +const ( + // ErrorMessageKey is the attribute Key conforming to the "error.message" + // semantic conventions. It represents a message providing more detail about an + // error in human-readable form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Unexpected input type: string", "The user has exceeded their + // storage quota" + // Note: `error.message` should provide additional context and detail about an + // error. + // It is NOT RECOMMENDED to duplicate the value of `error.type` in + // `error.message`. + // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in + // `error.message`. + // + // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded + // cardinality and overlap with span status. + ErrorMessageKey = attribute.Key("error.message") + + // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic + // conventions. It represents the describes a class of error the operation ended + // with. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "timeout", "java.net.UnknownHostException", + // "server_certificate_invalid", "500" + // Note: The `error.type` SHOULD be predictable, and SHOULD have low + // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be used. + // + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library SHOULD be + // low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query time + // when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT set + // `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as HTTP + // or gRPC status codes), + // it's RECOMMENDED to: + // + // - Use a domain-specific attribute + // - Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +// ErrorMessage returns an attribute KeyValue conforming to the "error.message" +// semantic conventions. It represents a message providing more detail about an +// error in human-readable form. +func ErrorMessage(val string) attribute.KeyValue { + return ErrorMessageKey.String(val) +} + +// Enum values for error.type +var ( + // A fallback error value to be used when the instrumentation doesn't define a + // custom value. 
+ // + // Stability: stable + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Namespace: exception +const ( + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: Exception in thread "main" java.lang.RuntimeException: Test + // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the exception + // should be preferred over the static type in languages that support it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "java.net.ConnectException", "OSError" + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the "exception.type" +// semantic conventions. It represents the type of the exception (its +// fully-qualified class name, if applicable). The dynamic type of the exception +// should be preferred over the static type in languages that support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Namespace: faas +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the serverless + // function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron Expression]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0/5 * * * ? 
* + // + // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name of + // the source on which the triggering operation was performed. For example, in + // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the + // database name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myBucketName", "myDbName" + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or S3 is + // the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myFile.txt", "myTableName" + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the describes + // the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string containing + // the time when the data was accessed in the [ISO 8601] format expressed in + // [UTC]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a string, + // that will be potentially reused for other invocations to the same + // function/function version. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de" + // Note: - **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation ID of + // the current function invocation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28 + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") + + // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name" + // semantic conventions. It represents the name of the invoked function. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: my-function + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. 
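// Illustrative sketch (not part of the vendored diff) pairing the error.* and
// exception.* attributes above with the usual span error APIs; the semconv
// import version suffix is assumed.
package example

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version suffix
)

func doWork(ctx context.Context) {
	_, span := otel.Tracer("worker").Start(ctx, "do-work")
	defer span.End()

	if err := work(); err != nil {
		// Keep error.type low-cardinality; semconv.ErrorTypeOther is the fallback.
		// Per the note above, error.message is not recommended on spans, so the
		// detail goes through RecordError (which emits an exception event carrying
		// the exception.* attributes) and the span status instead.
		span.SetAttributes(semconv.ErrorTypeKey.String("quota_exceeded"))
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
	}
}

func work() error { return errors.New("the user has exceeded their storage quota") }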
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud region of + // the invoked function. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: eu-central-1 + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory" + // semantic conventions. It represents the amount of memory available to the + // serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information (which must be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this runtime + // instance executes. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-function", "myazurefunctionapp/some-function-name" + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function.name`] + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products: + // + // - **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + // + // + // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation time + // in the [ISO 8601] format expressed in [UTC]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic + // conventions. It represents the type of the trigger which caused this function + // invocation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic + // conventions. It represents the immutable version of the function being + // executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "26", "pinkfroid-00002" + // Note: Depending on the cloud provider and platform, use: + // + // - **AWS Lambda:** The [function version] + // (an integer represented as a decimal string). + // - **Google Cloud Run (Services):** The [revision] + // (i.e., the function name plus the revision suffix). + // - **Google Cloud Functions:** The value of the + // [`K_REVISION` environment variable]. + // - **Azure Functions:** Not applicable. Do not set this attribute. + // + // + // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html + // [revision]: https://cloud.google.com/run/docs/managing/revisions + // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" +// semantic conventions. It represents a boolean that is true if the serverless +// function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic +// conventions. It represents a string containing the schedule period as +// [Cron Expression]. +// +// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of the +// source on which the triggering operation was performed. For example, in Cloud +// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database +// name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 is +// the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO 8601] format expressed in +// [UTC]. 
+// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" +// semantic conventions. It represents the execution environment ID as a string, +// that will be potentially reused for other invocations to the same +// function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID of +// the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region of +// the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic +// conventions. It represents the name of the single function that this runtime +// instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic +// conventions. It represents a string containing the function invocation time in +// the [ISO 8601] format expressed in [UTC]. +// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" +// semantic conventions. It represents the immutable version of the function +// being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Enum values for faas.document.operation +var ( + // When a new object is created. + // Stability: development + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified. + // Stability: development + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted. 
+ // Stability: development + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Enum values for faas.invoked_provider +var ( + // Alibaba Cloud + // Stability: development + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + // Stability: development + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + // Stability: development + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + // Stability: development + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + // Stability: development + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// Enum values for faas.trigger +var ( + // A response to some data source operation such as a database or filesystem + // read/write + // Stability: development + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + // Stability: development + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + // Stability: development + FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + // Stability: development + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + // Stability: development + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Namespace: feature_flag +const ( + // FeatureFlagContextIDKey is the attribute Key conforming to the + // "feature_flag.context.id" semantic conventions. It represents the unique + // identifier for the flag evaluation context. For example, the targeting key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" + FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") + + // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key" + // semantic conventions. It represents the lookup key of the feature flag. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "logo-color" + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider.name" semantic conventions. It represents the + // identifies the feature flag provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Flag Manager" + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name") + + // FeatureFlagResultReasonKey is the attribute Key conforming to the + // "feature_flag.result.reason" semantic conventions. It represents the reason + // code which shows how a feature flag value was determined. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "static", "targeting_match", "error", "default" + FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason") + + // FeatureFlagResultValueKey is the attribute Key conforming to the + // "feature_flag.result.value" semantic conventions. It represents the evaluated + // value of the feature flag. 
+ // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "#ff0000", true, 3 + // Note: With some feature flag providers, feature flag results can be quite + // large or contain private or sensitive details. + // Because of this, `feature_flag.result.variant` is often the preferred + // attribute if it is available. + // + // It may be desirable to redact or otherwise limit the size and scope of + // `feature_flag.result.value` if possible. + // Because the evaluated flag value is unstructured and may be any type, it is + // left to the instrumentation author to determine how best to achieve this. + FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value") + + // FeatureFlagResultVariantKey is the attribute Key conforming to the + // "feature_flag.result.variant" semantic conventions. It represents a semantic + // identifier for an evaluated flag value. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "red", "true", "on" + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. + FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant") + + // FeatureFlagSetIDKey is the attribute Key conforming to the + // "feature_flag.set.id" semantic conventions. It represents the identifier of + // the [flag set] to which the feature flag belongs. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "proj-1", "ab98sgs", "service1/dev" + // + // [flag set]: https://openfeature.dev/specification/glossary/#flag-set + FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id") + + // FeatureFlagVersionKey is the attribute Key conforming to the + // "feature_flag.version" semantic conventions. It represents the version of the + // ruleset used during the evaluation. This may be any stable value which + // uniquely identifies the ruleset. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1", "01ABCDEF" + FeatureFlagVersionKey = attribute.Key("feature_flag.version") +) + +// FeatureFlagContextID returns an attribute KeyValue conforming to the +// "feature_flag.context.id" semantic conventions. It represents the unique +// identifier for the flag evaluation context. For example, the targeting key. +func FeatureFlagContextID(val string) attribute.KeyValue { + return FeatureFlagContextIDKey.String(val) +} + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the lookup key of the +// feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider.name" semantic conventions. It represents the +// identifies the feature flag provider. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagResultVariant returns an attribute KeyValue conforming to the +// "feature_flag.result.variant" semantic conventions. It represents a semantic +// identifier for an evaluated flag value. 
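// Illustrative sketch (not part of the vendored diff) using the faas.* helpers
// and enum values defined above for a timer-triggered invocation; the semconv
// import version suffix is assumed.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version suffix
)

func handleScheduledRun(ctx context.Context, coldStart bool, invocationID string) {
	_, span := otel.Tracer("faas").Start(ctx, "scheduled-run")
	defer span.End()

	span.SetAttributes(
		semconv.FaaSTriggerTimer, // enum values are ready-made attribute.KeyValues
		semconv.FaaSColdstart(coldStart),
		semconv.FaaSInvocationID(invocationID),
		semconv.FaaSMaxMemory(128*1024*1024), // bytes, per the faas.max_memory note
	)
}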
+func FeatureFlagResultVariant(val string) attribute.KeyValue { + return FeatureFlagResultVariantKey.String(val) +} + +// FeatureFlagSetID returns an attribute KeyValue conforming to the +// "feature_flag.set.id" semantic conventions. It represents the identifier of +// the [flag set] to which the feature flag belongs. +// +// [flag set]: https://openfeature.dev/specification/glossary/#flag-set +func FeatureFlagSetID(val string) attribute.KeyValue { + return FeatureFlagSetIDKey.String(val) +} + +// FeatureFlagVersion returns an attribute KeyValue conforming to the +// "feature_flag.version" semantic conventions. It represents the version of the +// ruleset used during the evaluation. This may be any stable value which +// uniquely identifies the ruleset. +func FeatureFlagVersion(val string) attribute.KeyValue { + return FeatureFlagVersionKey.String(val) +} + +// Enum values for feature_flag.result.reason +var ( + // The resolved value is static (no dynamic evaluation). + // Stability: development + FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static") + // The resolved value fell back to a pre-configured value (no dynamic evaluation + // occurred or dynamic evaluation yielded no result). + // Stability: development + FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default") + // The resolved value was the result of a dynamic evaluation, such as a rule or + // specific user-targeting. + // Stability: development + FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match") + // The resolved value was the result of pseudorandom assignment. + // Stability: development + FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split") + // The resolved value was retrieved from cache. + // Stability: development + FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached") + // The resolved value was the result of the flag being disabled in the + // management system. + // Stability: development + FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled") + // The reason for the resolved value could not be determined. + // Stability: development + FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown") + // The resolved value is non-authoritative or possibly out of date + // Stability: development + FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale") + // The resolved value was the result of an error. + // Stability: development + FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error") +) + +// Namespace: file +const ( + // FileAccessedKey is the attribute Key conforming to the "file.accessed" + // semantic conventions. It represents the time when the file was last accessed, + // in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: This attribute might not be supported by some file systems — NFS, + // FAT32, in embedded OS, etc. + FileAccessedKey = attribute.Key("file.accessed") + + // FileAttributesKey is the attribute Key conforming to the "file.attributes" + // semantic conventions. It represents the array of file attributes. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "readonly", "hidden" + // Note: Attributes names depend on the OS or file system. 
Here’s a + // non-exhaustive list of values expected for this attribute: `archive`, + // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, + // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, + // `write`. + FileAttributesKey = attribute.Key("file.attributes") + + // FileChangedKey is the attribute Key conforming to the "file.changed" semantic + // conventions. It represents the time when the file attributes or metadata was + // last changed, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: `file.changed` captures the time when any of the file's properties or + // attributes (including the content) are changed, while `file.modified` + // captures the timestamp when the file content is modified. + FileChangedKey = attribute.Key("file.changed") + + // FileCreatedKey is the attribute Key conforming to the "file.created" semantic + // conventions. It represents the time when the file was created, in ISO 8601 + // format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: This attribute might not be supported by some file systems — NFS, + // FAT32, in embedded OS, etc. + FileCreatedKey = attribute.Key("file.created") + + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is located. + // It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/user", "C:\Program Files\MyApp" + FileDirectoryKey = attribute.Key("file.directory") + + // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the leading + // dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: When the file name has multiple extensions (example.tar.gz), only the + // last one should be captured ("gz", not "tar.gz"). + FileExtensionKey = attribute.Key("file.extension") + + // FileForkNameKey is the attribute Key conforming to the "file.fork_name" + // semantic conventions. It represents the name of the fork. A fork is + // additional data associated with a filesystem object. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Zone.Identifer" + // Note: On Linux, a resource fork is used to store additional data with a + // filesystem object. A file always has at least one fork for the data portion, + // and additional forks may exist. + // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default + // data stream for a file is just called $DATA. Zone.Identifier is commonly used + // by Windows to track contents downloaded from the Internet. An ADS is + // typically of the form: C:\path\to\filename.extension:some_fork_name, and + // some_fork_name is the value that should populate `fork_name`. + // `filename.extension` should populate `file.name`, and `extension` should + // populate `file.extension`. The full path, `file.path`, will include the fork + // name. + FileForkNameKey = attribute.Key("file.fork_name") + + // FileGroupIDKey is the attribute Key conforming to the "file.group.id" + // semantic conventions. 
It represents the primary Group ID (GID) of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileGroupIDKey = attribute.Key("file.group.id") + + // FileGroupNameKey is the attribute Key conforming to the "file.group.name" + // semantic conventions. It represents the primary group name of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "users" + FileGroupNameKey = attribute.Key("file.group.name") + + // FileInodeKey is the attribute Key conforming to the "file.inode" semantic + // conventions. It represents the inode representing the file in the filesystem. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "256383" + FileInodeKey = attribute.Key("file.inode") + + // FileModeKey is the attribute Key conforming to the "file.mode" semantic + // conventions. It represents the mode of the file in octal representation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0640" + FileModeKey = attribute.Key("file.mode") + + // FileModifiedKey is the attribute Key conforming to the "file.modified" + // semantic conventions. It represents the time when the file content was last + // modified, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + FileModifiedKey = attribute.Key("file.modified") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.png" + FileNameKey = attribute.Key("file.name") + + // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" + // semantic conventions. It represents the user ID (UID) or security identifier + // (SID) of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileOwnerIDKey = attribute.Key("file.owner.id") + + // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" + // semantic conventions. It represents the username of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + FileOwnerNameKey = attribute.Key("file.owner.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FileSizeKey = attribute.Key("file.size") + + // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the + // "file.symbolic_link.target_path" semantic conventions. It represents the path + // to the target of a symbolic link. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/python3" + // Note: This attribute is only applicable to symbolic links. + FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") +) + +// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" +// semantic conventions. It represents the time when the file was last accessed, +// in ISO 8601 format. +func FileAccessed(val string) attribute.KeyValue { + return FileAccessedKey.String(val) +} + +// FileAttributes returns an attribute KeyValue conforming to the +// "file.attributes" semantic conventions. It represents the array of file +// attributes. +func FileAttributes(val ...string) attribute.KeyValue { + return FileAttributesKey.StringSlice(val) +} + +// FileChanged returns an attribute KeyValue conforming to the "file.changed" +// semantic conventions. It represents the time when the file attributes or +// metadata was last changed, in ISO 8601 format. +func FileChanged(val string) attribute.KeyValue { + return FileChangedKey.String(val) +} + +// FileCreated returns an attribute KeyValue conforming to the "file.created" +// semantic conventions. It represents the time when the file was created, in ISO +// 8601 format. +func FileCreated(val string) attribute.KeyValue { + return FileCreatedKey.String(val) +} + +// FileDirectory returns an attribute KeyValue conforming to the "file.directory" +// semantic conventions. It represents the directory where the file is located. +// It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the "file.extension" +// semantic conventions. It represents the file extension, excluding the leading +// dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" +// semantic conventions. It represents the name of the fork. A fork is additional +// data associated with a filesystem object. +func FileForkName(val string) attribute.KeyValue { + return FileForkNameKey.String(val) +} + +// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" +// semantic conventions. It represents the primary Group ID (GID) of the file. +func FileGroupID(val string) attribute.KeyValue { + return FileGroupIDKey.String(val) +} + +// FileGroupName returns an attribute KeyValue conforming to the +// "file.group.name" semantic conventions. It represents the primary group name +// of the file. +func FileGroupName(val string) attribute.KeyValue { + return FileGroupNameKey.String(val) +} + +// FileInode returns an attribute KeyValue conforming to the "file.inode" +// semantic conventions. It represents the inode representing the file in the +// filesystem. +func FileInode(val string) attribute.KeyValue { + return FileInodeKey.String(val) +} + +// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic +// conventions. It represents the mode of the file in octal representation. +func FileMode(val string) attribute.KeyValue { + return FileModeKey.String(val) +} + +// FileModified returns an attribute KeyValue conforming to the "file.modified" +// semantic conventions. It represents the time when the file content was last +// modified, in ISO 8601 format. 
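// Illustrative sketch (not part of the vendored diff) recording a flag
// evaluation with the feature_flag.* helpers above, reusing the documented
// example values; the semconv import version suffix is assumed.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version suffix
)

func recordFlagEvaluation(ctx context.Context) {
	_, span := otel.Tracer("flags").Start(ctx, "feature_flag.evaluation")
	defer span.End()

	span.SetAttributes(
		semconv.FeatureFlagKey("logo-color"),
		semconv.FeatureFlagProviderName("Flag Manager"),
		// The variant is preferred over feature_flag.result.value, per the note above.
		semconv.FeatureFlagResultVariant("red"),
		semconv.FeatureFlagResultReasonTargetingMatch,
	)
}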
+func FileModified(val string) attribute.KeyValue { + return FileModifiedKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" semantic +// conventions. It represents the name of the file including the extension, +// without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id" +// semantic conventions. It represents the user ID (UID) or security identifier +// (SID) of the file owner. +func FileOwnerID(val string) attribute.KeyValue { + return FileOwnerIDKey.String(val) +} + +// FileOwnerName returns an attribute KeyValue conforming to the +// "file.owner.name" semantic conventions. It represents the username of the file +// owner. +func FileOwnerName(val string) attribute.KeyValue { + return FileOwnerNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" semantic +// conventions. It represents the full path to the file, including the file name. +// It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" semantic +// conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the +// "file.symbolic_link.target_path" semantic conventions. It represents the path +// to the target of a symbolic link. +func FileSymbolicLinkTargetPath(val string) attribute.KeyValue { + return FileSymbolicLinkTargetPathKey.String(val) +} + +// Namespace: gcp +const ( + // GCPAppHubApplicationContainerKey is the attribute Key conforming to the + // "gcp.apphub.application.container" semantic conventions. It represents the + // container within GCP where the AppHub application is defined. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "projects/my-container-project" + GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container") + + // GCPAppHubApplicationIDKey is the attribute Key conforming to the + // "gcp.apphub.application.id" semantic conventions. It represents the name of + // the application as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-application" + GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id") + + // GCPAppHubApplicationLocationKey is the attribute Key conforming to the + // "gcp.apphub.application.location" semantic conventions. It represents the GCP + // zone or region where the application is defined. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1" + GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location") + + // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the + // "gcp.apphub.service.criticality_type" semantic conventions. It represents the + // criticality of a service indicates its importance to the business. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub type enum] + // + // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type + GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type") + + // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the + // "gcp.apphub.service.environment_type" semantic conventions. It represents the + // environment of a service is the stage of a software lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub environment type] + // + // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 + GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type") + + // GCPAppHubServiceIDKey is the attribute Key conforming to the + // "gcp.apphub.service.id" semantic conventions. It represents the name of the + // service as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-service" + GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id") + + // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the + // "gcp.apphub.workload.criticality_type" semantic conventions. It represents + // the criticality of a workload indicates its importance to the business. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub type enum] + // + // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type + GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type") + + // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the + // "gcp.apphub.workload.environment_type" semantic conventions. It represents + // the environment of a workload is the stage of a software lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub environment type] + // + // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 + GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type") + + // GCPAppHubWorkloadIDKey is the attribute Key conforming to the + // "gcp.apphub.workload.id" semantic conventions. It represents the name of the + // workload as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-workload" + GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id") + + // GCPClientServiceKey is the attribute Key conforming to the + // "gcp.client.service" semantic conventions. It represents the identifies the + // Google Cloud service for which the official client library is intended. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "appengine", "run", "firestore", "alloydb", "spanner" + // Note: Intended to be a stable identifier for Google Cloud client libraries + // that is uniform across implementation languages. The value should be derived + // from the canonical service domain for the service; for example, + // 'foo.googleapis.com' should result in a value of 'foo'. 
+ GCPClientServiceKey = attribute.Key("gcp.client.service") + + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of + // the Cloud Run [execution] being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`] environment variable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "job-name-xxxx", "sample-job-mdw84" + // + // [execution]: https://cloud.google.com/run/docs/managing/job-executions + // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index + // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] + // environment variable. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 1 + // + // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") + + // GCPGCEInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname + // of a GCE instance. This is the full value of the default or [custom hostname] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-host1234.example.com", + // "sample-vm.us-west1-b.c.my-project.internal" + // + // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm + GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGCEInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance name + // of a GCE instance. This is the value provided by `host.name`, the visible + // name of the instance in the Cloud Console UI, and the prefix for the default + // hostname of the instance as defined by the [default internal DNS name]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "instance-1", "my-vm-name" + // + // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names + GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the +// "gcp.apphub.application.container" semantic conventions. It represents the +// container within GCP where the AppHub application is defined. +func GCPAppHubApplicationContainer(val string) attribute.KeyValue { + return GCPAppHubApplicationContainerKey.String(val) +} + +// GCPAppHubApplicationID returns an attribute KeyValue conforming to the +// "gcp.apphub.application.id" semantic conventions. It represents the name of +// the application as configured in AppHub. +func GCPAppHubApplicationID(val string) attribute.KeyValue { + return GCPAppHubApplicationIDKey.String(val) +} + +// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the +// "gcp.apphub.application.location" semantic conventions. It represents the GCP +// zone or region where the application is defined. 
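// Illustrative sketch (not part of the vendored diff) building attributes for a
// Cloud Run job from the environment variables named in the gcp.cloud_run.job.*
// notes of this namespace; the semconv import version suffix is assumed.
package example

import (
	"os"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version suffix
)

func cloudRunJobAttributes() []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.GCPCloudRunJobExecution(os.Getenv("CLOUD_RUN_EXECUTION")),
	}
	if idx, err := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX")); err == nil {
		attrs = append(attrs, semconv.GCPCloudRunJobTaskIndex(idx))
	}
	return attrs
}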
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue { + return GCPAppHubApplicationLocationKey.String(val) +} + +// GCPAppHubServiceID returns an attribute KeyValue conforming to the +// "gcp.apphub.service.id" semantic conventions. It represents the name of the +// service as configured in AppHub. +func GCPAppHubServiceID(val string) attribute.KeyValue { + return GCPAppHubServiceIDKey.String(val) +} + +// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the +// "gcp.apphub.workload.id" semantic conventions. It represents the name of the +// workload as configured in AppHub. +func GCPAppHubWorkloadID(val string) attribute.KeyValue { + return GCPAppHubWorkloadIDKey.String(val) +} + +// GCPClientService returns an attribute KeyValue conforming to the +// "gcp.client.service" semantic conventions. It represents the identifies the +// Google Cloud service for which the official client library is intended. +func GCPClientService(val string) attribute.KeyValue { + return GCPClientServiceKey.String(val) +} + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of +// the Cloud Run [execution] being run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`] environment variable. +// +// [execution]: https://cloud.google.com/run/docs/managing/job-executions +// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index +// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] +// environment variable. +// +// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom hostname] +// . +// +// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm +func GCPGCEInstanceHostname(val string) attribute.KeyValue { + return GCPGCEInstanceHostnameKey.String(val) +} + +// GCPGCEInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance name +// of a GCE instance. This is the value provided by `host.name`, the visible name +// of the instance in the Cloud Console UI, and the prefix for the default +// hostname of the instance as defined by the [default internal DNS name]. +// +// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names +func GCPGCEInstanceName(val string) attribute.KeyValue { + return GCPGCEInstanceNameKey.String(val) +} + +// Enum values for gcp.apphub.service.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH") + // Medium impact. 
+ // Stability: development + GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.service.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Enum values for gcp.apphub.workload.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.workload.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Namespace: gen_ai +const ( + // GenAIAgentDescriptionKey is the attribute Key conforming to the + // "gen_ai.agent.description" semantic conventions. It represents the free-form + // description of the GenAI agent provided by the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Helps with math problems", "Generates fiction stories" + GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") + + // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" + // semantic conventions. It represents the unique identifier of the GenAI agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" + GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") + + // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" + // semantic conventions. It represents the human-readable name of the GenAI + // agent provided by the application. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Math Tutor", "Fiction Writer" + GenAIAgentNameKey = attribute.Key("gen_ai.agent.name") + + // GenAIConversationIDKey is the attribute Key conforming to the + // "gen_ai.conversation.id" semantic conventions. It represents the unique + // identifier for a conversation (session, thread), used to store and correlate + // messages within this conversation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY" + GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id") + + // GenAIDataSourceIDKey is the attribute Key conforming to the + // "gen_ai.data_source.id" semantic conventions. It represents the data source + // identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "H7STPQYOND" + // Note: Data sources are used by AI agents and RAG applications to store + // grounding data. A data source may be an external database, object store, + // document collection, website, or any other storage system used by the GenAI + // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier + // used by the GenAI system rather than a name specific to the external storage, + // such as a database or object store. Semantic conventions referencing + // `gen_ai.data_source.id` MAY also leverage additional attributes, such as + // `db.*`, to further identify and describe the data source. + GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id") + + // GenAIOpenAIRequestServiceTierKey is the attribute Key conforming to the + // "gen_ai.openai.request.service_tier" semantic conventions. It represents the + // service tier requested. May be a specific tier, default, or auto. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "auto", "default" + GenAIOpenAIRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier") + + // GenAIOpenAIResponseServiceTierKey is the attribute Key conforming to the + // "gen_ai.openai.response.service_tier" semantic conventions. It represents the + // service tier used for the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "scale", "default" + GenAIOpenAIResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier") + + // GenAIOpenAIResponseSystemFingerprintKey is the attribute Key conforming to + // the "gen_ai.openai.response.system_fingerprint" semantic conventions. It + // represents a fingerprint to track any eventual change in the Generative AI + // environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fp_44709d6fcb" + GenAIOpenAIResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint") + + // GenAIOperationNameKey is the attribute Key conforming to the + // "gen_ai.operation.name" semantic conventions. It represents the name of the + // operation being performed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: If one of the predefined values applies, but specific system uses a + // different name it's RECOMMENDED to document it in the semantic conventions + // for specific GenAI system and use system-specific name in the + // instrumentation. 
If a different name is not documented, instrumentation + // libraries SHOULD use applicable predefined value. + GenAIOperationNameKey = attribute.Key("gen_ai.operation.name") + + // GenAIOutputTypeKey is the attribute Key conforming to the + // "gen_ai.output.type" semantic conventions. It represents the represents the + // content type requested by the client. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This attribute SHOULD be used when the client requests output of a + // specific type. The model may return zero or more outputs of this type. + // This attribute specifies the output modality and not the actual output + // format. For example, if an image is requested, the actual output could be a + // URL pointing to an image file. + // Additional output format details may be recorded in the future in the + // `gen_ai.output.{type}.*` attributes. + GenAIOutputTypeKey = attribute.Key("gen_ai.output.type") + + // GenAIRequestChoiceCountKey is the attribute Key conforming to the + // "gen_ai.request.choice.count" semantic conventions. It represents the target + // number of candidate completions to return. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3 + GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count") + + // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the + // "gen_ai.request.encoding_formats" semantic conventions. It represents the + // encoding formats requested in an embeddings operation, if specified. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "base64"], ["float", "binary" + // Note: In some GenAI systems the encoding formats are called embedding types. + // Also, some GenAI systems only accept a single format per request. + GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats") + + // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the + // "gen_ai.request.frequency_penalty" semantic conventions. It represents the + // frequency penalty setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.1 + GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty") + + // GenAIRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum + // number of tokens the model generates for a request. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAIRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of the + // GenAI model a request is being made to. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: gpt-4 + GenAIRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the + // "gen_ai.request.presence_penalty" semantic conventions. It represents the + // presence penalty setting for the GenAI request. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.1 + GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty") + + // GenAIRequestSeedKey is the attribute Key conforming to the + // "gen_ai.request.seed" semantic conventions. It represents the requests with + // same seed value more likely to return same result. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed") + + // GenAIRequestStopSequencesKey is the attribute Key conforming to the + // "gen_ai.request.stop_sequences" semantic conventions. It represents the list + // of sequences that the model will use to stop generating further tokens. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "forest", "lived" + GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences") + + // GenAIRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.0 + GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAIRequestTopKKey is the attribute Key conforming to the + // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling + // setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k") + + // GenAIRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling + // setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAIResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to each + // generation received. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "stop"], ["stop", "length" + GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAIResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "chatcmpl-123" + GenAIResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAIResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of the + // model that generated the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gpt-4-0613" + GenAIResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAISystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as identified + // by the client or server instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: openai + // Note: The `gen_ai.system` describes a family of GenAI models with specific + // model identified + // by `gen_ai.request.model` and `gen_ai.response.model` attributes. + // + // The actual GenAI product may differ from the one identified by the client. + // Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI + // client + // libraries. In such cases, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge, instead of the actual system. The + // `server.address` + // attribute may help identify the actual system in use for `openai`. + // + // For custom model, a custom friendly name SHOULD be used. + // If none of these options apply, the `gen_ai.system` SHOULD be set to `_OTHER` + // . + GenAISystemKey = attribute.Key("gen_ai.system") + + // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type" + // semantic conventions. It represents the type of token being counted. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "input", "output" + GenAITokenTypeKey = attribute.Key("gen_ai.token.type") + + // GenAIToolCallIDKey is the attribute Key conforming to the + // "gen_ai.tool.call.id" semantic conventions. It represents the tool call + // identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4" + GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id") + + // GenAIToolDescriptionKey is the attribute Key conforming to the + // "gen_ai.tool.description" semantic conventions. It represents the tool + // description. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Multiply two numbers" + GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description") + + // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name" + // semantic conventions. It represents the name of the tool utilized by the + // agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Flights" + GenAIToolNameKey = attribute.Key("gen_ai.tool.name") + + // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type" + // semantic conventions. It represents the type of the tool utilized by the + // agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "function", "extension", "datastore" + // Note: Extension: A tool executed on the agent-side to directly call external + // APIs, bridging the gap between the agent and real-world systems. + // Agent-side operations involve actions that are performed by the agent on the + // server or within the agent's controlled environment. + // Function: A tool executed on the client-side, where the agent generates + // parameters for a predefined function, and the client executes the logic. + // Client-side operations are actions taken on the user's end or within the + // client application. + // Datastore: A tool used by the agent to access and query structured or + // unstructured external data for retrieval-augmented tasks or knowledge + // updates. + GenAIToolTypeKey = attribute.Key("gen_ai.tool.type") + + // GenAIUsageInputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.input_tokens" semantic conventions. 
It represents the number of + // tokens used in the GenAI input (prompt). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") + + // GenAIUsageOutputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.output_tokens" semantic conventions. It represents the number + // of tokens used in the GenAI response (completion). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 180 + GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") +) + +// GenAIAgentDescription returns an attribute KeyValue conforming to the +// "gen_ai.agent.description" semantic conventions. It represents the free-form +// description of the GenAI agent provided by the application. +func GenAIAgentDescription(val string) attribute.KeyValue { + return GenAIAgentDescriptionKey.String(val) +} + +// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" +// semantic conventions. It represents the unique identifier of the GenAI agent. +func GenAIAgentID(val string) attribute.KeyValue { + return GenAIAgentIDKey.String(val) +} + +// GenAIAgentName returns an attribute KeyValue conforming to the +// "gen_ai.agent.name" semantic conventions. It represents the human-readable +// name of the GenAI agent provided by the application. +func GenAIAgentName(val string) attribute.KeyValue { + return GenAIAgentNameKey.String(val) +} + +// GenAIConversationID returns an attribute KeyValue conforming to the +// "gen_ai.conversation.id" semantic conventions. It represents the unique +// identifier for a conversation (session, thread), used to store and correlate +// messages within this conversation. +func GenAIConversationID(val string) attribute.KeyValue { + return GenAIConversationIDKey.String(val) +} + +// GenAIDataSourceID returns an attribute KeyValue conforming to the +// "gen_ai.data_source.id" semantic conventions. It represents the data source +// identifier. +func GenAIDataSourceID(val string) attribute.KeyValue { + return GenAIDataSourceIDKey.String(val) +} + +// GenAIOpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "gen_ai.openai.response.service_tier" semantic conventions. It represents the +// service tier used for the response. +func GenAIOpenAIResponseServiceTier(val string) attribute.KeyValue { + return GenAIOpenAIResponseServiceTierKey.String(val) +} + +// GenAIOpenAIResponseSystemFingerprint returns an attribute KeyValue conforming +// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It +// represents a fingerprint to track any eventual change in the Generative AI +// environment. +func GenAIOpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return GenAIOpenAIResponseSystemFingerprintKey.String(val) +} + +// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the +// "gen_ai.request.choice.count" semantic conventions. It represents the target +// number of candidate completions to return. +func GenAIRequestChoiceCount(val int) attribute.KeyValue { + return GenAIRequestChoiceCountKey.Int(val) +} + +// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the +// "gen_ai.request.encoding_formats" semantic conventions. It represents the +// encoding formats requested in an embeddings operation, if specified. 
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue { + return GenAIRequestEncodingFormatsKey.StringSlice(val) +} + +// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the +// "gen_ai.request.frequency_penalty" semantic conventions. It represents the +// frequency penalty setting for the GenAI request. +func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue { + return GenAIRequestFrequencyPenaltyKey.Float64(val) +} + +// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the model generates for a request. +func GenAIRequestMaxTokens(val int) attribute.KeyValue { + return GenAIRequestMaxTokensKey.Int(val) +} + +// GenAIRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// GenAI model a request is being made to. +func GenAIRequestModel(val string) attribute.KeyValue { + return GenAIRequestModelKey.String(val) +} + +// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the +// "gen_ai.request.presence_penalty" semantic conventions. It represents the +// presence penalty setting for the GenAI request. +func GenAIRequestPresencePenalty(val float64) attribute.KeyValue { + return GenAIRequestPresencePenaltyKey.Float64(val) +} + +// GenAIRequestSeed returns an attribute KeyValue conforming to the +// "gen_ai.request.seed" semantic conventions. It represents the requests with +// same seed value more likely to return same result. +func GenAIRequestSeed(val int) attribute.KeyValue { + return GenAIRequestSeedKey.Int(val) +} + +// GenAIRequestStopSequences returns an attribute KeyValue conforming to the +// "gen_ai.request.stop_sequences" semantic conventions. It represents the list +// of sequences that the model will use to stop generating further tokens. +func GenAIRequestStopSequences(val ...string) attribute.KeyValue { + return GenAIRequestStopSequencesKey.StringSlice(val) +} + +// GenAIRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the GenAI request. +func GenAIRequestTemperature(val float64) attribute.KeyValue { + return GenAIRequestTemperatureKey.Float64(val) +} + +// GenAIRequestTopK returns an attribute KeyValue conforming to the +// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling +// setting for the GenAI request. +func GenAIRequestTopK(val float64) attribute.KeyValue { + return GenAIRequestTopKKey.Float64(val) +} + +// GenAIRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling +// setting for the GenAI request. +func GenAIRequestTopP(val float64) attribute.KeyValue { + return GenAIRequestTopPKey.Float64(val) +} + +// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the +// "gen_ai.response.finish_reasons" semantic conventions. It represents the array +// of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAIResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAIResponseFinishReasonsKey.StringSlice(val) +} + +// GenAIResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique identifier +// for the completion. 
+func GenAIResponseID(val string) attribute.KeyValue { + return GenAIResponseIDKey.String(val) +} + +// GenAIResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// model that generated the response. +func GenAIResponseModel(val string) attribute.KeyValue { + return GenAIResponseModelKey.String(val) +} + +// GenAIToolCallID returns an attribute KeyValue conforming to the +// "gen_ai.tool.call.id" semantic conventions. It represents the tool call +// identifier. +func GenAIToolCallID(val string) attribute.KeyValue { + return GenAIToolCallIDKey.String(val) +} + +// GenAIToolDescription returns an attribute KeyValue conforming to the +// "gen_ai.tool.description" semantic conventions. It represents the tool +// description. +func GenAIToolDescription(val string) attribute.KeyValue { + return GenAIToolDescriptionKey.String(val) +} + +// GenAIToolName returns an attribute KeyValue conforming to the +// "gen_ai.tool.name" semantic conventions. It represents the name of the tool +// utilized by the agent. +func GenAIToolName(val string) attribute.KeyValue { + return GenAIToolNameKey.String(val) +} + +// GenAIToolType returns an attribute KeyValue conforming to the +// "gen_ai.tool.type" semantic conventions. It represents the type of the tool +// utilized by the agent. +func GenAIToolType(val string) attribute.KeyValue { + return GenAIToolTypeKey.String(val) +} + +// GenAIUsageInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI input (prompt). +func GenAIUsageInputTokens(val int) attribute.KeyValue { + return GenAIUsageInputTokensKey.Int(val) +} + +// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI response (completion). +func GenAIUsageOutputTokens(val int) attribute.KeyValue { + return GenAIUsageOutputTokensKey.Int(val) +} + +// Enum values for gen_ai.openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + GenAIOpenAIRequestServiceTierAuto = GenAIOpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. 
+ // Stability: development + GenAIOpenAIRequestServiceTierDefault = GenAIOpenAIRequestServiceTierKey.String("default") +) + +// Enum values for gen_ai.operation.name +var ( + // Chat completion operation such as [OpenAI Chat API] + // Stability: development + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + GenAIOperationNameChat = GenAIOperationNameKey.String("chat") + // Multimodal content generation operation such as [Gemini Generate Content] + // Stability: development + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") + // Text completions operation such as [OpenAI Completions API (Legacy)] + // Stability: development + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") + // Embeddings operation such as [OpenAI Create embeddings API] + // Stability: development + // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Create GenAI agent + // Stability: development + GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") + // Invoke GenAI agent + // Stability: development + GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") + // Execute a tool + // Stability: development + GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") +) + +// Enum values for gen_ai.output.type +var ( + // Plain text + // Stability: development + GenAIOutputTypeText = GenAIOutputTypeKey.String("text") + // JSON object with known or unknown schema + // Stability: development + GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") + // Image + // Stability: development + GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") + // Speech + // Stability: development + GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") +) + +// Enum values for gen_ai.system +var ( + // OpenAI + // Stability: development + GenAISystemOpenAI = GenAISystemKey.String("openai") + // Any Google generative AI endpoint + // Stability: development + GenAISystemGCPGenAI = GenAISystemKey.String("gcp.gen_ai") + // Vertex AI + // Stability: development + GenAISystemGCPVertexAI = GenAISystemKey.String("gcp.vertex_ai") + // Gemini + // Stability: development + GenAISystemGCPGemini = GenAISystemKey.String("gcp.gemini") + // Deprecated: Use 'gcp.vertex_ai' instead. + GenAISystemVertexAI = GenAISystemKey.String("vertex_ai") + // Deprecated: Use 'gcp.gemini' instead. 
+ GenAISystemGemini = GenAISystemKey.String("gemini") + // Anthropic + // Stability: development + GenAISystemAnthropic = GenAISystemKey.String("anthropic") + // Cohere + // Stability: development + GenAISystemCohere = GenAISystemKey.String("cohere") + // Azure AI Inference + // Stability: development + GenAISystemAzAIInference = GenAISystemKey.String("az.ai.inference") + // Azure OpenAI + // Stability: development + GenAISystemAzAIOpenAI = GenAISystemKey.String("az.ai.openai") + // IBM Watsonx AI + // Stability: development + GenAISystemIBMWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai") + // AWS Bedrock + // Stability: development + GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock") + // Perplexity + // Stability: development + GenAISystemPerplexity = GenAISystemKey.String("perplexity") + // xAI + // Stability: development + GenAISystemXai = GenAISystemKey.String("xai") + // DeepSeek + // Stability: development + GenAISystemDeepseek = GenAISystemKey.String("deepseek") + // Groq + // Stability: development + GenAISystemGroq = GenAISystemKey.String("groq") + // Mistral AI + // Stability: development + GenAISystemMistralAI = GenAISystemKey.String("mistral_ai") +) + +// Enum values for gen_ai.token.type +var ( + // Input tokens (prompt, input, etc.) + // Stability: development + GenAITokenTypeInput = GenAITokenTypeKey.String("input") + // Deprecated: Replaced by `output`. + GenAITokenTypeCompletion = GenAITokenTypeKey.String("output") + // Output tokens (completion, response, etc.) + // Stability: development + GenAITokenTypeOutput = GenAITokenTypeKey.String("output") +) + +// Namespace: geo +const ( + // GeoContinentCodeKey is the attribute Key conforming to the + // "geo.continent.code" semantic conventions. It represents the two-letter code + // representing continent’s name. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + GeoContinentCodeKey = attribute.Key("geo.continent.code") + + // GeoCountryISOCodeKey is the attribute Key conforming to the + // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO + // Country Code ([ISO 3166-1 alpha2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA" + // + // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes + GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code") + + // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" + // semantic conventions. It represents the locality name. Represents the name of + // a city, town, village, or similar populated place. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Montreal", "Berlin" + GeoLocalityNameKey = attribute.Key("geo.locality.name") + + // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" + // semantic conventions. It represents the latitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 45.505918 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLatKey = attribute.Key("geo.location.lat") + + // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" + // semantic conventions. It represents the longitude of the geo location in + // [WGS84]. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -73.61483 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLonKey = attribute.Key("geo.location.lon") + + // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" + // semantic conventions. It represents the postal code associated with the + // location. Values appropriate for this field may also be known as a postcode + // or ZIP code and will vary widely from country to country. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "94040" + GeoPostalCodeKey = attribute.Key("geo.postal_code") + + // GeoRegionISOCodeKey is the attribute Key conforming to the + // "geo.region.iso_code" semantic conventions. It represents the region ISO code + // ([ISO 3166-2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA-QC" + // + // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 + GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") +) + +// GeoCountryISOCode returns an attribute KeyValue conforming to the +// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO +// Country Code ([ISO 3166-1 alpha2]). +// +// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes +func GeoCountryISOCode(val string) attribute.KeyValue { + return GeoCountryISOCodeKey.String(val) +} + +// GeoLocalityName returns an attribute KeyValue conforming to the +// "geo.locality.name" semantic conventions. It represents the locality name. +// Represents the name of a city, town, village, or similar populated place. +func GeoLocalityName(val string) attribute.KeyValue { + return GeoLocalityNameKey.String(val) +} + +// GeoLocationLat returns an attribute KeyValue conforming to the +// "geo.location.lat" semantic conventions. It represents the latitude of the geo +// location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLat(val float64) attribute.KeyValue { + return GeoLocationLatKey.Float64(val) +} + +// GeoLocationLon returns an attribute KeyValue conforming to the +// "geo.location.lon" semantic conventions. It represents the longitude of the +// geo location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLon(val float64) attribute.KeyValue { + return GeoLocationLonKey.Float64(val) +} + +// GeoPostalCode returns an attribute KeyValue conforming to the +// "geo.postal_code" semantic conventions. It represents the postal code +// associated with the location. Values appropriate for this field may also be +// known as a postcode or ZIP code and will vary widely from country to country. +func GeoPostalCode(val string) attribute.KeyValue { + return GeoPostalCodeKey.String(val) +} + +// GeoRegionISOCode returns an attribute KeyValue conforming to the +// "geo.region.iso_code" semantic conventions. It represents the region ISO code +// ([ISO 3166-2]). 
+// +// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 +func GeoRegionISOCode(val string) attribute.KeyValue { + return GeoRegionISOCodeKey.String(val) +} + +// Enum values for geo.continent.code +var ( + // Africa + // Stability: development + GeoContinentCodeAf = GeoContinentCodeKey.String("AF") + // Antarctica + // Stability: development + GeoContinentCodeAn = GeoContinentCodeKey.String("AN") + // Asia + // Stability: development + GeoContinentCodeAs = GeoContinentCodeKey.String("AS") + // Europe + // Stability: development + GeoContinentCodeEu = GeoContinentCodeKey.String("EU") + // North America + // Stability: development + GeoContinentCodeNa = GeoContinentCodeKey.String("NA") + // Oceania + // Stability: development + GeoContinentCodeOc = GeoContinentCodeKey.String("OC") + // South America + // Stability: development + GeoContinentCodeSa = GeoContinentCodeKey.String("SA") +) + +// Namespace: go +const ( + // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" + // semantic conventions. It represents the type of memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "other", "stack" + GoMemoryTypeKey = attribute.Key("go.memory.type") +) + +// Enum values for go.memory.type +var ( + // Memory allocated from the heap that is reserved for stack space, whether or + // not it is currently in-use. + // Stability: development + GoMemoryTypeStack = GoMemoryTypeKey.String("stack") + // Memory used by the Go runtime, excluding other categories of memory usage + // described in this enumeration. + // Stability: development + GoMemoryTypeOther = GoMemoryTypeKey.String("other") +) + +// Namespace: graphql +const ( + // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" + // semantic conventions. It represents the GraphQL document being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: query findBookById { bookById(id: ?) { name } } + // Note: The value may be sanitized to exclude sensitive information. + GraphQLDocumentKey = attribute.Key("graphql.document") + + // GraphQLOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of the + // operation being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: findBookById + GraphQLOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphQLOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of the + // operation being executed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "query", "mutation", "subscription" + GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") +) + +// GraphQLDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphQLDocument(val string) attribute.KeyValue { + return GraphQLDocumentKey.String(val) +} + +// GraphQLOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. 
+func GraphQLOperationName(val string) attribute.KeyValue { + return GraphQLOperationNameKey.String(val) +} + +// Enum values for graphql.operation.type +var ( + // GraphQL query + // Stability: development + GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") + // GraphQL mutation + // Stability: development + GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") + // GraphQL subscription + // Stability: development + GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") +) + +// Namespace: heroku +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit hash + // for the current release. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents the + // time and date the release was created. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2022-10-23T18:00:42Z" + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" +// semantic conventions. It represents the unique identifier for the application. +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release. +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the +// "heroku.release.creation_timestamp" semantic conventions. It represents the +// time and date the release was created. +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// Namespace: host +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is running + // on. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of + // level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" + // semantic conventions. It represents the family or generation of the CPU. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "PA-RISC 1.1e" + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" + // semantic conventions. It represents the model identifier. It provides more + // granular information about the CPU, distinguishing it from other CPUs within + // the same family. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "9000/778/B180L" + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" + // semantic conventions. It represents the stepping or core revisions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1", "r1p1" + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "GenuineIntel" + // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX + // registers. Writing these to memory in this order results in a 12-character + // string. + // + // [CPUID]: https://wiki.osdev.org/CPUID + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be the + // instance_id assigned by the cloud provider. For non-containerized systems, + // this should be the `machine-id`. See the table below for the sources to use + // to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fdbf79e8af94cb7f9e8df36789187052" + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. For + // Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ami-07b06b442921831e5" + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the "host.image.name" + // semantic conventions. It represents the name of the VM image or OS install + // the host was instantiated from. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version string + // of the VM image or host OS as defined in [Version Attributes]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC 5952] format. + // + // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as + // hyphen-separated octets in uppercase hexadecimal form from most to least + // significant. + // + // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified hostname, + // or another name specified by the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-test" + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "n1-standard-1" + HostTypeKey = attribute.Key("host.type") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or generation +// of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model identifier. +// It provides more granular information about the CPU, distinguishing it from +// other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
+func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use to +// determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the "host.image.id" +// semantic conventions. It represents the VM image ID or host OS image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM image +// or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string of +// the VM image or host OS as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic +// conventions. It represents the available MAC addresses of the host, excluding +// loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" semantic +// conventions. It represents the name of the host. On Unix systems, it may +// contain what the hostname command returns, or the fully qualified hostname, or +// another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" semantic +// conventions. It represents the type of host. For Cloud, this must be the +// machine type. 
+func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Enum values for host.arch +var ( + // AMD64 + // Stability: development + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + // Stability: development + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + // Stability: development + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + // Stability: development + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + // Stability: development + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + // Stability: development + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + // Stability: development + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + // Stability: development + HostArchX86 = HostArchKey.String("x86") +) + +// Namespace: http +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of the + // HTTP connection in the HTTP connection pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "idle" + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of the + // request payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP request + // method. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GET", "POST", "HEAD" + // Note: HTTP request method value SHOULD be "known" to the instrumentation. + // By default, this convention defines "known" methods as the ones listed in + // [RFC9110] + // and the PATCH method defined in [RFC5789]. + // + // If the HTTP request method is not known to instrumentation, it MUST set the + // `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. 
+ // + // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods + // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GeT", "ACL", "foo" + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including redirects). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, + // or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" + // semantic conventions. It represents the total size of the request in bytes. + // This should be the total number of bytes sent over the wire, including the + // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request + // body if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size of the + // response payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size of + // the response in bytes. This should be the total number of bytes sent over the + // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), + // headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status code]. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 200 + // + // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic + // conventions. 
It represents the matched route, that is, the path template in + // the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/users/:userID?", "{controller}/{action}/{id?}" + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. + // SHOULD include the [application root] if there is one. + // + // [application root]: /docs/http/http-spans.md#http-server-definitions + HTTPRouteKey = attribute.Key("http.route") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of the +// request in bytes. This should be the total number of bytes sent over the wire, +// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, +// and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of the +// response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. It represents the total size of the +// response in bytes. This should be the total number of bytes sent over the +// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. 
It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Enum values for http.connection.state +var ( + // active state. + // Stability: development + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state. + // Stability: development + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +// Enum values for http.request.method +var ( + // CONNECT method. + // Stability: stable + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method. + // Stability: stable + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method. + // Stability: stable + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method. + // Stability: stable + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method. + // Stability: stable + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method. + // Stability: stable + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method. + // Stability: stable + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method. + // Stability: stable + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method. + // Stability: stable + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of. + // Stability: stable + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// Namespace: hw +const ( + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. + // It represents an identifier for the hardware component, unique within the + // monitored host. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "win32battery_battery_testsysa33_1" + HwIDKey = attribute.Key("hw.id") + + // HwNameKey is the attribute Key conforming to the "hw.name" semantic + // conventions. It represents an easily-recognizable name for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "eth0" + HwNameKey = attribute.Key("hw.name") + + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic + // conventions. It represents the unique identifier of the parent component + // (typically the `hw.id` attribute of the enclosure, or disk controller). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dellStorage_perc_0" + HwParentKey = attribute.Key("hw.parent") + + // HwStateKey is the attribute Key conforming to the "hw.state" semantic + // conventions. It represents the current state of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwStateKey = attribute.Key("hw.state") + + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic + // conventions. It represents the type of the component. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: Describes the category of the hardware component for which `hw.state` + // is being reported. For example, `hw.type=temperature` along with + // `hw.state=degraded` would indicate that the temperature of the hardware + // component has been reported as `degraded`. + HwTypeKey = attribute.Key("hw.type") +) + +// HwID returns an attribute KeyValue conforming to the "hw.id" semantic +// conventions. It represents an identifier for the hardware component, unique +// within the monitored host. +func HwID(val string) attribute.KeyValue { + return HwIDKey.String(val) +} + +// HwName returns an attribute KeyValue conforming to the "hw.name" semantic +// conventions. It represents an easily-recognizable name for the hardware +// component. +func HwName(val string) attribute.KeyValue { + return HwNameKey.String(val) +} + +// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic +// conventions. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func HwParent(val string) attribute.KeyValue { + return HwParentKey.String(val) +} + +// Enum values for hw.state +var ( + // Ok + // Stability: development + HwStateOk = HwStateKey.String("ok") + // Degraded + // Stability: development + HwStateDegraded = HwStateKey.String("degraded") + // Failed + // Stability: development + HwStateFailed = HwStateKey.String("failed") +) + +// Enum values for hw.type +var ( + // Battery + // Stability: development + HwTypeBattery = HwTypeKey.String("battery") + // CPU + // Stability: development + HwTypeCPU = HwTypeKey.String("cpu") + // Disk controller + // Stability: development + HwTypeDiskController = HwTypeKey.String("disk_controller") + // Enclosure + // Stability: development + HwTypeEnclosure = HwTypeKey.String("enclosure") + // Fan + // Stability: development + HwTypeFan = HwTypeKey.String("fan") + // GPU + // Stability: development + HwTypeGpu = HwTypeKey.String("gpu") + // Logical disk + // Stability: development + HwTypeLogicalDisk = HwTypeKey.String("logical_disk") + // Memory + // Stability: development + HwTypeMemory = HwTypeKey.String("memory") + // Network + // Stability: development + HwTypeNetwork = HwTypeKey.String("network") + // Physical disk + // Stability: development + HwTypePhysicalDisk = HwTypeKey.String("physical_disk") + // Power supply + // Stability: development + HwTypePowerSupply = HwTypeKey.String("power_supply") + // Tape drive + // Stability: development + HwTypeTapeDrive = HwTypeKey.String("tape_drive") + // Temperature + // Stability: development + HwTypeTemperature = HwTypeKey.String("temperature") + // Voltage + // Stability: development + HwTypeVoltage = HwTypeKey.String("voltage") +) + +// Namespace: ios +const ( + // IOSAppStateKey is the attribute Key conforming to the "ios.app.state" + // semantic conventions. It represents the this attribute represents the state + // of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The iOS lifecycle states are defined in the + // [UIApplicationDelegate documentation], and from which the `OS terminology` + // column values are derived. 
+ // + // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate + IOSAppStateKey = attribute.Key("ios.app.state") +) + +// Enum values for ios.app.state +var ( + // The app has become `active`. Associated with UIKit notification + // `applicationDidBecomeActive`. + // + // Stability: development + IOSAppStateActive = IOSAppStateKey.String("active") + // The app is now `inactive`. Associated with UIKit notification + // `applicationWillResignActive`. + // + // Stability: development + IOSAppStateInactive = IOSAppStateKey.String("inactive") + // The app is now in the background. This value is associated with UIKit + // notification `applicationDidEnterBackground`. + // + // Stability: development + IOSAppStateBackground = IOSAppStateKey.String("background") + // The app is now in the foreground. This value is associated with UIKit + // notification `applicationWillEnterForeground`. + // + // Stability: development + IOSAppStateForeground = IOSAppStateKey.String("foreground") + // The app is about to terminate. Associated with UIKit notification + // `applicationWillTerminate`. + // + // Stability: development + IOSAppStateTerminate = IOSAppStateKey.String("terminate") +) + +// Namespace: k8s +const ( + // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name" + // semantic conventions. It represents the name of the cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-cluster" + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" + // semantic conventions. It represents a pseudo-ID for the cluster, set to the + // UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8s cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8s ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T X.667]. + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // > different from all other UUIDs generated before 3603 A.D., or is + // > extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + // + // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. 
Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "redis" + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the number + // of times the container was restarted. This attribute can be used to identify + // a particular container (running or stopped) within a container spec. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to + // the "k8s.container.status.last_terminated_reason" semantic conventions. It + // represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Evicted", "Error" + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" + // semantic conventions. It represents the name of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" + // semantic conventions. It represents the UID of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" + // semantic conventions. It represents the UID of the DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic + // conventions. It represents the name of the horizontal pod autoscaler. 
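// A minimal illustrative sketch of how the k8s workload attributes in this
// namespace could be combined to describe a single pod (values reused from
// the Examples in this namespace; the combination itself is hypothetical):
//
//    attrs := []attribute.KeyValue{
//        K8SClusterName("opentelemetry-cluster"),
//        K8SNamespaceName("default"),
//        K8SDeploymentName("opentelemetry"),
//        K8SPodName("opentelemetry-pod-autoconf"),
//        K8SContainerName("redis"),
//    }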
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SHPANameKey = attribute.Key("k8s.hpa.name") + + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic + // conventions. It represents the UID of the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic + // conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic + // conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "default" + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNamespacePhaseKey is the attribute Key conforming to the + // "k8s.namespace.phase" semantic conventions. It represents the phase of the + // K8s namespace. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "terminating" + // Note: This attribute aligns with the `phase` field of the + // [K8s NamespaceStatus] + // + // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core + K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "node-1" + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic + // conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic + // conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-pod-autoconf" + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic + // conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of the + // ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicationControllerNameKey is the attribute Key conforming to the + // "k8s.replicationcontroller.name" semantic conventions. It represents the name + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") + + // K8SReplicationControllerUIDKey is the attribute Key conforming to the + // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") + + // K8SResourceQuotaNameKey is the attribute Key conforming to the + // "k8s.resourcequota.name" semantic conventions. It represents the name of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") + + // K8SResourceQuotaUIDKey is the attribute Key conforming to the + // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" + // semantic conventions. It represents the name of the K8s volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "volume0" + K8SVolumeNameKey = attribute.Key("k8s.volume.name") + + // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" + // semantic conventions. It represents the type of the K8s volume. 
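// For example (illustrative sketch, values reused from the Examples in this
// namespace), a K8s volume could be described as:
//
//    attrs := []attribute.KeyValue{
//        K8SVolumeName("volume0"),
//        K8SVolumeTypePersistentVolumeClaim,
//    }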
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "emptyDir", "persistentVolumeClaim" + K8SVolumeTypeKey = attribute.Key("k8s.volume.type") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify a +// particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. 
+func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" +// semantic conventions. It represents the name of the horizontal pod autoscaler. +func K8SHPAName(val string) attribute.KeyValue { + return K8SHPANameKey.String(val) +} + +// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" +// semantic conventions. It represents the UID of the horizontal pod autoscaler. +func K8SHPAUID(val string) attribute.KeyValue { + return K8SHPAUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" +// semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicationControllerName returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.name" semantic conventions. It represents the name +// of the replication controller. +func K8SReplicationControllerName(val string) attribute.KeyValue { + return K8SReplicationControllerNameKey.String(val) +} + +// K8SReplicationControllerUID returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of +// the replication controller. 
+func K8SReplicationControllerUID(val string) attribute.KeyValue { + return K8SReplicationControllerUIDKey.String(val) +} + +// K8SResourceQuotaName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.name" semantic conventions. It represents the name of the +// resource quota. +func K8SResourceQuotaName(val string) attribute.KeyValue { + return K8SResourceQuotaNameKey.String(val) +} + +// K8SResourceQuotaUID returns an attribute KeyValue conforming to the +// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the +// resource quota. +func K8SResourceQuotaUID(val string) attribute.KeyValue { + return K8SResourceQuotaUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SVolumeName returns an attribute KeyValue conforming to the +// "k8s.volume.name" semantic conventions. It represents the name of the K8s +// volume. +func K8SVolumeName(val string) attribute.KeyValue { + return K8SVolumeNameKey.String(val) +} + +// Enum values for k8s.namespace.phase +var ( + // Active namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") + // Terminating namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") +) + +// Enum values for k8s.volume.type +var ( + // A [persistentVolumeClaim] volume + // Stability: development + // + // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim + K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") + // A [configMap] volume + // Stability: development + // + // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap + K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") + // A [downwardAPI] volume + // Stability: development + // + // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi + K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") + // An [emptyDir] volume + // Stability: development + // + // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir + K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") + // A [secret] volume + // Stability: development + // + // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret + K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") + // A [local] volume + // Stability: development + // + // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local + K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") +) + +// Namespace: linux +const ( + // LinuxMemorySlabStateKey is the attribute Key conforming to the + // "linux.memory.slab.state" semantic conventions. 
It represents the Linux Slab + // memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "reclaimable", "unreclaimable" + LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") +) + +// Enum values for linux.memory.slab.state +var ( + // reclaimable + // Stability: development + LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") + // unreclaimable + // Stability: development + LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") +) + +// Namespace: log +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "audit.log" + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the basename of + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "uuid.log" + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/log/mysql/audit.log" + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full path to + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/lib/docker/uuid.log" + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") + + // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic + // conventions. It represents the stream associated with the log. See below for + // a list of well-known values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + LogIostreamKey = attribute.Key("log.iostream") + + // LogRecordOriginalKey is the attribute Key conforming to the + // "log.record.original" semantic conventions. It represents the complete + // original Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - + // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" + // Note: This value MAY be added when processing a Log Record which was + // originally transmitted as a string or equivalent data type AND the Body field + // of the Log Record does not contain the same value. (e.g. a syslog or a log + // record read from a file.) + LogRecordOriginalKey = attribute.Key("log.record.original") + + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. 
This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an + // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other + // identifiers (e.g. UUID) may be used as needed. + // + // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogFileName returns an attribute KeyValue conforming to the "log.file.name" +// semantic conventions. It represents the basename of the file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the "log.file.path" +// semantic conventions. It represents the full path to the file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path to +// the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// LogRecordOriginal returns an attribute KeyValue conforming to the +// "log.record.original" semantic conventions. It represents the complete +// original Log Record. +func LogRecordOriginal(val string) attribute.KeyValue { + return LogRecordOriginalKey.String(val) +} + +// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid" +// semantic conventions. It represents a unique identifier for the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Enum values for log.iostream +var ( + // Logs from stdout stream + // Stability: development + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + // Stability: development + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Namespace: messaging +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the batching + // operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client library + // supports both batch and single-message API for the same operation, + // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs + // and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique identifier + // for the client that consumes or produces a message. 
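// As an illustrative sketch (values reused from the Examples in this
// namespace; "kafka" is only one possible messaging.system value), a producer
// instrumentation might record, assuming an existing trace.Span named span:
//
//    span.SetAttributes(
//        MessagingSystemKey.String("kafka"),
//        MessagingOperationName("send"),
//        MessagingOperationTypeSend,
//        MessagingDestinationName("MyTopic"),
//        MessagingClientID("client-5"),
//    )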
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "client-5", "myhost@8742@s8083jm" + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingConsumerGroupNameKey is the attribute Key conforming to the + // "messaging.consumer.group.name" semantic conventions. It represents the name + // of the consumer group with which a consumer is associated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-group", "indexer" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.consumer.group.name` is applicable and what it means in + // the context of that system. + MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the message + // destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MyQueue", "MyTopic" + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to the + // "messaging.destination.partition.id" semantic conventions. It represents the + // identifier of the partition messages are sent to or received from, unique + // within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to + // the "messaging.destination.subscription.name" semantic conventions. It + // represents the name of the destination subscription from which a message is + // consumed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "subscription-a" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.destination.subscription.name` is applicable and what it + // means in the context of that system. + MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the low + // cardinality representation of the messaging destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/customers/{customerId}" + // Note: Destination names could be constructed from templates. 
An example would + // be a destination name involving a user name or product id. Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to + // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It + // represents the UTC epoch seconds at which the message has been accepted and + // stored in the entity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") + + // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to + // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It + // represents the ack deadline in seconds set for the modify ack deadline + // request. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the + // ack id for a given message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ack_id + MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions. + // It represents the delivery attempt for a given message. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to + // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It + // represents the ordering key for a given message. If the attribute is not + // present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ordering_key + MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the message + // keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from `messaging.message.id` in + // that they're not unique. If the key is `null`, the attribute MUST NOT be set. 
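// A minimal illustrative sketch for a Kafka consumer (the offset is a
// hypothetical value; the other values are reused from the Examples in this
// namespace):
//
//    attrs := []attribute.KeyValue{
//        MessagingSystemKey.String("kafka"),
//        MessagingDestinationName("MyTopic"),
//        MessagingDestinationPartitionID("1"),
//        MessagingKafkaMessageKey("myKey"),
//        MessagingKafkaOffset(42),
//    }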
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myKey + // Note: If the key type is not string, it's string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents a + // boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") + + // MessagingKafkaOffsetKey is the attribute Key conforming to the + // "messaging.kafka.offset" semantic conventions. It represents the offset of a + // record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the size of + // the message body in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: This can refer to both the compressed or uncompressed body size. If + // both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents the + // conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: MyConversationId + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents the + // size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: This can refer to both the compressed or uncompressed size. If both + // sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used by + // the messaging system as an identifier for the message, represented as a + // string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 452a7c7c7c7048c2f887f61572b18fc2 + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ack", "nack", "send" + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to + // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It + // represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myKey + MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the + // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents + // the rabbitMQ message delivery tag. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") + + // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the + // "messaging.rocketmq.consumption_model" semantic conventions. It represents + // the model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to + // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It + // represents the delay time level for delay message, which determines the + // message delay time. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming + // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions. + // It represents the timestamp in milliseconds that the delay message is + // expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents the it + // is essential for FIFO message. Messages that belong to the same message group + // are always processed one by one within the same consumer group. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myMessageGroup + MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. 
It represents the + // key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "keyA", "keyB" + MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketMQMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: tagA + MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents the + // type of message. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketMQNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myNamespace + MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to + // the "messaging.servicebus.disposition_status" semantic conventions. It + // represents the describes the [settlement type]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock + MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") + + // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to + // the "messaging.servicebus.message.delivery_count" semantic conventions. It + // represents the number of deliveries that have been attempted for this + // message. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") + + // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to + // the "messaging.servicebus.message.enqueued_time" semantic conventions. It + // represents the UTC epoch seconds at which the message has been accepted and + // stored in the entity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") + + // MessagingSystemKey is the attribute Key conforming to the "messaging.system" + // semantic conventions. It represents the messaging system as identified by the + // client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate with + // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the + // instrumentation's best knowledge. 
+ MessagingSystemKey = attribute.Key("messaging.system") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to the +// "messaging.batch.message_count" semantic conventions. It represents the number +// of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique identifier +// for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingConsumerGroupName returns an attribute KeyValue conforming to the +// "messaging.consumer.group.name" semantic conventions. It represents the name +// of the consumer group with which a consumer is associated. +func MessagingConsumerGroupName(val string) attribute.KeyValue { + return MessagingConsumerGroupNameKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the +// "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be unnamed +// or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name. +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming to +// the "messaging.destination.partition.id" semantic conventions. It represents +// the identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming +// to the "messaging.destination.subscription.name" semantic conventions. It +// represents the name of the destination subscription from which a message is +// consumed. +func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingDestinationSubscriptionNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to the +// "messaging.destination.template" semantic conventions. It represents the low +// cardinality representation of the messaging destination name. +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to the +// "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming +// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. 
It +// represents the UTC epoch seconds at which the message has been accepted and +// stored in the entity. +func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventHubsMessageEnqueuedTimeKey.Int(val) +} + +// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It +// represents the ack deadline in seconds set for the modify ack deadline +// request. +func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubSubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the +// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the +// ack id for a given message. +func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubSubMessageAckIDKey.String(val) +} + +// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It +// represents the ordering key for a given message. If the attribute is not +// present, the message does not have an ordering key. +func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubSubMessageOrderingKeyKey.String(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the message +// keys in Kafka are used for grouping alike messages to ensure they're processed +// on the same partition. They differ from `messaging.message.id` in that they're +// not unique. If the key is `null`, the attribute MUST NOT be set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the +// "messaging.kafka.message.tombstone" semantic conventions. It represents a +// boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// MessagingKafkaOffset returns an attribute KeyValue conforming to the +// "messaging.kafka.offset" semantic conventions. It represents the offset of a +// record in the corresponding Kafka partition. +func MessagingKafkaOffset(val int) attribute.KeyValue { + return MessagingKafkaOffsetKey.Int(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size of +// the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming to the +// "messaging.message.conversation_id" semantic conventions. It represents the +// conversation ID identifying the conversation to which the message belongs, +// represented as a string. Sometimes called "Correlation ID". 
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+	return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the
+// "messaging.message.envelope.size" semantic conventions. It represents the size
+// of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+	return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by the
+// messaging system as an identifier for the message, represented as a string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+	return MessagingOperationNameKey.String(val)
+}
+
+// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitMQDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming
+// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It
+// represents the RabbitMQ message delivery tag.
+func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue {
+	return MessagingRabbitMQMessageDeliveryTagKey.Int(val)
+}
+
+// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketMQMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds at which the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It represents the
+// message group, which is essential for FIFO messages: messages that belong to
+// the same message group are always processed one by one within the same
+// consumer group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark the message besides the message id.
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketMQMessageKeysKey.StringSlice(val) +} + +// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the +// "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketMQMessageTag(val string) attribute.KeyValue { + return MessagingRocketMQMessageTagKey.String(val) +} + +// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the +// "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketMQNamespace(val string) attribute.KeyValue { + return MessagingRocketMQNamespaceKey.String(val) +} + +// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.delivery_count" semantic +// conventions. It represents the number of deliveries that have been attempted +// for this message. +func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServiceBusMessageDeliveryCountKey.Int(val) +} + +// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has been +// accepted and stored in the entity. +func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServiceBusMessageEnqueuedTimeKey.Int(val) +} + +// Enum values for messaging.operation.type +var ( + // A message is created. "Create" spans always refer to a single message and are + // used to provide a unique creation context for messages in batch sending + // scenarios. + // + // Stability: development + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are provided for sending to an intermediary. If a single + // message is sent, the context of the "Send" span can be used as the creation + // context and no "Create" span needs to be created. + // + // Stability: development + MessagingOperationTypeSend = MessagingOperationTypeKey.String("send") + // One or more messages are requested by a consumer. This operation refers to + // pull-based scenarios, where consumers explicitly call methods of messaging + // SDKs to receive messages. + // + // Stability: development + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are processed by a consumer. + // + // Stability: development + MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process") + // One or more messages are settled. + // + // Stability: development + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") + // Deprecated: Replaced by `process`. + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("deliver") + // Deprecated: Replaced by `send`. 
+ MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") +) + +// Enum values for messaging.rocketmq.consumption_model +var ( + // Clustering consumption model + // Stability: development + MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") + // Broadcasting consumption model + // Stability: development + MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") +) + +// Enum values for messaging.rocketmq.message.type +var ( + // Normal message + // Stability: development + MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") + // FIFO message + // Stability: development + MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") + // Delay message + // Stability: development + MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") + // Transaction message + // Stability: development + MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") +) + +// Enum values for messaging.servicebus.disposition_status +var ( + // Message is completed + // Stability: development + MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") + // Message is abandoned + // Stability: development + MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + // Stability: development + MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter") + // Message is deferred + // Stability: development + MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") +) + +// Enum values for messaging.system +var ( + // Apache ActiveMQ + // Stability: development + MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + // Stability: development + MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + // Stability: development + MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + // Stability: development + MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + // Stability: development + MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + // Stability: development + MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + // Stability: development + MessagingSystemJMS = MessagingSystemKey.String("jms") + // Apache Kafka + // Stability: development + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + // Stability: development + MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + // Stability: development + MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") + // Apache Pulsar + // Stability: development + MessagingSystemPulsar = MessagingSystemKey.String("pulsar") +) + +// Namespace: network +const ( + // NetworkCarrierICCKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier network. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: DE
+	NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+	// NetworkCarrierMCCKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+	// country code.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 310
+	NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMNCKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+	// network code.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 001
+	NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of the
+	// mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: sprint
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionStateKey is the attribute Key conforming to the
+	// "network.connection.state" semantic conventions. It represents the state of
+	// the network connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "close_wait"
+	// Note: Connection states are defined as part of the [rfc9293]
+	//
+	// [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+	NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It represents more
+	// details about the connection.type: it may be the type of cell technology
+	// connection, but it could be used for describing details about a wifi
+	// connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: LTE
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the internet
+	// connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: wifi
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkInterfaceNameKey is the attribute Key conforming to the
+	// "network.interface.name" semantic conventions. It represents the network
+	// interface name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "lo", "eth0"
+	NetworkInterfaceNameKey = attribute.Key("network.interface.name")
+
+	// NetworkIODirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network IO
+	// operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "transmit"
+	NetworkIODirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local address
+	// of the network connection - IP address or Unix domain socket name.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" + // semantic conventions. It represents the peer port number of the network + // connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the + // [OSI application layer] or non-OSI equivalent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "amqp", "http", "mqtt" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI application layer]: https://wikipedia.org/wiki/Application_layer + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the actual + // version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.1", "2" + // Note: If protocol version is subject to negotiation (for example using [ALPN] + // ), this attribute SHOULD be set to the negotiated version. If the actual + // protocol version is not known, this attribute SHOULD NOT be set. + // + // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the + // [OSI transport layer] or [inter-process communication method]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "tcp", "udp" + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + // + // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer + // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic + // conventions. It represents the [OSI network layer] or non-OSI equivalent. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "ipv4", "ipv6" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI network layer]: https://wikipedia.org/wiki/Network_layer + NetworkTypeKey = attribute.Key("network.type") +) + +// NetworkCarrierICC returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierICC(val string) attribute.KeyValue { + return NetworkCarrierICCKey.String(val) +} + +// NetworkCarrierMCC returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMCC(val string) attribute.KeyValue { + return NetworkCarrierMCCKey.String(val) +} + +// NetworkCarrierMNC returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMNC(val string) attribute.KeyValue { + return NetworkCarrierMNCKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkInterfaceName returns an attribute KeyValue conforming to the +// "network.interface.name" semantic conventions. It represents the network +// interface name. +func NetworkInterfaceName(val string) attribute.KeyValue { + return NetworkInterfaceNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local address +// of the network connection - IP address or Unix domain socket name. +func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port number +// of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. 
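// Illustrative sketch (not part of the vendored file): the stable network.*
// attributes above are usually recorded together on an existing client or
// server span. The semconv import path version suffix and the literal values
// below are assumptions.
package example

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // version suffix assumed
	"go.opentelemetry.io/otel/trace"
)

// annotateConnection adds peer, transport, and protocol details to the span
// already carried in ctx.
func annotateConnection(ctx context.Context) {
	span := trace.SpanFromContext(ctx)
	span.SetAttributes(
		semconv.NetworkTransportTCP,
		semconv.NetworkTypeIPv4,
		semconv.NetworkPeerAddress("10.1.2.80"),
		semconv.NetworkPeerPort(8080),
		semconv.NetworkProtocolName("http"),
		semconv.NetworkProtocolVersion("1.1"),
	)
}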
+func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Enum values for network.connection.state +var ( + // closed + // Stability: development + NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") + // close_wait + // Stability: development + NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") + // closing + // Stability: development + NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") + // established + // Stability: development + NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") + // fin_wait_1 + // Stability: development + NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") + // fin_wait_2 + // Stability: development + NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") + // last_ack + // Stability: development + NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") + // listen + // Stability: development + NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") + // syn_received + // Stability: development + NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received") + // syn_sent + // Stability: development + NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") + // time_wait + // Stability: development + NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") +) + +// Enum values for network.connection.subtype +var ( + // GPRS + // Stability: development + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + // Stability: development + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + // Stability: development + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + // Stability: development + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + // Stability: development + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + // Stability: development + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + // Stability: development + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + // Stability: development + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + // Stability: development + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + // Stability: development + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + // Stability: development + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + // Stability: development + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + // Stability: development + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + // Stability: development + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + // Stability: development + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + // Stability: development + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + // Stability: development + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + // Stability: development + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + // Stability: development + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + // Stability: development + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + // Stability: development + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +// Enum values for network.connection.type +var ( + // wifi + // Stability: development + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + // Stability: development + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + // Stability: development + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + // Stability: development + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + // Stability: development + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +// Enum values for network.io.direction +var ( + // transmit + // Stability: development + NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit") + // receive + // Stability: development + NetworkIODirectionReceive = NetworkIODirectionKey.String("receive") +) + +// Enum values for network.transport +var ( + // TCP + // Stability: stable + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + // Stability: stable + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe. + // Stability: stable + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + // Stability: stable + NetworkTransportUnix = NetworkTransportKey.String("unix") + // QUIC + // Stability: stable + NetworkTransportQUIC = NetworkTransportKey.String("quic") +) + +// Enum values for network.type +var ( + // IPv4 + // Stability: stable + NetworkTypeIPv4 = NetworkTypeKey.String("ipv4") + // IPv6 + // Stability: stable + NetworkTypeIPv6 = NetworkTypeKey.String("ipv6") +) + +// Namespace: oci +const ( + // OCIManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of the + // OCI image manifest. For container images specifically is the digest by which + // the container image is known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4" + // Note: Follows [OCI Image Manifest Specification], and specifically the + // [Digest property]. + // An example can be found in [Example Image Manifest]. 
+ // + // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md + // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests + // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest + OCIManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OCIManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OCIManifestDigest(val string) attribute.KeyValue { + return OCIManifestDigestKey.String(val) +} + +// Namespace: opentracing +const ( + // OpenTracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the parent-child + // Reference type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The causal relationship between a child Span and a parent Span. + OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +// Enum values for opentracing.ref_type +var ( + // The parent Span depends on the child Span in some capacity + // Stability: development + OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + // Stability: development + OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") +) + +// Namespace: os +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TQ3C.230805.001.B2", "20E247", "22621" + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to be + // parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS" + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iOS", "Android", "Ubuntu" + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" semantic + // conventions. It represents the version string of the operating system as + // defined in [Version Attributes]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.2.1", "18.04.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + OSVersionKey = attribute.Key("os.version") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the "os.description" +// semantic conventions. It represents the human readable (not intended to be +// parsed) OS version information, like e.g. reported by `ver` or +// `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating system +// as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Enum values for os.type +var ( + // Microsoft Windows + // Stability: development + OSTypeWindows = OSTypeKey.String("windows") + // Linux + // Stability: development + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + // Stability: development + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + // Stability: development + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + // Stability: development + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + // Stability: development + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + // Stability: development + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + // Stability: development + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + // Stability: development + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + // Stability: development + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + // Stability: development + OSTypeZOS = OSTypeKey.String("z_os") +) + +// Namespace: otel +const ( + // OTelComponentNameKey is the attribute Key conforming to the + // "otel.component.name" semantic conventions. It represents a name uniquely + // identifying the instance of the OpenTelemetry component within its containing + // SDK instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otlp_grpc_span_exporter/0", "custom-name" + // Note: Implementations SHOULD ensure a low cardinality for this attribute, + // even across application or SDK restarts. + // E.g. implementations MUST NOT use UUIDs as values for this attribute. + // + // Implementations MAY achieve these goals by following a + // `/` pattern, e.g. + // `batching_span_processor/0`. + // Hereby `otel.component.type` refers to the corresponding attribute value of + // the component. 
+ // + // The value of `instance-counter` MAY be automatically assigned by the + // component and uniqueness within the enclosing SDK instance MUST be + // guaranteed. + // For example, `` MAY be implemented by using a monotonically + // increasing counter (starting with `0`), which is incremented every time an + // instance of the given component type is started. + // + // With this implementation, for example the first Batching Span Processor would + // have `batching_span_processor/0` + // as `otel.component.name`, the second one `batching_span_processor/1` and so + // on. + // These values will therefore be reused in the case of an application restart. + OTelComponentNameKey = attribute.Key("otel.component.name") + + // OTelComponentTypeKey is the attribute Key conforming to the + // "otel.component.type" semantic conventions. It represents a name identifying + // the type of the OpenTelemetry component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "batching_span_processor", "com.example.MySpanExporter" + // Note: If none of the standardized values apply, implementations SHOULD use + // the language-defined name of the type. + // E.g. for Java the fully qualified classname SHOULD be used in this case. + OTelComponentTypeKey = attribute.Key("otel.component.type") + + // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name" + // semantic conventions. It represents the name of the instrumentation scope - ( + // `InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "io.opentelemetry.contrib.mongodb" + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of the + // instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.0.0" + OTelScopeVersionKey = attribute.Key("otel.scope.version") + + // OTelSpanSamplingResultKey is the attribute Key conforming to the + // "otel.span.sampling_result" semantic conventions. It represents the result + // value of the sampler for this span. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result") + + // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code" + // semantic conventions. It represents the name of the code, either "OK" or + // "ERROR". MUST NOT be set if the status code is UNSET. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the description + // of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "resource not found" + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +// OTelComponentName returns an attribute KeyValue conforming to the +// "otel.component.name" semantic conventions. It represents a name uniquely +// identifying the instance of the OpenTelemetry component within its containing +// SDK instance. 
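// Illustrative sketch (not part of the vendored file) of the naming rule
// described above: pair a standardized otel.component.type value with an
// otel.component.name built from that type plus a monotonically increasing
// per-type instance counter. The names and structure below are assumptions.
package example

import (
	"fmt"
	"sync/atomic"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // version suffix assumed
)

var batchingSpanProcessorCount atomic.Int64 // instances started so far

// batchingSpanProcessorAttrs returns the otel.component.* attributes for the
// next batching span processor instance: "batching_span_processor/0" for the
// first one, "batching_span_processor/1" for the second, and so on.
func batchingSpanProcessorAttrs() []attribute.KeyValue {
	n := batchingSpanProcessorCount.Add(1) - 1
	return []attribute.KeyValue{
		semconv.OTelComponentTypeBatchingSpanProcessor,
		semconv.OTelComponentName(fmt.Sprintf("batching_span_processor/%d", n)),
	}
}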
+func OTelComponentName(val string) attribute.KeyValue { + return OTelComponentNameKey.String(val) +} + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the description +// of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Enum values for otel.component.type +var ( + // The builtin SDK batching span processor + // + // Stability: development + OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") + // The builtin SDK simple span processor + // + // Stability: development + OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") + // The builtin SDK batching log record processor + // + // Stability: development + OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") + // The builtin SDK simple log record processor + // + // Stability: development + OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") + // OTLP span exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") + // OTLP span exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") + // OTLP span exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // OTLP log record exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") + // OTLP log record exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") + // OTLP log record exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") + // The builtin SDK periodically exporting metric reader + // + // Stability: development + OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") + // OTLP metric exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") + // OTLP metric exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPMetricExporter = 
OTelComponentTypeKey.String("otlp_http_metric_exporter") + // OTLP metric exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter") +) + +// Enum values for otel.span.sampling_result +var ( + // The span is not sampled and not recording + // Stability: development + OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP") + // The span is not sampled, but recording + // Stability: development + OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY") + // The span is sampled and recording + // Stability: development + OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE") +) + +// Enum values for otel.status_code +var ( + // The operation has been validated by an Application developer or Operator to + // have completed successfully. + // Stability: stable + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error. + // Stability: stable + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// Namespace: peer +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic + // conventions. It represents the [`service.name`] of the remote service. SHOULD + // be equal to the actual `service.name` resource attribute of the remote + // service if any. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: AuthTokenCache + // + // [`service.name`]: /docs/resource/README.md#service + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the "peer.service" +// semantic conventions. It represents the [`service.name`] of the remote +// service. SHOULD be equal to the actual `service.name` resource attribute of +// the remote service if any. +// +// [`service.name`]: /docs/resource/README.md#service +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// Namespace: process +const ( + // ProcessArgsCountKey is the attribute Key conforming to the + // "process.args_count" semantic conventions. It represents the length of the + // process.command_args array. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 4 + // Note: This field can be useful for querying or performing bucket analysis on + // how many arguments were provided to start a process. More arguments may be an + // indication of suspicious activity. + ProcessArgsCountKey = attribute.Key("process.args_count") + + // ProcessCommandKey is the attribute Key conforming to the "process.command" + // semantic conventions. It represents the command used to launch the process + // (i.e. the command name). On Linux based systems, can be set to the zeroth + // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter + // extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otelcol" + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received by + // the process. 
On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this + // would be the full argv vector passed to `main`. SHOULD NOT be collected by + // default unless there is sanitization that excludes sensitive data. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otecol", "--config=config.yaml" + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full command + // used to launch the process as a single string representing the full command. + // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if + // you have to assemble it just for monitoring; use `process.command_args` + // instead. SHOULD NOT be collected by default unless there is sanitization that + // excludes sensitive data. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "C:\cmd\otecol --config="my directory\config.yaml"" + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It represents the + // specifies whether the context switches for this data point were voluntary or + // involuntary. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and time + // the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:25:34.853Z" + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the + // "process.executable.build_id.gnu" semantic conventions. It represents the GNU + // build ID as found in the `.note.gnu.build-id` ELF section (hex string). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "c89b11207f6479603b0d49bf291c092c2b719293" + ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu") + + // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the + // "process.executable.build_id.go" semantic conventions. It represents the Go + // build ID as retrieved by `go tool buildid `. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY" + ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go") + + // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the + // "process.executable.build_id.htlhash" semantic conventions. It represents the + // profiling specific build ID for executables. See the OTel specification for + // Profiles for more information. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "600DCAFE4A110000F2BF38C493F5FB92" + ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name of the + // process executable. On Linux based systems, this SHOULD be set to the base + // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to + // the base name of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcol" + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full path + // to the process executable. On Linux based systems, can be set to the target + // of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/cmd/otelcol" + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code" + // semantic conventions. It represents the exit code of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time" + // semantic conventions. It represents the date and time the process exited, in + // ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:26:12.315Z" + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID of the + // process's group leader. This is also the process group ID (PGID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. It represents the whether the + // process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessLinuxCgroupKey is the attribute Key conforming to the + // "process.linux.cgroup" semantic conventions. It represents the control group + // associated with the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope", + // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope" + // Note: Control groups (cgroups) are a kernel feature used to organize and + // manage process resources. This attribute provides the path(s) to the + // cgroup(s) associated with the process, which should match the contents of the + // [/proc/[PID]/cgroup] file. 
+ // + // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html + ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. It represents the type of + // page fault for this data point. Type `major` is for major/hard page faults, + // and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent Process + // identifier (PPID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic + // conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user ID + // (RUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the username of + // the real user of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of the + // runtime of this process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OpenJDK Runtime Environment" + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the version of + // the runtime of this process, as returned by the runtime without modification. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14.0.2 + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved user ID + // (SUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the username of + // the saved user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID of + // the process's session leader. This is also the session ID (SID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessTitleKey is the attribute Key conforming to the "process.title" + // semantic conventions. It represents the process title (proctitle). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cat /etc/hostname", "xfce4-session", "bash" + // Note: In many Unix-like systems, process title (proctitle), is the string + // that represents the name or command line of a running process, displayed by + // system monitoring tools like ps, top, and htop. + ProcessTitleKey = attribute.Key("process.title") + + // ProcessUserIDKey is the attribute Key conforming to the "process.user.id" + // semantic conventions. It represents the effective user ID (EUID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the "process.user.name" + // semantic conventions. It represents the username of the effective user of the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic + // conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily unique + // across all processes on the host but it is unique within the process + // namespace that the process exists within. + ProcessVpidKey = attribute.Key("process.vpid") + + // ProcessWorkingDirectoryKey is the attribute Key conforming to the + // "process.working_directory" semantic conventions. It represents the working + // directory of the process. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/root" + ProcessWorkingDirectoryKey = attribute.Key("process.working_directory") +) + +// ProcessArgsCount returns an attribute KeyValue conforming to the +// "process.args_count" semantic conventions. It represents the length of the +// process.command_args array. +func ProcessArgsCount(val int) attribute.KeyValue { + return ProcessArgsCountKey.Int(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be set +// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the +// first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the command +// arguments (including the command/executable itself) as received by the +// process. On Linux-based systems (and some other Unixoid systems supporting +// procfs), can be set according to the list of null-delimited strings extracted +// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full +// argv vector passed to `main`. SHOULD NOT be collected by default unless there +// is sanitization that excludes sensitive data. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if +// you have to assemble it just for monitoring; use `process.command_args` +// instead. SHOULD NOT be collected by default unless there is sanitization that +// excludes sensitive data. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and time +// the process was created, in ISO 8601 format. +func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + +// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the +// "process.executable.build_id.gnu" semantic conventions. It represents the GNU +// build ID as found in the `.note.gnu.build-id` ELF section (hex string). +func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue { + return ProcessExecutableBuildIDGNUKey.String(val) +} + +// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the +// "process.executable.build_id.go" semantic conventions. It represents the Go +// build ID as retrieved by `go tool buildid `. +func ProcessExecutableBuildIDGo(val string) attribute.KeyValue { + return ProcessExecutableBuildIDGoKey.String(val) +} + +// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to +// the "process.executable.build_id.htlhash" semantic conventions. It represents +// the profiling specific build ID for executables. See the OTel specification +// for Profiles for more information. 
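+//
+// Illustrative sketch (editor's addition, not upstream-generated): combining
+// the build-ID helpers above into an attribute slice. The two ID strings are
+// placeholders, not real build IDs.
+//
+//	attrs := []attribute.KeyValue{
+//		ProcessExecutableBuildIDGNU("placeholder-gnu-build-id"),
+//		ProcessExecutableBuildIDHtlhash("placeholder-htlhash"),
+//	}
+//	_ = attrs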
+func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue { + return ProcessExecutableBuildIDHtlhashKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of the +// process executable. On Linux based systems, this SHOULD be set to the base +// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the +// base name of `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path to +// the process executable. On Linux based systems, can be set to the target of +// `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time the +// process exited, in ISO 8601 format. +func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of the +// process's group leader. This is also the process group ID (PGID) of the +// process. +func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents the whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + +// ProcessLinuxCgroup returns an attribute KeyValue conforming to the +// "process.linux.cgroup" semantic conventions. It represents the control group +// associated with the process. +func ProcessLinuxCgroup(val string) attribute.KeyValue { + return ProcessLinuxCgroupKey.String(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the "process.owner" +// semantic conventions. It represents the username of the user that owns the +// process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process. 
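+//
+// Illustrative sketch (editor's addition, not upstream-generated): reporting
+// the real, effective and saved user IDs together, using the example values
+// from the docs above.
+//
+//	attrs := []attribute.KeyValue{
+//		ProcessRealUserID(1000),
+//		ProcessUserID(1001),
+//		ProcessSavedUserID(1002),
+//	}
+//	_ = attrs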
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user ID +// (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username of +// the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessTitle returns an attribute KeyValue conforming to the "process.title" +// semantic conventions. It represents the process title (proctitle). +func ProcessTitle(val string) attribute.KeyValue { + return ProcessTitleKey.String(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" +// semantic conventions. It represents the virtual process identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// ProcessWorkingDirectory returns an attribute KeyValue conforming to the +// "process.working_directory" semantic conventions. 
It represents the working +// directory of the process. +func ProcessWorkingDirectory(val string) attribute.KeyValue { + return ProcessWorkingDirectoryKey.String(val) +} + +// Enum values for process.context_switch_type +var ( + // voluntary + // Stability: development + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + // Stability: development + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +// Enum values for process.paging.fault_type +var ( + // major + // Stability: development + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + // Stability: development + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") +) + +// Namespace: profile +const ( + // ProfileFrameTypeKey is the attribute Key conforming to the + // "profile.frame.type" semantic conventions. It represents the describes the + // interpreter or compiler of a single frame. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpython" + ProfileFrameTypeKey = attribute.Key("profile.frame.type") +) + +// Enum values for profile.frame.type +var ( + // [.NET] + // + // Stability: development + // + // [.NET]: https://wikipedia.org/wiki/.NET + ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet") + // [JVM] + // + // Stability: development + // + // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine + ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm") + // [Kernel] + // + // Stability: development + // + // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system) + ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel") + // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a + // more precise value MUST be used. 
+ // + // Stability: development + // + // [C]: https://wikipedia.org/wiki/C_(programming_language) + // [C++]: https://wikipedia.org/wiki/C%2B%2B + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeNative = ProfileFrameTypeKey.String("native") + // [Perl] + // + // Stability: development + // + // [Perl]: https://wikipedia.org/wiki/Perl + ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl") + // [PHP] + // + // Stability: development + // + // [PHP]: https://wikipedia.org/wiki/PHP + ProfileFrameTypePHP = ProfileFrameTypeKey.String("php") + // [Python] + // + // Stability: development + // + // [Python]: https://wikipedia.org/wiki/Python_(programming_language) + ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython") + // [Ruby] + // + // Stability: development + // + // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language) + ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby") + // [V8JS] + // + // Stability: development + // + // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine) + ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js") + // [Erlang] + // + // Stability: development + // + // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine) + ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam") + // [Go], + // + // Stability: development + // + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + ProfileFrameTypeGo = ProfileFrameTypeKey.String("go") + // [Rust] + // + // Stability: development + // + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust") +) + +// Namespace: rpc +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes] of the Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [error codes]: https://connectrpc.com//docs/protocol/#error-codes + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the + // [numeric status code] of the gRPC request. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` + // property of response if it is an error response. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -32700, 100 + RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Parse error", "User already exists" + RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJSONRPCRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, string, + // `null` or missing (for notifications), value is expected to be cast to string + // for simplicity. Use empty string in case of `null` value. Omit entirely if + // this is a notification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10", "request-7", "" + RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJSONRPCVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0", "1.0" + RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") + + // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" + // semantic conventions. It MUST be calculated as two different counters + // starting from `1` one for sent messages and one for received message.. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This way we guarantee that the values will be consistent between + // different implementations. + RPCMessageIDKey = attribute.Key("rpc.message.id") + + // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type" + // semantic conventions. It represents the whether this is a received or sent + // message. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageTypeKey = attribute.Key("rpc.message.type") + + // RPCMessageUncompressedSizeKey is the attribute Key conforming to the + // "rpc.message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic + // conventions. It represents the name of the (logical) method being called, + // must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: exampleMethod + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. 
The `code.function.name` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic + // conventions. It represents the full (logical) name of the service being + // called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myservice.EchoService + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The `code.namespace` attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic + // conventions. It represents a string identifying the remoting system. See + // below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCSystemKey = attribute.Key("rpc.system") +) + +// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` +// property of response if it is an error response. +func RPCJSONRPCErrorCode(val int) attribute.KeyValue { + return RPCJSONRPCErrorCodeKey.Int(val) +} + +// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJSONRPCErrorMessage(val string) attribute.KeyValue { + return RPCJSONRPCErrorMessageKey.String(val) +} + +// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property +// of request or response. Since protocol allows id to be int, string, `null` or +// missing (for notifications), value is expected to be cast to string for +// simplicity. Use empty string in case of `null` value. Omit entirely if this is +// a notification. +func RPCJSONRPCRequestID(val string) attribute.KeyValue { + return RPCJSONRPCRequestIDKey.String(val) +} + +// RPCJSONRPCVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version +// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't +// specify this, the value can be omitted. +func RPCJSONRPCVersion(val string) attribute.KeyValue { + return RPCJSONRPCVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. +func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id" +// semantic conventions. It MUST be calculated as two different counters starting +// from `1` one for sent messages and one for received message.. 
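+//
+// Illustrative sketch (editor's addition, not upstream-generated): keeping a
+// per-direction counter that starts at 1, paired with the message type enum
+// value defined later in this file.
+//
+//	sentCount := 1 // incremented once per sent message
+//	attrs := []attribute.KeyValue{
+//		RPCMessageTypeSent,
+//		RPCMessageID(sentCount),
+//	}
+//	_ = attrs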
+func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// Enum values for rpc.connect_rpc.error_code +var ( + // cancelled + // Stability: development + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + // Stability: development + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + // Stability: development + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + // Stability: development + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + // Stability: development + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + // Stability: development + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + // Stability: development + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + // Stability: development + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + // Stability: development + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + // Stability: development + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + // Stability: development + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + // Stability: development + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + // Stability: development + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + // Stability: development + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + // Stability: development + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + // Stability: development + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +// Enum values for rpc.grpc.status_code +var ( + // OK + // Stability: development + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + // Stability: development + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + // Stability: development + RPCGRPCStatusCodeUnknown = 
RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + // Stability: development + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + // Stability: development + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + // Stability: development + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + // Stability: development + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + // Stability: development + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + // Stability: development + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + // Stability: development + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + // Stability: development + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + // Stability: development + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + // Stability: development + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + // Stability: development + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + // Stability: development + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + // Stability: development + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + // Stability: development + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Enum values for rpc.message.type +var ( + // sent + // Stability: development + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + // Stability: development + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +// Enum values for rpc.system +var ( + // gRPC + // Stability: development + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + // Stability: development + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + // Stability: development + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + // Stability: development + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + // Stability: development + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// Namespace: security_rule +const ( + // SecurityRuleCategoryKey is the attribute Key conforming to the + // "security_rule.category" semantic conventions. It represents a categorization + // value keyword used by the entity using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Attempted Information Leak" + SecurityRuleCategoryKey = attribute.Key("security_rule.category") + + // SecurityRuleDescriptionKey is the attribute Key conforming to the + // "security_rule.description" semantic conventions. It represents the + // description of the rule generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Block requests to public DNS over HTTPS / TLS protocols" + SecurityRuleDescriptionKey = attribute.Key("security_rule.description") + + // SecurityRuleLicenseKey is the attribute Key conforming to the + // "security_rule.license" semantic conventions. It represents the name of the + // license under which the rule used to generate this event is made available. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apache 2.0" + SecurityRuleLicenseKey = attribute.Key("security_rule.license") + + // SecurityRuleNameKey is the attribute Key conforming to the + // "security_rule.name" semantic conventions. It represents the name of the rule + // or signature generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BLOCK_DNS_over_TLS" + SecurityRuleNameKey = attribute.Key("security_rule.name") + + // SecurityRuleReferenceKey is the attribute Key conforming to the + // "security_rule.reference" semantic conventions. It represents the reference + // URL to additional information about the rule used to generate this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" + // Note: The URL can point to the vendor’s documentation about the rule. If + // that’s not available, it can also be a link to a more general page + // describing this type of alert. + SecurityRuleReferenceKey = attribute.Key("security_rule.reference") + + // SecurityRuleRulesetNameKey is the attribute Key conforming to the + // "security_rule.ruleset.name" semantic conventions. It represents the name of + // the ruleset, policy, group, or parent category in which the rule used to + // generate this event is a member. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Standard_Protocol_Filters" + SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") + + // SecurityRuleUUIDKey is the attribute Key conforming to the + // "security_rule.uuid" semantic conventions. It represents a rule ID that is + // unique within the scope of a set or group of agents, observers, or other + // entities using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" + SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") + + // SecurityRuleVersionKey is the attribute Key conforming to the + // "security_rule.version" semantic conventions. It represents the version / + // revision of the rule being used for analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0" + SecurityRuleVersionKey = attribute.Key("security_rule.version") +) + +// SecurityRuleCategory returns an attribute KeyValue conforming to the +// "security_rule.category" semantic conventions. It represents a categorization +// value keyword used by the entity using the rule for detection of this event. +func SecurityRuleCategory(val string) attribute.KeyValue { + return SecurityRuleCategoryKey.String(val) +} + +// SecurityRuleDescription returns an attribute KeyValue conforming to the +// "security_rule.description" semantic conventions. It represents the +// description of the rule generating the event. +func SecurityRuleDescription(val string) attribute.KeyValue { + return SecurityRuleDescriptionKey.String(val) +} + +// SecurityRuleLicense returns an attribute KeyValue conforming to the +// "security_rule.license" semantic conventions. It represents the name of the +// license under which the rule used to generate this event is made available. 
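+//
+// Illustrative sketch (editor's addition, not upstream-generated): describing
+// a detection rule with the security_rule helpers, reusing the example values
+// from the docs above.
+//
+//	attrs := []attribute.KeyValue{
+//		SecurityRuleName("BLOCK_DNS_over_TLS"),
+//		SecurityRuleLicense("Apache 2.0"),
+//		SecurityRuleVersion("1.0.0"),
+//	}
+//	_ = attrs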
+func SecurityRuleLicense(val string) attribute.KeyValue { + return SecurityRuleLicenseKey.String(val) +} + +// SecurityRuleName returns an attribute KeyValue conforming to the +// "security_rule.name" semantic conventions. It represents the name of the rule +// or signature generating the event. +func SecurityRuleName(val string) attribute.KeyValue { + return SecurityRuleNameKey.String(val) +} + +// SecurityRuleReference returns an attribute KeyValue conforming to the +// "security_rule.reference" semantic conventions. It represents the reference +// URL to additional information about the rule used to generate this event. +func SecurityRuleReference(val string) attribute.KeyValue { + return SecurityRuleReferenceKey.String(val) +} + +// SecurityRuleRulesetName returns an attribute KeyValue conforming to the +// "security_rule.ruleset.name" semantic conventions. It represents the name of +// the ruleset, policy, group, or parent category in which the rule used to +// generate this event is a member. +func SecurityRuleRulesetName(val string) attribute.KeyValue { + return SecurityRuleRulesetNameKey.String(val) +} + +// SecurityRuleUUID returns an attribute KeyValue conforming to the +// "security_rule.uuid" semantic conventions. It represents a rule ID that is +// unique within the scope of a set or group of agents, observers, or other +// entities using the rule for detection of this event. +func SecurityRuleUUID(val string) attribute.KeyValue { + return SecurityRuleUUIDKey.String(val) +} + +// SecurityRuleVersion returns an attribute KeyValue conforming to the +// "security_rule.version" semantic conventions. It represents the version / +// revision of the rule being used for analysis. +func SecurityRuleVersion(val string) attribute.KeyValue { + return SecurityRuleVersionKey.String(val) +} + +// Namespace: server +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.address` SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" semantic + // conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.port` SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the "server.address" +// semantic conventions. It represents the server domain name if available +// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. 
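+//
+// Illustrative sketch (editor's addition, not upstream-generated): the usual
+// client-side pairing of server address and port, with example values from
+// the docs above.
+//
+//	attrs := []attribute.KeyValue{
+//		ServerAddress("example.com"),
+//		ServerPort(443),
+//	}
+//	_ = attrs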
+func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// Namespace: service +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID of + // the service instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "627cc493-f310-47de-96bd-71410b7dec09" + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to + // distinguish instances of the same service that exist at the same time (e.g. + // instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random Version 1 + // or Version 4 [RFC + // 4122] UUID, but are free to use an inherent unique ID as + // the source of + // this value if stability is desirable. In that case, the ID SHOULD be used as + // source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the purposes of + // identifying a service instance is + // needed. Similar to what can be seen in the man page for the + // [`/etc/machine-id`] file, the underlying + // data, such as pod name and namespace should be treated as confidential, being + // the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we do + // not recommend using one identifier + // for all processes participating in the application. Instead, it's recommended + // each division (e.g. a worker + // thread in unicorn) to have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it can't + // unambiguously determine the + // service instance that is generating that telemetry. For instance, creating an + // UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container within + // that pod the telemetry originated. + // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, as + // they know the target address and + // port. + // + // [RFC + // 4122]: https://www.ietf.org/rfc/rfc4122.txt + // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" semantic + // conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "shoppingcart" + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`. + // If `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + // + // [`process.executable.name`]: process.md + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. 
It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Shop" + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace + // defined (so the empty/unspecified namespace is simply one more valid + // namespace). Zero-length namespace string is assumed equal to unspecified + // namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the "service.version" + // semantic conventions. It represents the version string of the service API or + // implementation. The format is not defined by these conventions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "2.0.0", "a01dbef8a" + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of the +// service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the "service.name" +// semantic conventions. It represents the logical name of the service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Namespace: session +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" semantic + // conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. 
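+//
+// Illustrative sketch (editor's addition, not upstream-generated): recording a
+// session rotation; both UUIDs reuse the placeholder value from the examples
+// above purely for illustration.
+//
+//	attrs := []attribute.KeyValue{
+//		SessionID("00112233-4455-6677-8899-aabbccddeeff"),
+//		SessionPreviousID("00112233-4455-6677-8899-aabbccddeeff"),
+//	}
+//	_ = attrs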
+func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// Namespace: signalr +const ( + // SignalRConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the signalR + // HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "app_shutdown", "timeout" + SignalRConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalRTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. It represents the + // [SignalR transport type]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "web_sockets", "long_polling" + // + // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md + SignalRTransportKey = attribute.Key("signalr.transport") +) + +// Enum values for signalr.connection.status +var ( + // The connection was closed normally. + // Stability: stable + SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout. + // Stability: stable + SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down. + // Stability: stable + SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown") +) + +// Enum values for signalr.transport +var ( + // ServerSentEvents protocol + // Stability: stable + SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events") + // LongPolling protocol + // Stability: stable + SignalRTransportLongPolling = SignalRTransportKey.String("long_polling") + // WebSockets protocol + // Stability: stable + SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets") +) + +// Namespace: source +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the destination side, and when communicating through + // an intermediary, `source.address` SHOULD represent the source address behind + // any intermediaries, for example proxies, if it's available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" semantic + // conventions. It represents the source port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the "source.address" +// semantic conventions. It represents the source address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number. 
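+//
+// Illustrative sketch (editor's addition, not upstream-generated): source
+// address and port as they might be attached on the destination side, with
+// example values from the docs above.
+//
+//	attrs := []attribute.KeyValue{
+//		SourceAddress("source.example.com"),
+//		SourcePort(3389),
+//	}
+//	_ = attrs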
+func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Namespace: system +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // deprecated, use `cpu.logical_number` instead. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. It represents the device identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "(identifier)" + SystemDeviceKey = attribute.Key("system.device") + + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the filesystem + // mode. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "rw, ro" + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/mnt/data" + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the filesystem + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "used" + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the filesystem + // type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ext4" + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") + + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free", "cached" + SystemMemoryStateKey = attribute.Key("system.memory.state") + + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "in" + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. It represents the memory paging + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free" + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory paging + // type. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "minor" + SystemPagingTypeKey = attribute.Key("system.paging.type") + + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State Codes]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "running" + // + // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES + SystemProcessStatusKey = attribute.Key("system.process.status") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the +// deprecated, use `cpu.logical_number` instead. +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// SystemDevice returns an attribute KeyValue conforming to the "system.device" +// semantic conventions. It represents the device identifier. +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. It represents the filesystem +// mode. +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the +// "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path. +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Enum values for system.filesystem.state +var ( + // used + // Stability: development + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + // Stability: development + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + // Stability: development + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +// Enum values for system.filesystem.type +var ( + // fat32 + // Stability: development + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + // Stability: development + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + // Stability: development + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + // Stability: development + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + // Stability: development + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + // Stability: development + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// Enum values for system.memory.state +var ( + // used + // Stability: development + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + // Stability: development + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // Deprecated: Removed, report shared memory usage with + // `metric.system.memory.shared` metric. 
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + // Stability: development + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + // Stability: development + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Enum values for system.paging.direction +var ( + // in + // Stability: development + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + // Stability: development + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +// Enum values for system.paging.state +var ( + // used + // Stability: development + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + // Stability: development + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +// Enum values for system.paging.type +var ( + // major + // Stability: development + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + // Stability: development + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Enum values for system.process.status +var ( + // running + // Stability: development + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + // Stability: development + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + // Stability: development + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + // Stability: development + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Namespace: telemetry +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of the + // auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "parts-unlimited-java" + // Note: Official auto instrumentation agents and distributions SHOULD set the + // `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the version + // string of the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the language of + // the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "opentelemetry" + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to + // `opentelemetry`. 
+ // If another SDK, like a fork or a vendor-provided implementation, is used, + // this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.2.3" + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version string +// of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// Enum values for telemetry.sdk.language +var ( + // cpp + // Stability: stable + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + // Stability: stable + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + // Stability: stable + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + // Stability: stable + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + // Stability: stable + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + // Stability: stable + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + // Stability: stable + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + // Stability: stable + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + // Stability: stable + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + // Stability: stable + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + // Stability: stable + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + // Stability: stable + TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") +) + +// Namespace: test +const ( + // TestCaseNameKey is the attribute Key conforming to the "test.case.name" + // semantic conventions. 
It represents the fully qualified human readable name + // of the [test case]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", + // "ExampleTestCase1_test1" + // + // [test case]: https://wikipedia.org/wiki/Test_case + TestCaseNameKey = attribute.Key("test.case.name") + + // TestCaseResultStatusKey is the attribute Key conforming to the + // "test.case.result.status" semantic conventions. It represents the status of + // the actual test case result from test execution. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pass", "fail" + TestCaseResultStatusKey = attribute.Key("test.case.result.status") + + // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" + // semantic conventions. It represents the human readable name of a [test suite] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TestSuite1" + // + // [test suite]: https://wikipedia.org/wiki/Test_suite + TestSuiteNameKey = attribute.Key("test.suite.name") + + // TestSuiteRunStatusKey is the attribute Key conforming to the + // "test.suite.run.status" semantic conventions. It represents the status of the + // test suite run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "skipped", "aborted", "timed_out", + // "in_progress" + TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") +) + +// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" +// semantic conventions. It represents the fully qualified human readable name of +// the [test case]. +// +// [test case]: https://wikipedia.org/wiki/Test_case +func TestCaseName(val string) attribute.KeyValue { + return TestCaseNameKey.String(val) +} + +// TestSuiteName returns an attribute KeyValue conforming to the +// "test.suite.name" semantic conventions. It represents the human readable name +// of a [test suite]. +// +// [test suite]: https://wikipedia.org/wiki/Test_suite +func TestSuiteName(val string) attribute.KeyValue { + return TestSuiteNameKey.String(val) +} + +// Enum values for test.case.result.status +var ( + // pass + // Stability: development + TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") + // fail + // Stability: development + TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") +) + +// Enum values for test.suite.run.status +var ( + // success + // Stability: development + TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") + // failure + // Stability: development + TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") + // skipped + // Stability: development + TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") + // aborted + // Stability: development + TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") + // timed_out + // Stability: development + TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") + // in_progress + // Stability: development + TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") +) + +// Namespace: thread +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed to OS + // thread ID). 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic + // conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: main + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic +// conventions. It represents the current "managed" thread ID (as opposed to OS +// thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Namespace: tls +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic + // conventions. It represents the string indicating the [cipher] used during the + // current connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" + // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions` + // of the [registered TLS Cipher Suits]. + // + // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 + // [registered TLS Cipher Suits]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4 + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the client. This is usually + // mutually-exclusive of `client.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the array + // of PEM-encoded certificates that make up the certificate chain offered by the + // client. This is usually mutually-exclusive of `client.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. 
It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the date/Time + // indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the array + // of ciphers offered by the client during the client hello. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the given + // cipher, when applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "secp256r1" + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the "tls.established" + // semantic conventions. It represents the boolean flag indicating if the TLS + // negotiation was successful and transitioned to an encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" + // semantic conventions. It represents the string indicating the protocol being + // tunneled. Per the values in the [IANA registry], this string should be lower + // case. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "http/1.1" + // + // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" + // semantic conventions. It represents the normalized lowercase protocol name + // parsed from original string of the negotiated [SSL/TLS protocol version]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric part + // of the version parsed from the original string of the negotiated + // [SSL/TLS protocol version]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2", "3" + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic + // conventions. It represents the boolean flag indicating if this TLS connection + // was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the server. This is usually + // mutually-exclusive of `server.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." 
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the array + // of PEM-encoded certificates that make up the certificate chain offered by the + // server. This is usually mutually-exclusive of `server.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s" + // semantic conventions. It represents a hash that identifies servers based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the date/Time + // indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the [cipher] used +// during the current connection. +// +// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also exists +// in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// client. This is usually mutually-exclusive of `client.certificate` since that +// value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the client. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. 
For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" +// semantic conventions. It represents a hash that identifies clients based on +// how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic +// conventions. It represents the string indicating the curve used for the given +// cipher, when applicable. +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string indicating +// the protocol being tunneled. Per the values in the [IANA registry], this +// string should be lower case. +// +// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. 
It represents the numeric part of +// the version parsed from the original string of the negotiated +// [SSL/TLS protocol version]. +// +// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also exists +// in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// server. This is usually mutually-exclusive of `server.certificate` since that +// value should be the first certificate in the chain. +func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the server. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. 
It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Enum values for tls.protocol.name +var ( + // ssl + // Stability: development + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + // Stability: development + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// Namespace: url +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" semantic + // conventions. It represents the domain extracted from the `url.full`, such as + // "opentelemetry.io". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", + // "[1080:0:0:0:8:800:200C:417A]" + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If + // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` + // and `]` characters should also be captured in the domain field. + // + // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from the + // `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: The file extension is only set if it exists, as not every url has a + // file extension. When the file name has multiple extensions `example.tar.gz`, + // only the last one should be captured `gz`, not `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic + // conventions. It represents the [URI fragment] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SemConv" + // + // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network resource + // according to [RFC3986]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the fragment + // is not transmitted over HTTP, but if it is known, it SHOULD be included + // nevertheless. + // + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. + // In such case username and password SHOULD be redacted and attribute's value + // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. + // + // `url.full` SHOULD capture the absolute URL when it is available (or can be + // reconstructed). + // + // Sensitive content provided in `url.full` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the + // value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `https://www.example.com/path?color=blue&sig=REDACTED`. + // + // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" semantic + // conventions. It represents the unmodified original URL as seen in the event + // source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", + // "search?q=OpenTelemetry" + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. + URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI path] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/search" + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + // + // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full`. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI query] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "q=OpenTelemetry" + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `q=OpenTelemetry&sig=REDACTED`. + // + // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered url domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.com", "foo.co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // For example, the registered domain for `foo.example.com` is `example.com`. + // Trying to approximate this by simply taking the last two labels will not work + // well for TLDs such as `co.uk`. + // + // [public suffix list]: https://publicsuffix.org/ + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic + // conventions. It represents the [URI scheme] component identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https", "ftp", "telnet" + // + // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name includes all of the names except the host name under + // the registered_domain. In a partially qualified domain, or if the + // qualification level of the full name cannot be determined, subdomain contains + // all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "east", "sub2.sub1" + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the + // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the + // subdomain field should contain `sub2.sub1`, with no trailing period. 
+ URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" semantic + // conventions. It represents the low-cardinality template of an + // [absolute path reference]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/users/{id}", "/users/:id", "/users?id={id}" + // + // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective top + // level domain (eTLD), also known as the domain suffix, is the last part of the + // domain name. For example, the top level domain for example.com is `com`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com", "co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // + // [public suffix list]: https://publicsuffix.org/ + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the `url.full`, +// such as "opentelemetry.io". +func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the "url.extension" +// semantic conventions. It represents the file extension extracted from the +// `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the "url.fragment" +// semantic conventions. It represents the [URI fragment] component. +// +// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" semantic +// conventions. It represents the absolute URL describing a network resource +// according to [RFC3986]. +// +// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the "url.original" +// semantic conventions. It represents the unmodified original URL as seen in the +// event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" semantic +// conventions. It represents the [URI path] component. +// +// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" semantic +// conventions. It represents the port extracted from the `url.full`. +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic +// conventions. It represents the [URI query] component. 
+// +// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered url domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI scheme] component identifying the +// used protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain" +// semantic conventions. It represents the subdomain portion of a fully qualified +// domain name includes all of the names except the host name under the +// registered_domain. In a partially qualified domain, or if the qualification +// level of the full name cannot be determined, subdomain contains all of the +// names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the "url.template" +// semantic conventions. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, is the last part of the +// domain name. For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Namespace: user +const ( + // UserEmailKey is the attribute Key conforming to the "user.email" semantic + // conventions. It represents the user email address. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein@example.com" + UserEmailKey = attribute.Key("user.email") + + // UserFullNameKey is the attribute Key conforming to the "user.full_name" + // semantic conventions. It represents the user's full name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Albert Einstein" + UserFullNameKey = attribute.Key("user.full_name") + + // UserHashKey is the attribute Key conforming to the "user.hash" semantic + // conventions. It represents the unique user hash to correlate information for + // a user in anonymized form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa" + // Note: Useful if `user.id` or `user.name` contain confidential information and + // cannot be used. + UserHashKey = attribute.Key("user.hash") + + // UserIDKey is the attribute Key conforming to the "user.id" semantic + // conventions. It represents the unique identifier of the user. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" + UserIDKey = attribute.Key("user.id") + + // UserNameKey is the attribute Key conforming to the "user.name" semantic + // conventions. It represents the short name or login/username of the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein" + UserNameKey = attribute.Key("user.name") + + // UserRolesKey is the attribute Key conforming to the "user.roles" semantic + // conventions. It represents the array of user roles at the time of the event. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "admin", "reporting_user" + UserRolesKey = attribute.Key("user.roles") +) + +// UserEmail returns an attribute KeyValue conforming to the "user.email" +// semantic conventions. It represents the user email address. +func UserEmail(val string) attribute.KeyValue { + return UserEmailKey.String(val) +} + +// UserFullName returns an attribute KeyValue conforming to the "user.full_name" +// semantic conventions. It represents the user's full name. +func UserFullName(val string) attribute.KeyValue { + return UserFullNameKey.String(val) +} + +// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic +// conventions. It represents the unique user hash to correlate information for a +// user in anonymized form. +func UserHash(val string) attribute.KeyValue { + return UserHashKey.String(val) +} + +// UserID returns an attribute KeyValue conforming to the "user.id" semantic +// conventions. It represents the unique identifier of the user. +func UserID(val string) attribute.KeyValue { + return UserIDKey.String(val) +} + +// UserName returns an attribute KeyValue conforming to the "user.name" semantic +// conventions. It represents the short name or login/username of the user. +func UserName(val string) attribute.KeyValue { + return UserNameKey.String(val) +} + +// UserRoles returns an attribute KeyValue conforming to the "user.roles" +// semantic conventions. It represents the array of user roles at the time of the +// event. +func UserRoles(val ...string) attribute.KeyValue { + return UserRolesKey.StringSlice(val) +} + +// Namespace: user_agent +const ( + // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" + // semantic conventions. It represents the name of the user-agent extracted from + // original. Usually refers to the browser's name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Safari", "YourApp" + // Note: [Example] of extracting browser's name from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant name SHOULD be selected. In such a scenario it should align with + // `user_agent.version` + // + // [Example]: https://www.whatsmyua.info + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of the + // [HTTP User-Agent] header sent by the client. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU
+	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+	// Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0
+	// grpc-java-okhttp/1.27.2"
+	//
+	// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+	// UserAgentOSNameKey is the attribute Key conforming to the
+	// "user_agent.os.name" semantic conventions. It represents the human readable
+	// operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "iOS", "Android", "Ubuntu"
+	// Note: For mapping user agent strings to OS names, libraries such as
+	// [ua-parser] can be utilized.
+	//
+	// [ua-parser]: https://github.com/ua-parser
+	UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+	// UserAgentOSVersionKey is the attribute Key conforming to the
+	// "user_agent.os.version" semantic conventions. It represents the version
+	// string of the operating system as defined in [Version Attributes].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "14.2.1", "18.04.1"
+	// Note: For mapping user agent strings to OS versions, libraries such as
+	// [ua-parser] can be utilized.
+	//
+	// [Version Attributes]: /docs/resource/README.md#version-attributes
+	// [ua-parser]: https://github.com/ua-parser
+	UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+	// UserAgentSyntheticTypeKey is the attribute Key conforming to the
+	// "user_agent.synthetic.type" semantic conventions. It specifies the
+	// category of synthetic traffic, such as tests or bots.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: This attribute MAY be derived from the contents of the
+	// `user_agent.original` attribute. Components that populate the attribute are
+	// responsible for determining what they consider to be synthetic bot or test
+	// traffic. This attribute can either be set for self-identification purposes,
+	// or on telemetry detected to be generated as a result of a synthetic request.
+	// This attribute is useful for distinguishing between genuine client traffic
+	// and synthetic traffic generated by bots or tests.
+	UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type")
+
+	// UserAgentVersionKey is the attribute Key conforming to the
+	// "user_agent.version" semantic conventions. It represents the version of the
+	// user-agent extracted from original. Usually refers to the browser's version.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "14.1.2", "1.0.0"
+	// Note: [Example] of extracting browser's version from original string. In the
+	// case of using a user-agent for non-browser products, such as microservices
+	// with multiple names/versions inside the `user_agent.original`, the most
+	// significant version SHOULD be selected. In such a scenario it should align
+	// with `user_agent.name`
+	//
+	// [Example]: https://www.whatsmyua.info
+	UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions.
It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP User-Agent] header sent by the client. +// +// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentOSName returns an attribute KeyValue conforming to the +// "user_agent.os.name" semantic conventions. It represents the human readable +// operating system name. +func UserAgentOSName(val string) attribute.KeyValue { + return UserAgentOSNameKey.String(val) +} + +// UserAgentOSVersion returns an attribute KeyValue conforming to the +// "user_agent.os.version" semantic conventions. It represents the version string +// of the operating system as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func UserAgentOSVersion(val string) attribute.KeyValue { + return UserAgentOSVersionKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version. +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// Enum values for user_agent.synthetic.type +var ( + // Bot source. + // Stability: development + UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") + // Synthetic test source. + // Stability: development + UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") +) + +// Namespace: vcs +const ( + // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" + // semantic conventions. It represents the ID of the change (pull request/merge + // request/changelist) if applicable. This is usually a unique (within + // repository) identifier generated by the VCS system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + VCSChangeIDKey = attribute.Key("vcs.change.id") + + // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" + // semantic conventions. It represents the state of the change (pull + // request/merge request/changelist). + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "open", "closed", "merged" + VCSChangeStateKey = attribute.Key("vcs.change.state") + + // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" + // semantic conventions. It represents the human readable title of the change + // (pull request/merge request/changelist). This title is often a brief summary + // of the change and may get merged in to a ref as the commit summary. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update + // dependency" + VCSChangeTitleKey = attribute.Key("vcs.change.title") + + // VCSLineChangeTypeKey is the attribute Key conforming to the + // "vcs.line_change.type" semantic conventions. It represents the type of line + // change being measured on a branch or change. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "added", "removed" + VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type") + + // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name" + // semantic conventions. It represents the group owner within the version + // control system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org", "myteam", "business-unit" + VCSOwnerNameKey = attribute.Key("vcs.owner.name") + + // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name" + // semantic conventions. It represents the name of the version control system + // provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "github", "gitlab", "gitea", "bitbucket" + VCSProviderNameKey = attribute.Key("vcs.provider.name") + + // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name") + + // VCSRefBaseRevisionKey is the attribute Key conforming to the + // "vcs.ref.base.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. The + // revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.base.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. + // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision") + + // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type") + + // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name") + + // VCSRefHeadRevisionKey is the attribute Key conforming to the + // "vcs.ref.head.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `head` refers to where you are right now; the current reference at a + // given time.The revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.head.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. + // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision") + + // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type") + + // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic + // conventions. It represents the type of the [reference] in the repository. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefTypeKey = attribute.Key("vcs.ref.type") + + // VCSRepositoryNameKey is the attribute Key conforming to the + // "vcs.repository.name" semantic conventions. It represents the human readable + // name of the repository. It SHOULD NOT include any additional identifier like + // Group/SubGroup in GitLab or organization in GitHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "semantic-conventions", "my-cool-repo" + // Note: Due to it only being the name, it can clash with forks of the same + // repository if collecting telemetry across multiple orgs or groups in + // the same backends. + VCSRepositoryNameKey = attribute.Key("vcs.repository.name") + + // VCSRepositoryURLFullKey is the attribute Key conforming to the + // "vcs.repository.url.full" semantic conventions. It represents the + // [canonical URL] of the repository providing the complete HTTP(S) address in + // order to locate and identify the repository through a browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/opentelemetry/open-telemetry-collector-contrib", + // "https://gitlab.com/my-org/my-project/my-projects-project/repo" + // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include + // the `.git` extension. + // + // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. + VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") + + // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the + // "vcs.revision_delta.direction" semantic conventions. It represents the type + // of revision comparison. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ahead", "behind" + VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") +) + +// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" +// semantic conventions. It represents the ID of the change (pull request/merge +// request/changelist) if applicable. This is usually a unique (within +// repository) identifier generated by the VCS system. +func VCSChangeID(val string) attribute.KeyValue { + return VCSChangeIDKey.String(val) +} + +// VCSChangeTitle returns an attribute KeyValue conforming to the +// "vcs.change.title" semantic conventions. It represents the human readable +// title of the change (pull request/merge request/changelist). This title is +// often a brief summary of the change and may get merged in to a ref as the +// commit summary. +func VCSChangeTitle(val string) attribute.KeyValue { + return VCSChangeTitleKey.String(val) +} + +// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" +// semantic conventions. It represents the group owner within the version control +// system. +func VCSOwnerName(val string) attribute.KeyValue { + return VCSOwnerNameKey.String(val) +} + +// VCSRefBaseName returns an attribute KeyValue conforming to the +// "vcs.ref.base.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. 
+// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefBaseName(val string) attribute.KeyValue { + return VCSRefBaseNameKey.String(val) +} + +// VCSRefBaseRevision returns an attribute KeyValue conforming to the +// "vcs.ref.base.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefBaseRevision(val string) attribute.KeyValue { + return VCSRefBaseRevisionKey.String(val) +} + +// VCSRefHeadName returns an attribute KeyValue conforming to the +// "vcs.ref.head.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefHeadName(val string) attribute.KeyValue { + return VCSRefHeadNameKey.String(val) +} + +// VCSRefHeadRevision returns an attribute KeyValue conforming to the +// "vcs.ref.head.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefHeadRevision(val string) attribute.KeyValue { + return VCSRefHeadRevisionKey.String(val) +} + +// VCSRepositoryName returns an attribute KeyValue conforming to the +// "vcs.repository.name" semantic conventions. It represents the human readable +// name of the repository. It SHOULD NOT include any additional identifier like +// Group/SubGroup in GitLab or organization in GitHub. +func VCSRepositoryName(val string) attribute.KeyValue { + return VCSRepositoryNameKey.String(val) +} + +// VCSRepositoryURLFull returns an attribute KeyValue conforming to the +// "vcs.repository.url.full" semantic conventions. It represents the +// [canonical URL] of the repository providing the complete HTTP(S) address in +// order to locate and identify the repository through a browser. +// +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. +func VCSRepositoryURLFull(val string) attribute.KeyValue { + return VCSRepositoryURLFullKey.String(val) +} + +// Enum values for vcs.change.state +var ( + // Open means the change is currently active and under review. It hasn't been + // merged into the target branch yet, and it's still possible to make changes or + // add comments. + // Stability: development + VCSChangeStateOpen = VCSChangeStateKey.String("open") + // WIP (work-in-progress, draft) means the change is still in progress and not + // yet ready for a full review. It might still undergo significant changes. + // Stability: development + VCSChangeStateWip = VCSChangeStateKey.String("wip") + // Closed means the merge request has been closed without merging. This can + // happen for various reasons, such as the changes being deemed unnecessary, the + // issue being resolved in another way, or the author deciding to withdraw the + // request. + // Stability: development + VCSChangeStateClosed = VCSChangeStateKey.String("closed") + // Merged indicates that the change has been successfully integrated into the + // target codebase. 
+ // Stability: development + VCSChangeStateMerged = VCSChangeStateKey.String("merged") +) + +// Enum values for vcs.line_change.type +var ( + // How many lines were added. + // Stability: development + VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") + // How many lines were removed. + // Stability: development + VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") +) + +// Enum values for vcs.provider.name +var ( + // [GitHub] + // Stability: development + // + // [GitHub]: https://github.com + VCSProviderNameGithub = VCSProviderNameKey.String("github") + // [GitLab] + // Stability: development + // + // [GitLab]: https://gitlab.com + VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") + // Deprecated: Replaced by `gitea`. + VCSProviderNameGittea = VCSProviderNameKey.String("gittea") + // [Gitea] + // Stability: development + // + // [Gitea]: https://gitea.io + VCSProviderNameGitea = VCSProviderNameKey.String("gitea") + // [Bitbucket] + // Stability: development + // + // [Bitbucket]: https://bitbucket.org + VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") +) + +// Enum values for vcs.ref.base.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") +) + +// Enum values for vcs.ref.head.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") +) + +// Enum values for vcs.ref.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefTypeBranch = VCSRefTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefTypeTag = VCSRefTypeKey.String("tag") +) + +// Enum values for vcs.revision_delta.direction +var ( + // How many revisions the change is behind the target ref. + // Stability: development + VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") + // How many revisions the change is ahead of the target ref. + // Stability: development + VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") +) + +// Namespace: webengine +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the additional + // description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final" + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly" + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of the + // web engine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "21.0.0" + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the "webengine.name" +// semantic conventions. It represents the name of the web engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the web +// engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go new file mode 100644 index 000000000..2c5c7ebd0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.34.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go new file mode 100644 index 000000000..88a998f1e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/goconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/goconv/metric.go new file mode 100644 index 000000000..564ff8837 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/goconv/metric.go @@ -0,0 +1,508 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Package httpconv provides types and functionality for OpenTelemetry semantic +// conventions in the "go" namespace. 
+package goconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// MemoryTypeAttr is an attribute conforming to the go.memory.type semantic +// conventions. It represents the type of memory. +type MemoryTypeAttr string + +var ( + // MemoryTypeStack is the memory allocated from the heap that is reserved for + // stack space, whether or not it is currently in-use. + MemoryTypeStack MemoryTypeAttr = "stack" + // MemoryTypeOther is the memory used by the Go runtime, excluding other + // categories of memory usage described in this enumeration. + MemoryTypeOther MemoryTypeAttr = "other" +) + +// ConfigGogc is an instrument used to record metric values conforming to the +// "go.config.gogc" semantic conventions. It represents the heap size target +// percentage configured by the user, otherwise 100. +type ConfigGogc struct { + metric.Int64ObservableUpDownCounter +} + +// NewConfigGogc returns a new ConfigGogc instrument. +func NewConfigGogc( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (ConfigGogc, error) { + // Check if the meter is nil. + if m == nil { + return ConfigGogc{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "go.config.gogc", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Heap size target percentage configured by the user, otherwise 100."), + metric.WithUnit("%"), + }, opt...)..., + ) + if err != nil { + return ConfigGogc{noop.Int64ObservableUpDownCounter{}}, err + } + return ConfigGogc{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ConfigGogc) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ConfigGogc) Name() string { + return "go.config.gogc" +} + +// Unit returns the semantic convention unit of the instrument +func (ConfigGogc) Unit() string { + return "%" +} + +// Description returns the semantic convention description of the instrument +func (ConfigGogc) Description() string { + return "Heap size target percentage configured by the user, otherwise 100." +} + +// GoroutineCount is an instrument used to record metric values conforming to the +// "go.goroutine.count" semantic conventions. It represents the count of live +// goroutines. +type GoroutineCount struct { + metric.Int64ObservableUpDownCounter +} + +// NewGoroutineCount returns a new GoroutineCount instrument. +func NewGoroutineCount( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (GoroutineCount, error) { + // Check if the meter is nil. + if m == nil { + return GoroutineCount{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "go.goroutine.count", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Count of live goroutines."), + metric.WithUnit("{goroutine}"), + }, opt...)..., + ) + if err != nil { + return GoroutineCount{noop.Int64ObservableUpDownCounter{}}, err + } + return GoroutineCount{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m GoroutineCount) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (GoroutineCount) Name() string { + return "go.goroutine.count" +} + +// Unit returns the semantic convention unit of the instrument +func (GoroutineCount) Unit() string { + return "{goroutine}" +} + +// Description returns the semantic convention description of the instrument +func (GoroutineCount) Description() string { + return "Count of live goroutines." +} + +// MemoryAllocated is an instrument used to record metric values conforming to +// the "go.memory.allocated" semantic conventions. It represents the memory +// allocated to the heap by the application. +type MemoryAllocated struct { + metric.Int64ObservableCounter +} + +// NewMemoryAllocated returns a new MemoryAllocated instrument. +func NewMemoryAllocated( + m metric.Meter, + opt ...metric.Int64ObservableCounterOption, +) (MemoryAllocated, error) { + // Check if the meter is nil. + if m == nil { + return MemoryAllocated{noop.Int64ObservableCounter{}}, nil + } + + i, err := m.Int64ObservableCounter( + "go.memory.allocated", + append([]metric.Int64ObservableCounterOption{ + metric.WithDescription("Memory allocated to the heap by the application."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryAllocated{noop.Int64ObservableCounter{}}, err + } + return MemoryAllocated{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryAllocated) Inst() metric.Int64ObservableCounter { + return m.Int64ObservableCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryAllocated) Name() string { + return "go.memory.allocated" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryAllocated) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryAllocated) Description() string { + return "Memory allocated to the heap by the application." +} + +// MemoryAllocations is an instrument used to record metric values conforming to +// the "go.memory.allocations" semantic conventions. It represents the count of +// allocations to the heap by the application. +type MemoryAllocations struct { + metric.Int64ObservableCounter +} + +// NewMemoryAllocations returns a new MemoryAllocations instrument. +func NewMemoryAllocations( + m metric.Meter, + opt ...metric.Int64ObservableCounterOption, +) (MemoryAllocations, error) { + // Check if the meter is nil. + if m == nil { + return MemoryAllocations{noop.Int64ObservableCounter{}}, nil + } + + i, err := m.Int64ObservableCounter( + "go.memory.allocations", + append([]metric.Int64ObservableCounterOption{ + metric.WithDescription("Count of allocations to the heap by the application."), + metric.WithUnit("{allocation}"), + }, opt...)..., + ) + if err != nil { + return MemoryAllocations{noop.Int64ObservableCounter{}}, err + } + return MemoryAllocations{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryAllocations) Inst() metric.Int64ObservableCounter { + return m.Int64ObservableCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (MemoryAllocations) Name() string { + return "go.memory.allocations" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryAllocations) Unit() string { + return "{allocation}" +} + +// Description returns the semantic convention description of the instrument +func (MemoryAllocations) Description() string { + return "Count of allocations to the heap by the application." +} + +// MemoryGCGoal is an instrument used to record metric values conforming to the +// "go.memory.gc.goal" semantic conventions. It represents the heap size target +// for the end of the GC cycle. +type MemoryGCGoal struct { + metric.Int64ObservableUpDownCounter +} + +// NewMemoryGCGoal returns a new MemoryGCGoal instrument. +func NewMemoryGCGoal( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (MemoryGCGoal, error) { + // Check if the meter is nil. + if m == nil { + return MemoryGCGoal{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "go.memory.gc.goal", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Heap size target for the end of the GC cycle."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryGCGoal{noop.Int64ObservableUpDownCounter{}}, err + } + return MemoryGCGoal{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryGCGoal) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryGCGoal) Name() string { + return "go.memory.gc.goal" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryGCGoal) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryGCGoal) Description() string { + return "Heap size target for the end of the GC cycle." +} + +// MemoryLimit is an instrument used to record metric values conforming to the +// "go.memory.limit" semantic conventions. It represents the go runtime memory +// limit configured by the user, if a limit exists. +type MemoryLimit struct { + metric.Int64ObservableUpDownCounter +} + +// NewMemoryLimit returns a new MemoryLimit instrument. +func NewMemoryLimit( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (MemoryLimit, error) { + // Check if the meter is nil. + if m == nil { + return MemoryLimit{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "go.memory.limit", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Go runtime memory limit configured by the user, if a limit exists."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryLimit{noop.Int64ObservableUpDownCounter{}}, err + } + return MemoryLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryLimit) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryLimit) Name() string { + return "go.memory.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryLimit) Description() string { + return "Go runtime memory limit configured by the user, if a limit exists." 
+} + +// MemoryUsed is an instrument used to record metric values conforming to the +// "go.memory.used" semantic conventions. It represents the memory used by the Go +// runtime. +type MemoryUsed struct { + metric.Int64ObservableUpDownCounter +} + +// NewMemoryUsed returns a new MemoryUsed instrument. +func NewMemoryUsed( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (MemoryUsed, error) { + // Check if the meter is nil. + if m == nil { + return MemoryUsed{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "go.memory.used", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Memory used by the Go runtime."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryUsed{noop.Int64ObservableUpDownCounter{}}, err + } + return MemoryUsed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryUsed) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryUsed) Name() string { + return "go.memory.used" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryUsed) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryUsed) Description() string { + return "Memory used by the Go runtime." +} + +// AttrMemoryType returns an optional attribute for the "go.memory.type" semantic +// convention. It represents the type of memory. +func (MemoryUsed) AttrMemoryType(val MemoryTypeAttr) attribute.KeyValue { + return attribute.String("go.memory.type", string(val)) +} + +// ProcessorLimit is an instrument used to record metric values conforming to the +// "go.processor.limit" semantic conventions. It represents the number of OS +// threads that can execute user-level Go code simultaneously. +type ProcessorLimit struct { + metric.Int64ObservableUpDownCounter +} + +// NewProcessorLimit returns a new ProcessorLimit instrument. +func NewProcessorLimit( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (ProcessorLimit, error) { + // Check if the meter is nil. + if m == nil { + return ProcessorLimit{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "go.processor.limit", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of OS threads that can execute user-level Go code simultaneously."), + metric.WithUnit("{thread}"), + }, opt...)..., + ) + if err != nil { + return ProcessorLimit{noop.Int64ObservableUpDownCounter{}}, err + } + return ProcessorLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ProcessorLimit) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ProcessorLimit) Name() string { + return "go.processor.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (ProcessorLimit) Unit() string { + return "{thread}" +} + +// Description returns the semantic convention description of the instrument +func (ProcessorLimit) Description() string { + return "The number of OS threads that can execute user-level Go code simultaneously." +} + +// ScheduleDuration is an instrument used to record metric values conforming to +// the "go.schedule.duration" semantic conventions. 
It represents the time +// goroutines have spent in the scheduler in a runnable state before actually +// running. +type ScheduleDuration struct { + metric.Float64Histogram +} + +// NewScheduleDuration returns a new ScheduleDuration instrument. +func NewScheduleDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ScheduleDuration, error) { + // Check if the meter is nil. + if m == nil { + return ScheduleDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "go.schedule.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The time goroutines have spent in the scheduler in a runnable state before actually running."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ScheduleDuration{noop.Float64Histogram{}}, err + } + return ScheduleDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ScheduleDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ScheduleDuration) Name() string { + return "go.schedule.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ScheduleDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ScheduleDuration) Description() string { + return "The time goroutines have spent in the scheduler in a runnable state before actually running." +} + +// Record records val to the current distribution. +// +// Computed from `/sched/latencies:seconds`. Bucket boundaries are provided by +// the runtime, and are subject to change. +func (m ScheduleDuration) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Histogram.Record(ctx, val, *o...) +} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go new file mode 100644 index 000000000..3c23d4592 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.34.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go index d90af8f67..f3aa39813 100644 --- a/vendor/go.opentelemetry.io/otel/trace/auto.go +++ b/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/internal/telemetry" ) diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index ac3c0b15d..7afe92b59 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.36.0" + return "1.37.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 79f82f3d0..9d4742a17 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,13 +3,12 @@ module-sets: stable-v1: - version: v1.36.0 + version: v1.37.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -23,14 +22,16 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.58.0 + version: v0.59.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.12.0 + version: v0.13.0 modules: - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/log/logtest - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/sdk/log/logtest - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog @@ -40,6 +41,4 @@ module-sets: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - - go.opentelemetry.io/otel/log/logtest - - go.opentelemetry.io/otel/sdk/log/logtest - go.opentelemetry.io/otel/trace/internal/telemetry/test diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 97bd8b06f..db3264da8 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -39,7 +39,7 @@ const ( FrameContinuation FrameType = 0x9 ) -var frameName = map[FrameType]string{ +var frameNames = [...]string{ FrameData: "DATA", FrameHeaders: "HEADERS", FramePriority: "PRIORITY", @@ -53,10 +53,10 @@ var frameName = map[FrameType]string{ } func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s + if int(t) < len(frameNames) { + return frameNames[t] } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t) } // Flags is a bitmask of HTTP/2 flags. 
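Aside (not part of this diff): the semconv v1.34.0 and goconv packages vendored above are consumed through ordinary OpenTelemetry APIs. The attribute helpers return plain `attribute.KeyValue` values and are typically attached to an SDK resource together with the package's `SchemaURL`. The sketch below assumes the standard `resource.NewWithAttributes` API from `go.opentelemetry.io/otel/sdk/resource`; the package name and attribute values are placeholders taken from the Examples in the generated comments:

```go
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

// newResource stamps a resource with the v1.34.0 schema URL and a few of the
// attribute helpers defined in the vendored package. Values are placeholders.
func newResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.WebEngineName("WildFly"),
		semconv.WebEngineVersion("21.0.0"),
		semconv.VCSRepositoryName("my-cool-repo"),
	)
}
```

The same helpers can also be set on spans, e.g. `span.SetAttributes(semconv.UserAgentOriginal(req.UserAgent()))`. The goconv instrument wrappers, in turn, are thin types around `metric` instruments; for the observable ones the caller still registers the callback. A minimal, hypothetical registration for `GoroutineCount` (meter name and function name are illustrative only):

```go
package example

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/semconv/v1.34.0/goconv"
)

// registerGoroutineCount creates the go.goroutine.count instrument and
// reports runtime.NumGoroutine() on every collection cycle.
func registerGoroutineCount() error {
	meter := otel.Meter("example/runtime")

	gc, err := goconv.NewGoroutineCount(meter)
	if err != nil {
		return err
	}

	_, err = meter.RegisterCallback(
		func(_ context.Context, o metric.Observer) error {
			o.ObserveInt64(gc.Inst(), int64(runtime.NumGoroutine()))
			return nil
		},
		gc.Inst(),
	)
	return err
}
```

Attribute helpers such as `MemoryUsed.AttrMemoryType` can be passed to the same `ObserveInt64` call via `metric.WithAttributes` when an extra dimension is needed.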
@@ -124,7 +124,7 @@ var flagName = map[FrameType]map[Flags]string{ // might be 0). type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) -var frameParsers = map[FrameType]frameParser{ +var frameParsers = [...]frameParser{ FrameData: parseDataFrame, FrameHeaders: parseHeadersFrame, FramePriority: parsePriorityFrame, @@ -138,8 +138,8 @@ var frameParsers = map[FrameType]frameParser{ } func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f + if int(t) < len(frameParsers) { + return frameParsers[t] } return parseUnknownFrame } @@ -509,7 +509,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { } if fh.Length > fr.maxReadSize { if fh == invalidHTTP1LookingFrameHeader() { - return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) } return nil, ErrFrameTooLarge } diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go index c646a6952..3aaffdd1f 100644 --- a/vendor/golang.org/x/net/trace/events.go +++ b/vendor/golang.org/x/net/trace/events.go @@ -508,7 +508,7 @@ const eventsHTML = ` {{$el.When}} {{$el.ElapsedTime}} - {{$el.Title}} + {{$el.Title}} {{if $.Expanded}} diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index d9bfa6e1e..1de0ce666 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,73 +1,102 @@ # How to contribute -We definitely welcome your patches and contributions to gRPC! Please read the gRPC -organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) -and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. +We welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance +rules](https://github.com/grpc/grpc-community/blob/master/governance.md) before +proceeding. If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). +[Contributor License +Agreement](https://identity.linuxfoundation.org/projects/cncf). When you create +your first PR, a link will be added as a comment that contains the steps needed +to complete this process. + +## Getting Started + +A great way to start is by searching through our open issues. [Unassigned issues +labeled as "help +wanted"](https://github.com/grpc/grpc-go/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3A%22Status%3A%20Help%20Wanted%22%20no%3Aassignee) +are especially nice for first-time contributors, as they should be well-defined +problems that already have agreed-upon solutions. + +## Code Style + +We follow [Google's published Go style +guide](https://google.github.io/styleguide/go/). Note that there are three +primary documents that make up this style guide; please follow them as closely +as possible. If a reviewer recommends something that contradicts those +guidelines, there may be valid reasons to do so, but it should be rare. 
## Guidelines for Pull Requests -How to get your contributions merged smoothly and quickly. + +How to get your contributions merged smoothly and quickly: - Create **small PRs** that are narrowly focused on **addressing a single - concern**. We often times receive PRs that are trying to fix several things at - a time, but only one fix is considered acceptable, nothing gets merged and - both author's & review's time is wasted. Create more PRs to address different - concerns and everyone will be happy. + concern**. We often receive PRs that attempt to fix several things at the same + time, and if one part of the PR has a problem, that will hold up the entire + PR. -- If you are searching for features to work on, issues labeled [Status: Help - Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) - is a great place to start. These issues are well-documented and usually can be - resolved with a single pull request. +- For **speculative changes**, consider opening an issue and discussing it + first. If you are suggesting a behavioral or API change, consider starting + with a [gRFC proposal](https://github.com/grpc/proposal). Many new features + that are not bug fixes will require cross-language agreement. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file - and update the year. +- If you want to fix **formatting or style**, consider whether your changes are + an obvious improvement or might be considered a personal preference. If a + style change is based on preference, it likely will not be accepted. If it + corrects widely agreed-upon anti-patterns, then please do create a PR and + explain the benefits of the change. -- The grpc package should only depend on standard Go packages and a small number - of exceptions. If your contribution introduces new dependencies which are NOT - in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a - discussion with gRPC-Go authors and consultants. - -- For speculative changes, consider opening an issue and discussing it first. If - you are suggesting a behavioral or API change, consider starting with a [gRFC - proposal](https://github.com/grpc/proposal). +- For correcting **misspellings**, please be aware that we use some terms that + are sometimes flagged by spell checkers. As an example, "if an only if" is + often written as "iff". Please do not make spelling correction changes unless + you are certain they are misspellings. - Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a GitHub issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the - benefits of the change. - -- Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We'll mark it as `Status: Requires - Reporter Clarification` if we expect you to respond to these comments in a - timely manner. If the PR remains inactive for 6 days, it will be marked as - `stale` and automatically close 7 days after that if we don't hear back from - you. 
- -- Maintain **clean commit history** and use **meaningful commit messages**. PRs - with messy commit history are difficult to review and won't be merged. Use - `rebase -i upstream/master` to curate your commit history and/or to bring in - latest changes from master (but avoid rebasing in the middle of a code - review). - -- Keep your PR up to date with upstream/master (if there are merge conflicts, we - can't really merge your change). +- Maintain a **clean commit history** and use **meaningful commit messages**. + PRs with messy commit histories are difficult to review and won't be merged. + Before sending your PR, ensure your changes are based on top of the latest + `upstream/master` commits, and avoid rebasing in the middle of a code review. + You should **never use `git push -f`** unless absolutely necessary during a + review, as it can interfere with GitHub's tracking of comments. - **All tests need to be passing** before your change can be merged. We - recommend you **run tests locally** before creating your PR to catch breakages - early on. - - `./scripts/vet.sh` to catch vet errors - - `go test -cpu 1,4 -timeout 7m ./...` to run the tests - - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode + recommend you run tests locally before creating your PR to catch breakages + early on: -- Exceptions to the rules can be made if there's a compelling reason for doing so. + - `./scripts/vet.sh` to catch vet errors. + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests. + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode. + + Note that we have a multi-module repo, so `go test` commands may need to be + run from the root of each module in order to cause all tests to run. + + *Alternatively*, you may find it easier to push your changes to your fork on + GitHub, which will trigger a GitHub Actions run that you can use to verify + everything is passing. + +- If you are adding a new file, make sure it has the **copyright message** + template at the top as a comment. You can copy the message from an existing + file and update the year. + +- The grpc package should only depend on standard Go packages and a small number + of exceptions. **If your contribution introduces new dependencies**, you will + need a discussion with gRPC-Go maintainers. A GitHub action check will run on + every PR, and will flag any transitive dependency changes from any public + package. + +- Unless your PR is trivial, you should **expect reviewer comments** that you + will need to address before merging. We'll label the PR as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale`, and we will automatically close it after 7 days if we don't hear back + from you. Please feel free to ping issues or bugs if you do not get a response + within a week. + +- Exceptions to the rules can be made if there's a compelling reason to do so. 
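For the copyright message template mentioned in the contribution guidelines above: new files in grpc-go carry the standard Apache-2.0 header as a top-of-file comment. Copy the exact text from an existing file and update the year; the sketch below only illustrates the general shape and uses a hypothetical package name:

```go
/*
 *
 * Copyright 2025 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package widget is a hypothetical example package.
package widget
```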
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index b572707c6..f9a88d597 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -32,6 +32,7 @@ import "google.golang.org/grpc" - [Low-level technical docs](Documentation) from this repository - [Performance benchmark][] - [Examples](examples) +- [Contribution guidelines](CONTRIBUTING.md) ## FAQ diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 825c31795..b1364a032 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.5 +// protoc-gen-go v1.36.6 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -858,133 +858,68 @@ func (x *Address) GetIpPort() uint32 { var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor -var file_grpc_binlog_v1_binarylog_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, - 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, - 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, - 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, - 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, - 0x4c, 0x6f, 
0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, - 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, - 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, - 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, - 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, - 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, - 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, - 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, - 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, - 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, - 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, - 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, - 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, - 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 
0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, - 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, - 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, - 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, - 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, - 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, - 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, - 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, - 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, - 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 
0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, - 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, - 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, - 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, - 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, - 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, - 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, - 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -}) +const file_grpc_binlog_v1_binarylog_proto_rawDesc = "" + + "\n" + + "\x1egrpc/binlog/v1/binarylog.proto\x12\x11grpc.binarylog.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\a\n" + + "\fGrpcLogEntry\x128\n" + + "\ttimestamp\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x17\n" + + "\acall_id\x18\x02 \x01(\x04R\x06callId\x125\n" + + "\x17sequence_id_within_call\x18\x03 \x01(\x04R\x14sequenceIdWithinCall\x12=\n" + + "\x04type\x18\x04 \x01(\x0e2).grpc.binarylog.v1.GrpcLogEntry.EventTypeR\x04type\x12>\n" + + "\x06logger\x18\x05 \x01(\x0e2&.grpc.binarylog.v1.GrpcLogEntry.LoggerR\x06logger\x12F\n" + + "\rclient_header\x18\x06 \x01(\v2\x1f.grpc.binarylog.v1.ClientHeaderH\x00R\fclientHeader\x12F\n" + + "\rserver_header\x18\a \x01(\v2\x1f.grpc.binarylog.v1.ServerHeaderH\x00R\fserverHeader\x126\n" 
+ + "\amessage\x18\b \x01(\v2\x1a.grpc.binarylog.v1.MessageH\x00R\amessage\x126\n" + + "\atrailer\x18\t \x01(\v2\x1a.grpc.binarylog.v1.TrailerH\x00R\atrailer\x12+\n" + + "\x11payload_truncated\x18\n" + + " \x01(\bR\x10payloadTruncated\x12.\n" + + "\x04peer\x18\v \x01(\v2\x1a.grpc.binarylog.v1.AddressR\x04peer\"\xf5\x01\n" + + "\tEventType\x12\x16\n" + + "\x12EVENT_TYPE_UNKNOWN\x10\x00\x12\x1c\n" + + "\x18EVENT_TYPE_CLIENT_HEADER\x10\x01\x12\x1c\n" + + "\x18EVENT_TYPE_SERVER_HEADER\x10\x02\x12\x1d\n" + + "\x19EVENT_TYPE_CLIENT_MESSAGE\x10\x03\x12\x1d\n" + + "\x19EVENT_TYPE_SERVER_MESSAGE\x10\x04\x12 \n" + + "\x1cEVENT_TYPE_CLIENT_HALF_CLOSE\x10\x05\x12\x1d\n" + + "\x19EVENT_TYPE_SERVER_TRAILER\x10\x06\x12\x15\n" + + "\x11EVENT_TYPE_CANCEL\x10\a\"B\n" + + "\x06Logger\x12\x12\n" + + "\x0eLOGGER_UNKNOWN\x10\x00\x12\x11\n" + + "\rLOGGER_CLIENT\x10\x01\x12\x11\n" + + "\rLOGGER_SERVER\x10\x02B\t\n" + + "\apayload\"\xbb\x01\n" + + "\fClientHeader\x127\n" + + "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" + + "\vmethod_name\x18\x02 \x01(\tR\n" + + "methodName\x12\x1c\n" + + "\tauthority\x18\x03 \x01(\tR\tauthority\x123\n" + + "\atimeout\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\atimeout\"G\n" + + "\fServerHeader\x127\n" + + "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\"\xb1\x01\n" + + "\aTrailer\x127\n" + + "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" + + "\vstatus_code\x18\x02 \x01(\rR\n" + + "statusCode\x12%\n" + + "\x0estatus_message\x18\x03 \x01(\tR\rstatusMessage\x12%\n" + + "\x0estatus_details\x18\x04 \x01(\fR\rstatusDetails\"5\n" + + "\aMessage\x12\x16\n" + + "\x06length\x18\x01 \x01(\rR\x06length\x12\x12\n" + + "\x04data\x18\x02 \x01(\fR\x04data\"B\n" + + "\bMetadata\x126\n" + + "\x05entry\x18\x01 \x03(\v2 .grpc.binarylog.v1.MetadataEntryR\x05entry\"7\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"\xb8\x01\n" + + "\aAddress\x123\n" + + "\x04type\x18\x01 \x01(\x0e2\x1f.grpc.binarylog.v1.Address.TypeR\x04type\x12\x18\n" + + "\aaddress\x18\x02 \x01(\tR\aaddress\x12\x17\n" + + "\aip_port\x18\x03 \x01(\rR\x06ipPort\"E\n" + + "\x04Type\x12\x10\n" + + "\fTYPE_UNKNOWN\x10\x00\x12\r\n" + + "\tTYPE_IPV4\x10\x01\x12\r\n" + + "\tTYPE_IPV6\x10\x02\x12\r\n" + + "\tTYPE_UNIX\x10\x03B\\\n" + + "\x14io.grpc.binarylog.v1B\x0eBinaryLogProtoP\x01Z2google.golang.org/grpc/binarylog/grpc_binarylog_v1b\x06proto3" var ( file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 4f350ca56..cd3eaf8dd 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -689,22 +689,31 @@ func (cc *ClientConn) Connect() { cc.mu.Unlock() } -// waitForResolvedAddrs blocks until the resolver has provided addresses or the -// context expires. Returns nil unless the context expires first; otherwise -// returns a status error based on the context. -func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { +// waitForResolvedAddrs blocks until the resolver provides addresses or the +// context expires, whichever happens first. +// +// Error is nil unless the context expires first; otherwise returns a status +// error based on the context. +// +// The returned boolean indicates whether it did block or not. 
If the +// resolution has already happened once before, it returns false without +// blocking. Otherwise, it wait for the resolution and return true if +// resolution has succeeded or return false along with error if resolution has +// failed. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) (bool, error) { // This is on the RPC path, so we use a fast path to avoid the // more-expensive "select" below after the resolver has returned once. if cc.firstResolveEvent.HasFired() { - return nil + return false, nil } + internal.NewStreamWaitingForResolver() select { case <-cc.firstResolveEvent.Done(): - return nil + return true, nil case <-ctx.Done(): - return status.FromContextError(ctx.Err()).Err() + return false, status.FromContextError(ctx.Err()).Err() case <-cc.ctx.Done(): - return ErrClientConnClosing + return false, ErrClientConnClosing } } diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 665e790bb..a63ab606e 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -120,6 +120,20 @@ type AuthInfo interface { AuthType() string } +// AuthorityValidator validates the authority used to override the `:authority` +// header. This is an optional interface that implementations of AuthInfo can +// implement if they support per-RPC authority overrides. It is invoked when the +// application attempts to override the HTTP/2 `:authority` header using the +// CallAuthority call option. +type AuthorityValidator interface { + // ValidateAuthority checks the authority value used to override the + // `:authority` header. The authority parameter is the override value + // provided by the application via the CallAuthority option. This value + // typically corresponds to the server hostname or endpoint the RPC is + // targeting. It returns non-nil error if the validation fails. + ValidateAuthority(authority string) error +} + // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC // and the caller should not close rawConn. var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") @@ -207,14 +221,32 @@ type RequestInfo struct { AuthInfo AuthInfo } +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + // RequestInfoFromContext extracts the RequestInfo from the context if it exists. // // This API is experimental. func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { - ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) return ri, ok } +// NewContextWithRequestInfo creates a new context from ctx and attaches ri to it. +// +// This RequestInfo will be accessible via RequestInfoFromContext. +// +// Intended to be used from tests for PerRPCCredentials implementations (that +// often need to check connection's SecurityLevel). Should not be used from +// non-test code: the gRPC client already prepares a context with the correct +// RequestInfo attached when calling PerRPCCredentials.GetRequestMetadata. +// +// This API is experimental. +func NewContextWithRequestInfo(ctx context.Context, ri RequestInfo) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + // ClientHandshakeInfo holds data to be passed to ClientHandshake. 
This makes // it possible to pass arbitrary data to the handshaker from gRPC, resolver, // balancer etc. Individual credential implementations control the actual diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 4c805c644..93156c0f3 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -30,7 +30,7 @@ import ( // NewCredentials returns a credentials which disables transport security. // // Note that using this credentials with per-RPC credentials which require -// transport security is incompatible and will cause grpc.Dial() to fail. +// transport security is incompatible and will cause RPCs to fail. func NewCredentials() credentials.TransportCredentials { return insecureTC{} } @@ -71,6 +71,12 @@ func (info) AuthType() string { return "insecure" } +// ValidateAuthority allows any value to be overridden for the :authority +// header. +func (info) ValidateAuthority(string) error { + return nil +} + // insecureBundle implements an insecure bundle. // An insecure bundle provides a thin wrapper around insecureTC to support // the credentials.Bundle interface. diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index bd5fe22b6..20f65f7bd 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -22,6 +22,7 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "net" "net/url" @@ -50,6 +51,21 @@ func (t TLSInfo) AuthType() string { return "tls" } +// ValidateAuthority validates the provided authority being used to override the +// :authority header by verifying it against the peer certificates. It returns a +// non-nil error if the validation fails. +func (t TLSInfo) ValidateAuthority(authority string) error { + var errs []error + for _, cert := range t.State.PeerCertificates { + var err error + if err = cert.VerifyHostname(authority); err == nil { + return nil + } + errs = append(errs, err) + } + return fmt.Errorf("credentials: invalid authority %q: %v", authority, errors.Join(errs...)) +} + // cipherSuiteLookup returns the string version of a TLS cipher suite ID. func cipherSuiteLookup(cipherSuiteID uint16) string { for _, s := range tls.CipherSuites() { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 405a2ffeb..050ba0f16 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -360,7 +360,7 @@ func WithReturnConnectionError() DialOption { // // Note that using this DialOption with per-RPC credentials (through // WithCredentialsBundle or WithPerRPCCredentials) which require transport -// security is incompatible and will cause grpc.Dial() to fail. +// security is incompatible and will cause RPCs to fail. // // Deprecated: use WithTransportCredentials and insecure.NewCredentials() // instead. Will be supported throughout 1.x. diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index faa59e418..22d263fb9 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.5 +// protoc-gen-go v1.36.6 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -261,63 +261,29 @@ func (x *HealthListResponse) GetStatuses() map[string]*HealthCheckResponse { var File_grpc_health_v1_health_proto protoreflect.FileDescriptor -var file_grpc_health_v1_health_proto_rawDesc = string([]byte{ - 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31, - 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a, - 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01, - 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, - 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, - 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x03, 0x22, 0x13, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xc4, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, - 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, - 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x1a, 0x60, 0x0a, 0x0d, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0xfd, 0x01, - 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 
0x61, 0x6c, 0x74, 0x68, 0x2e, - 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x04, 0x4c, 0x69, - 0x73, 0x74, 0x12, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x05, 0x57, 0x61, 0x74, - 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x70, 0x0a, - 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, - 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x76, 0x31, 0xa2, - 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x56, 0x31, 0xaa, 0x02, - 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_grpc_health_v1_health_proto_rawDesc = "" + + "\n" + + "\x1bgrpc/health/v1/health.proto\x12\x0egrpc.health.v1\".\n" + + "\x12HealthCheckRequest\x12\x18\n" + + "\aservice\x18\x01 \x01(\tR\aservice\"\xb1\x01\n" + + "\x13HealthCheckResponse\x12I\n" + + "\x06status\x18\x01 \x01(\x0e21.grpc.health.v1.HealthCheckResponse.ServingStatusR\x06status\"O\n" + + "\rServingStatus\x12\v\n" + + "\aUNKNOWN\x10\x00\x12\v\n" + + "\aSERVING\x10\x01\x12\x0f\n" + + "\vNOT_SERVING\x10\x02\x12\x13\n" + + "\x0fSERVICE_UNKNOWN\x10\x03\"\x13\n" + + "\x11HealthListRequest\"\xc4\x01\n" + + "\x12HealthListResponse\x12L\n" + + "\bstatuses\x18\x01 \x03(\v20.grpc.health.v1.HealthListResponse.StatusesEntryR\bstatuses\x1a`\n" + + "\rStatusesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x129\n" + + "\x05value\x18\x02 \x01(\v2#.grpc.health.v1.HealthCheckResponseR\x05value:\x028\x012\xfd\x01\n" + + "\x06Health\x12P\n" + + "\x05Check\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse\x12M\n" + + "\x04List\x12!.grpc.health.v1.HealthListRequest\x1a\".grpc.health.v1.HealthListResponse\x12R\n" + + "\x05Watch\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse0\x01Bp\n" + + "\x11io.grpc.health.v1B\vHealthProtoP\x01Z,google.golang.org/grpc/health/grpc_health_v1\xa2\x02\fGrpcHealthV1\xaa\x02\x0eGrpc.Health.V1b\x06proto3" var ( 
file_grpc_health_v1_health_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 9deee7f65..48b22d9cf 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -20,20 +20,6 @@ import ( "context" ) -// requestInfoKey is a struct to be used as the key to store RequestInfo in a -// context. -type requestInfoKey struct{} - -// NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri any) context.Context { - return context.WithValue(ctx, requestInfoKey{}, ri) -} - -// RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) any { - return ctx.Value(requestInfoKey{}) -} - // clientHandshakeInfoKey is a struct used as the key to store // ClientHandshakeInfo in a context. type clientHandshakeInfoKey struct{} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index cc5713fd9..f5f2bdeb8 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,7 +36,7 @@ var ( // LeastRequestLB is set if we should support the least_request_experimental // LB policy, which can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". - LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", true) // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) @@ -69,6 +69,10 @@ var ( // to gRFC A76. It can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true". RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false) + + // ALTSHandshakerKeepaliveParams is set if we should add the + // KeepaliveParams when dial the ALTS handshaker service. + ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 2eb97f832..e87551552 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -63,4 +63,9 @@ var ( // For more details, see: // https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md. XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false) + + // XDSSPIFFEEnabled controls if SPIFFE Bundle Maps can be used as roots of + // trust. 
For more details, see: + // https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md + XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false) ) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go index fbe697c37..d788c2493 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/event.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -21,28 +21,25 @@ package grpcsync import ( - "sync" "sync/atomic" ) // Event represents a one-time event that may occur in the future. type Event struct { - fired int32 + fired atomic.Bool c chan struct{} - o sync.Once } // Fire causes e to complete. It is safe to call multiple times, and // concurrently. It returns true iff this call to Fire caused the signaling -// channel returned by Done to close. +// channel returned by Done to close. If Fire returns false, it is possible +// the Done channel has not been closed yet. func (e *Event) Fire() bool { - ret := false - e.o.Do(func() { - atomic.StoreInt32(&e.fired, 1) + if e.fired.CompareAndSwap(false, true) { close(e.c) - ret = true - }) - return ret + return true + } + return false } // Done returns a channel that will be closed when Fire is called. @@ -52,7 +49,7 @@ func (e *Event) Done() <-chan struct{} { // HasFired returns true if Fire has been called. func (e *Event) HasFired() bool { - return atomic.LoadInt32(&e.fired) == 1 + return e.fired.Load() } // NewEvent returns a new, ready-to-use Event. diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 2ce012cda..3ac798e8e 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -266,6 +266,13 @@ var ( TimeAfterFunc = func(d time.Duration, f func()) Timer { return time.AfterFunc(d, f) } + + // NewStreamWaitingForResolver is a test hook that is triggered when a + // new stream blocks while waiting for name resolution. This can be + // used in tests to synchronize resolver updates and avoid race conditions. + // When set, the function will be called before the stream enters + // the blocking state. 
+ NewStreamWaitingForResolver = func() {} ) // HealthChecker defines the signature of the client-side LB channel health diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go index c0e227577..20b8fb098 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go @@ -186,23 +186,15 @@ func (r *delegatingResolver) Close() { r.proxyResolver = nil } -func networkTypeFromAddr(addr resolver.Address) string { - networkType, ok := networktype.Get(addr) - if !ok { - networkType, _ = transport.ParseDialTarget(addr.Addr) - } - return networkType -} - -func isTCPAddressPresent(state *resolver.State) bool { +func needsProxyResolver(state *resolver.State) bool { for _, addr := range state.Addresses { - if networkType := networkTypeFromAddr(addr); networkType == "tcp" { + if !skipProxy(addr) { return true } } for _, endpoint := range state.Endpoints { for _, addr := range endpoint.Addresses { - if networktype := networkTypeFromAddr(addr); networktype == "tcp" { + if !skipProxy(addr) { return true } } @@ -210,6 +202,29 @@ func isTCPAddressPresent(state *resolver.State) bool { return false } +func skipProxy(address resolver.Address) bool { + // Avoid proxy when network is not tcp. + networkType, ok := networktype.Get(address) + if !ok { + networkType, _ = transport.ParseDialTarget(address.Addr) + } + if networkType != "tcp" { + return true + } + + req := &http.Request{URL: &url.URL{ + Scheme: "https", + Host: address.Addr, + }} + // Avoid proxy when address included in `NO_PROXY` environment variable or + // fails to get the proxy address. + url, err := HTTPSProxyFromEnvironment(req) + if err != nil || url == nil { + return true + } + return false +} + // updateClientConnStateLocked constructs a combined list of addresses by // pairing each proxy address with every target address of type TCP. For each // pair, it creates a new [resolver.Address] using the proxy address and @@ -240,8 +255,7 @@ func (r *delegatingResolver) updateClientConnStateLocked() error { } var addresses []resolver.Address for _, targetAddr := range (*r.targetResolverState).Addresses { - // Avoid proxy when network is not tcp. - if networkType := networkTypeFromAddr(targetAddr); networkType != "tcp" { + if skipProxy(targetAddr) { addresses = append(addresses, targetAddr) continue } @@ -259,7 +273,7 @@ func (r *delegatingResolver) updateClientConnStateLocked() error { var addrs []resolver.Address for _, targetAddr := range endpt.Addresses { // Avoid proxy when network is not tcp. - if networkType := networkTypeFromAddr(targetAddr); networkType != "tcp" { + if skipProxy(targetAddr) { addrs = append(addrs, targetAddr) continue } @@ -340,9 +354,10 @@ func (r *delegatingResolver) updateTargetResolverState(state resolver.State) err logger.Infof("Addresses received from target resolver: %v", state.Addresses) } r.targetResolverState = &state - // If no addresses returned by resolver have network type as tcp , do not - // wait for proxy update. - if !isTCPAddressPresent(r.targetResolverState) { + // If all addresses returned by the target resolver have a non-TCP network + // type, or are listed in the `NO_PROXY` environment variable, do not wait + // for proxy update. 
+ if !needsProxyResolver(r.targetResolverState) { return r.cc.UpdateState(*r.targetResolverState) } diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 1186f1e9a..aad171cd0 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -236,3 +236,11 @@ func IsRestrictedControlPlaneCode(s *Status) bool { } return false } + +// RawStatusProto returns the internal protobuf message for use by gRPC itself. +func RawStatusProto(s *Status) *spb.Status { + if s == nil { + return nil + } + return s.s +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 171e690a3..ef56592b9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -545,7 +545,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) Method: callHdr.Method, AuthInfo: t.authInfo, } - ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) + ctxWithRequestInfo := credentials.NewContextWithRequestInfo(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err @@ -592,6 +592,9 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) // Send out timeout regardless its value. The server can detect timeout context by itself. // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. timeout := time.Until(dl) + if timeout <= 0 { + return nil, status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + } headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) } for k, v := range authData { @@ -749,6 +752,25 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS callHdr = &newCallHdr } + // The authority specified via the `CallAuthority` CallOption takes the + // highest precedence when determining the `:authority` header. It overrides + // any value present in the Host field of CallHdr. Before applying this + // override, the authority string is validated. If the credentials do not + // implement the AuthorityValidator interface, or if validation fails, the + // RPC is failed with a status code of `UNAVAILABLE`. 
+ if callHdr.Authority != "" { + auth, ok := t.authInfo.(credentials.AuthorityValidator) + if !ok { + return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "credentials type %q does not implement the AuthorityValidator interface, but authority override specified with CallAuthority call option", t.authInfo.AuthType())} + } + if err := auth.ValidateAuthority(callHdr.Authority); err != nil { + return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "failed to validate authority %q : %v", callHdr.Authority, err)} + } + newCallHdr := *callHdr + newCallHdr.Host = callHdr.Authority + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 7e53eb173..e4c3731bd 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -39,6 +39,7 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" @@ -1055,7 +1056,7 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error { return nil } -// WriteStatus sends stream status to the client and terminates the stream. +// writeStatus sends stream status to the client and terminates the stream. // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. @@ -1083,7 +1084,7 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) - if p := st.Proto(); p != nil && len(p.Details) > 0 { + if p := istatus.RawStatusProto(st); len(p.GetDetails()) > 0 { // Do not use the user's grpc-status-details-bin (if present) if we are // even attempting to set our own. delete(s.trailer, grpcStatusDetailsBinHeader) diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index f997f9fdb..607d2c4ce 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -196,11 +196,14 @@ func decodeTimeout(s string) (time.Duration, error) { if !ok { return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) } - t, err := strconv.ParseInt(s[:size-1], 10, 64) + t, err := strconv.ParseUint(s[:size-1], 10, 64) if err != nil { return 0, err } - const maxHours = math.MaxInt64 / int64(time.Hour) + if t == 0 { + return 0, fmt.Errorf("transport: timeout must be positive: %q", s) + } + const maxHours = math.MaxInt64 / uint64(time.Hour) if d == time.Hour && t > maxHours { // This timeout would overflow math.MaxInt64; clamp it. 
return time.Duration(math.MaxInt64), nil diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index af4a4aeab..1730a639f 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -540,6 +540,11 @@ type CallHdr struct { PreviousAttempts int // value of grpc-previous-rpc-attempts header to set DoneFunc func() // called when the stream is finished + + // Authority is used to explicitly override the `:authority` header. If set, + // this value takes precedence over the Host field and will be used as the + // value for the `:authority` header. + Authority string } // ClientTransport is the common interface for all gRPC client-side transport diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index ad20e9dff..47ea09f5c 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -160,6 +160,7 @@ type callInfo struct { codec baseCodec maxRetryRPCBufferSize int onFinish []func(err error) + authority string } func defaultCallInfo() *callInfo { @@ -365,6 +366,36 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { } func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} +// CallAuthority returns a CallOption that sets the HTTP/2 :authority header of +// an RPC to the specified value. When using CallAuthority, the credentials in +// use must implement the AuthorityValidator interface. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func CallAuthority(authority string) CallOption { + return AuthorityOverrideCallOption{Authority: authority} +} + +// AuthorityOverrideCallOption is a CallOption that indicates the HTTP/2 +// :authority header value to use for the call. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a later +// release. +type AuthorityOverrideCallOption struct { + Authority string +} + +func (o AuthorityOverrideCallOption) before(c *callInfo) error { + c.authority = o.Authority + return nil +} + +func (o AuthorityOverrideCallOption) after(*callInfo, *csAttempt) {} + // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default // `math.MaxInt32`. diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go index dc03731e4..67194a592 100644 --- a/vendor/google.golang.org/grpc/stats/handlers.go +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -38,6 +38,15 @@ type RPCTagInfo struct { // FailFast indicates if this RPC is failfast. // This field is only valid on client side, it's always false on server side. FailFast bool + // NameResolutionDelay indicates if the RPC needed to wait for the + // initial name resolver update before it could begin. This should only + // happen if the channel is IDLE when the RPC is started. Note that + // all retry or hedging attempts for an RPC that experienced a delay + // will have it set. + // + // This field is only valid on the client side; it is always false on + // the server side. + NameResolutionDelay bool } // Handler defines the interface for the related stats handling (e.g., RPCs, connections). 
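The hunks above add the experimental grpc.CallAuthority call option, the credentials.AuthorityValidator hook that gates it, and the CallHdr.Authority plumbing in the transport. A minimal client-side sketch of the new option, assuming a reachable server on localhost:50051 and using the standard health service as the target (the address and the override value are illustrative only):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Address is illustrative; the insecure credentials below implement the
	// new AuthorityValidator interface and accept any :authority override.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// CallAuthority overrides the HTTP/2 :authority header for this RPC only.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx,
		&healthpb.HealthCheckRequest{},
		grpc.CallAuthority("internal.example.com"))
	if err != nil {
		log.Fatalf("health check: %v", err)
	}
	log.Printf("serving status: %v", resp.GetStatus())
}

With TLS credentials the override is checked against the peer certificate via ValidateAuthority, so a value that does not match the certificate makes the RPC fail with codes.Unavailable; the insecure credentials shown in the hunks above accept any value.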
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 12163150b..d58bb6471 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -101,9 +101,9 @@ type ClientStream interface { // It must only be called after stream.CloseAndRecv has returned, or // stream.Recv has returned a non-nil error (including io.EOF). Trailer() metadata.MD - // CloseSend closes the send direction of the stream. It closes the stream - // when non-nil error is met. It is also not safe to call CloseSend - // concurrently with SendMsg. + // CloseSend closes the send direction of the stream. This method always + // returns a nil error. The status of the stream may be discovered using + // RecvMsg. It is also not safe to call CloseSend concurrently with SendMsg. CloseSend() error // Context returns the context for this stream. // @@ -212,14 +212,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } // Provide an opportunity for the first RPC to see the first service config // provided by the resolver. - if err := cc.waitForResolvedAddrs(ctx); err != nil { + nameResolutionDelayed, err := cc.waitForResolvedAddrs(ctx) + if err != nil { return nil, err } var mc serviceconfig.MethodConfig var onCommit func() newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { - return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...) } rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} @@ -257,7 +258,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return newStream(ctx, func() {}) } -func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) { callInfo := defaultCallInfo() if mc.WaitForReady != nil { callInfo.failFast = !*mc.WaitForReady @@ -296,6 +297,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client Method: method, ContentSubtype: callInfo.contentSubtype, DoneFunc: doneFunc, + Authority: callInfo.authority, } // Set our outgoing compression according to the UseCompressor CallOption, if @@ -321,19 +323,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } cs := &clientStream{ - callHdr: callHdr, - ctx: ctx, - methodConfig: &mc, - opts: opts, - callInfo: callInfo, - cc: cc, - desc: desc, - codec: callInfo.codec, - compressorV0: compressorV0, - compressorV1: compressorV1, - cancel: cancel, - firstAttempt: true, - onCommit: onCommit, + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: callInfo, + cc: cc, + desc: desc, + codec: callInfo.codec, + compressorV0: compressorV0, + compressorV1: compressorV1, + cancel: cancel, + firstAttempt: true, + onCommit: onCommit, + nameResolutionDelay: nameResolutionDelayed, } if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) @@ -417,7 +420,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) var beginTime 
time.Time shs := cs.cc.dopts.copts.StatsHandlers for _, sh := range shs { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast, NameResolutionDelay: cs.nameResolutionDelay}) beginTime = time.Now() begin := &stats.Begin{ Client: true, @@ -573,6 +576,9 @@ type clientStream struct { onCommit func() replayBuffer []replayOp // operations to replay on retry replayBufferSize int // current size of replayBuffer + // nameResolutionDelay indicates if there was a delay in the name resolution. + // This field is only valid on client side, it's always false on server side. + nameResolutionDelay bool } type replayOp struct { @@ -987,7 +993,7 @@ func (cs *clientStream) RecvMsg(m any) error { func (cs *clientStream) CloseSend() error { if cs.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? + // Return a nil error on repeated calls to this method. return nil } cs.sentLast = true @@ -1008,7 +1014,10 @@ func (cs *clientStream) CloseSend() error { binlog.Log(cs.ctx, chc) } } - // We never returned an error here for reasons. + // We don't return an error here as we expect users to read all messages + // from the stream and get the RPC status from RecvMsg(). Note that + // SendMsg() must return an error when one occurs so the application + // knows to stop sending messages, but that does not apply here. return nil } @@ -1372,7 +1381,7 @@ func (as *addrConnStream) Trailer() metadata.MD { func (as *addrConnStream) CloseSend() error { if as.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? + // Return a nil error on repeated calls to this method. return nil } as.sentLast = true diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 51da8ed59..bd82673dc 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.72.1" +const Version = "1.73.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index fe3510756..d79bcfd15 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -289,7 +289,7 @@ codeberg.org/gruf/go-structr # github.com/DmitriyVTitov/size v1.5.0 ## explicit; go 1.14 github.com/DmitriyVTitov/size -# github.com/KimMachineGun/automemlimit v0.7.2 +# github.com/KimMachineGun/automemlimit v0.7.3 ## explicit; go 1.22.0 github.com/KimMachineGun/automemlimit/memlimit # github.com/Masterminds/goutils v1.1.1 @@ -323,7 +323,7 @@ github.com/boombuler/barcode/utils ## explicit; go 1.14 github.com/buckket/go-blurhash github.com/buckket/go-blurhash/base83 -# github.com/bytedance/sonic v1.13.2 +# github.com/bytedance/sonic v1.13.3 ## explicit; go 1.17 github.com/bytedance/sonic github.com/bytedance/sonic/ast @@ -411,13 +411,13 @@ github.com/felixge/httpsnoop ## explicit; go 1.17 github.com/fsnotify/fsnotify github.com/fsnotify/fsnotify/internal -# github.com/gabriel-vasile/mimetype v1.4.8 -## explicit; go 1.20 +# github.com/gabriel-vasile/mimetype v1.4.9 +## explicit; go 1.23.0 github.com/gabriel-vasile/mimetype github.com/gabriel-vasile/mimetype/internal/charset github.com/gabriel-vasile/mimetype/internal/json github.com/gabriel-vasile/mimetype/internal/magic -# github.com/gin-contrib/cors v1.7.5 +# github.com/gin-contrib/cors v1.7.6 ## explicit; go 1.23.0 github.com/gin-contrib/cors # github.com/gin-contrib/gzip v1.2.3 @@ -427,8 +427,8 @@ github.com/gin-contrib/gzip ## explicit; go 1.23.0 github.com/gin-contrib/sessions github.com/gin-contrib/sessions/memstore -# github.com/gin-contrib/sse v1.0.0 -## explicit; go 1.13 +# github.com/gin-contrib/sse v1.1.0 +## explicit; go 1.23 github.com/gin-contrib/sse # github.com/gin-gonic/gin v1.10.1 ## explicit; go 1.20 @@ -448,7 +448,7 @@ github.com/go-ini/ini github.com/go-jose/go-jose/v4 github.com/go-jose/go-jose/v4/cipher github.com/go-jose/go-jose/v4/json -# github.com/go-logr/logr v1.4.2 +# github.com/go-logr/logr v1.4.3 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr @@ -515,7 +515,7 @@ github.com/go-playground/universal-translator # github.com/go-playground/validator/v10 v10.26.0 ## explicit; go 1.20 github.com/go-playground/validator/v10 -# github.com/go-swagger/go-swagger v0.31.0 => codeberg.org/superseriousbusiness/go-swagger v0.31.0-gts-go1.23-fix +# github.com/go-swagger/go-swagger v0.32.3 => codeberg.org/superseriousbusiness/go-swagger v0.32.3-gts-go1.23-fix ## explicit; go 1.21 github.com/go-swagger/go-swagger/cmd/swagger github.com/go-swagger/go-swagger/cmd/swagger/commands @@ -583,7 +583,7 @@ github.com/gorilla/sessions # github.com/gorilla/websocket v1.5.3 ## explicit; go 1.12 github.com/gorilla/websocket -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 ## explicit; go 1.23.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime @@ -675,7 +675,7 @@ github.com/minio/crc64nvme # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.92 +# github.com/minio/minio-go/v7 v7.0.94 ## explicit; go 1.23.0 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/internal/json @@ -692,6 +692,7 @@ github.com/minio/minio-go/v7/pkg/signer github.com/minio/minio-go/v7/pkg/singleflight github.com/minio/minio-go/v7/pkg/sse github.com/minio/minio-go/v7/pkg/tags +github.com/minio/minio-go/v7/pkg/utils # github.com/mitchellh/copystructure 
v1.2.0
 ## explicit; go 1.15
 github.com/mitchellh/copystructure
@@ -733,7 +734,7 @@ github.com/oklog/ulid
 # github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
 ## explicit; go 1.16
 github.com/pbnjay/memory
-# github.com/pelletier/go-toml/v2 v2.2.3
+# github.com/pelletier/go-toml/v2 v2.2.4
 ## explicit; go 1.21.0
 github.com/pelletier/go-toml/v2
 github.com/pelletier/go-toml/v2/internal/characters
@@ -766,7 +767,7 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal
 # github.com/prometheus/client_model v0.6.2
 ## explicit; go 1.22.0
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.64.0
+# github.com/prometheus/common v0.65.0
 ## explicit; go 1.23.0
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/model
@@ -810,9 +811,10 @@ github.com/sourcegraph/conc/panics
 github.com/spf13/afero
 github.com/spf13/afero/internal/common
 github.com/spf13/afero/mem
-# github.com/spf13/cast v1.8.0
-## explicit; go 1.19
+# github.com/spf13/cast v1.9.2
+## explicit; go 1.21.0
 github.com/spf13/cast
+github.com/spf13/cast/internal
 # github.com/spf13/cobra v1.9.1
 ## explicit; go 1.15
 github.com/spf13/cobra
@@ -920,16 +922,16 @@ github.com/twitchyliquid64/golang-asm/objabi
 github.com/twitchyliquid64/golang-asm/src
 github.com/twitchyliquid64/golang-asm/sys
 github.com/twitchyliquid64/golang-asm/unsafeheader
-# github.com/ugorji/go/codec v1.2.12
-## explicit; go 1.11
+# github.com/ugorji/go/codec v1.3.0
+## explicit; go 1.21
 github.com/ugorji/go/codec
 # github.com/ulule/limiter/v3 v3.11.2
 ## explicit; go 1.17
 github.com/ulule/limiter/v3
 github.com/ulule/limiter/v3/drivers/store/common
 github.com/ulule/limiter/v3/drivers/store/memory
-# github.com/uptrace/bun v1.2.11
-## explicit; go 1.22.0
+# github.com/uptrace/bun v1.2.14
+## explicit; go 1.23.0
 github.com/uptrace/bun
 github.com/uptrace/bun/dialect
 github.com/uptrace/bun/dialect/feature
@@ -942,14 +944,14 @@ github.com/uptrace/bun/internal/tagparser
 github.com/uptrace/bun/migrate
 github.com/uptrace/bun/migrate/sqlschema
 github.com/uptrace/bun/schema
-# github.com/uptrace/bun/dialect/pgdialect v1.2.11
-## explicit; go 1.22.0
+# github.com/uptrace/bun/dialect/pgdialect v1.2.14
+## explicit; go 1.23.0
 github.com/uptrace/bun/dialect/pgdialect
-# github.com/uptrace/bun/dialect/sqlitedialect v1.2.11
-## explicit; go 1.22.0
+# github.com/uptrace/bun/dialect/sqlitedialect v1.2.14
+## explicit; go 1.23.0
 github.com/uptrace/bun/dialect/sqlitedialect
-# github.com/uptrace/bun/extra/bunotel v1.2.11
-## explicit; go 1.22.0
+# github.com/uptrace/bun/extra/bunotel v1.2.14
+## explicit; go 1.23.0
 github.com/uptrace/bun/extra/bunotel
 # github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2
 ## explicit; go 1.22
@@ -990,18 +992,18 @@ go.mongodb.org/mongo-driver/x/bsonx/bsoncore
 ## explicit; go 1.22.0
 go.opentelemetry.io/auto/sdk
 go.opentelemetry.io/auto/sdk/internal/telemetry
-# go.opentelemetry.io/contrib/bridges/prometheus v0.61.0
+# go.opentelemetry.io/contrib/bridges/prometheus v0.62.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/contrib/bridges/prometheus
-# go.opentelemetry.io/contrib/exporters/autoexport v0.61.0
+# go.opentelemetry.io/contrib/exporters/autoexport v0.62.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/contrib/exporters/autoexport
-# go.opentelemetry.io/contrib/instrumentation/runtime v0.61.0
+# go.opentelemetry.io/contrib/instrumentation/runtime v0.62.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/contrib/instrumentation/runtime
 go.opentelemetry.io/contrib/instrumentation/runtime/internal/deprecatedruntime
 go.opentelemetry.io/contrib/instrumentation/runtime/internal/x
-# go.opentelemetry.io/otel v1.36.0
+# go.opentelemetry.io/otel v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
@@ -1017,18 +1019,20 @@ go.opentelemetry.io/otel/semconv/v1.12.0
 go.opentelemetry.io/otel/semconv/v1.20.0
 go.opentelemetry.io/otel/semconv/v1.24.0
 go.opentelemetry.io/otel/semconv/v1.26.0
+go.opentelemetry.io/otel/semconv/v1.34.0
+go.opentelemetry.io/otel/semconv/v1.34.0/goconv
 go.opentelemetry.io/otel/semconv/v1.7.0
-# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2
+# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.13.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2
+# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.13.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal
@@ -1036,7 +1040,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envco
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal
@@ -1044,47 +1048,47 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envco
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
-# go.opentelemetry.io/otel/exporters/prometheus v0.58.0
+# go.opentelemetry.io/otel/exporters/prometheus v0.59.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/exporters/prometheus
-# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2
+# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/exporters/stdout/stdoutlog
-# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0
+# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
-# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0
+# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/exporters/stdout/stdouttrace
-# go.opentelemetry.io/otel/log v0.12.2
+# go.opentelemetry.io/otel/log v0.13.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/log
 go.opentelemetry.io/otel/log/embedded
 go.opentelemetry.io/otel/log/noop
-# go.opentelemetry.io/otel/metric v1.36.0
+# go.opentelemetry.io/otel/metric v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
 go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.36.0
+# go.opentelemetry.io/otel/sdk v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/sdk
 go.opentelemetry.io/otel/sdk/instrumentation
@@ -1093,10 +1097,10 @@ go.opentelemetry.io/otel/sdk/internal/x
 go.opentelemetry.io/otel/sdk/resource
 go.opentelemetry.io/otel/sdk/trace
 go.opentelemetry.io/otel/sdk/trace/tracetest
-# go.opentelemetry.io/otel/sdk/log v0.12.2
+# go.opentelemetry.io/otel/sdk/log v0.13.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/sdk/log
-# go.opentelemetry.io/otel/sdk/metric v1.36.0
+# go.opentelemetry.io/otel/sdk/metric v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/sdk/metric
 go.opentelemetry.io/otel/sdk/metric/exemplar
@@ -1104,13 +1108,13 @@ go.opentelemetry.io/otel/sdk/metric/internal
 go.opentelemetry.io/otel/sdk/metric/internal/aggregate
 go.opentelemetry.io/otel/sdk/metric/internal/x
 go.opentelemetry.io/otel/sdk/metric/metricdata
-# go.opentelemetry.io/otel/trace v1.36.0
+# go.opentelemetry.io/otel/trace v1.37.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
 go.opentelemetry.io/otel/trace/internal/telemetry
 go.opentelemetry.io/otel/trace/noop
-# go.opentelemetry.io/proto/otlp v1.6.0
+# go.opentelemetry.io/proto/otlp v1.7.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/proto/otlp/collector/logs/v1
 go.opentelemetry.io/proto/otlp/collector/metrics/v1
@@ -1128,7 +1132,7 @@ go.uber.org/automaxprocs/maxprocs
 # go.uber.org/multierr v1.11.0
 ## explicit; go 1.19
 go.uber.org/multierr
-# golang.org/x/arch v0.16.0
+# golang.org/x/arch v0.18.0
 ## explicit; go 1.23.0
 golang.org/x/arch/x86/x86asm
 # golang.org/x/crypto v0.39.0
@@ -1155,7 +1159,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 # golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
 ## explicit; go 1.23.0
 golang.org/x/exp/constraints
-# golang.org/x/image v0.27.0
+# golang.org/x/image v0.28.0
 ## explicit; go 1.23.0
 golang.org/x/image/riff
 golang.org/x/image/vp8
@@ -1166,7 +1170,7 @@ golang.org/x/image/webp
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.40.0
+# golang.org/x/net v0.41.0
 ## explicit; go 1.23.0
 golang.org/x/net/bpf
 golang.org/x/net/context
@@ -1245,15 +1249,15 @@ golang.org/x/tools/internal/stdlib
 golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
 golang.org/x/tools/internal/versions
-# google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237
+# google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822
 ## explicit; go 1.23.0
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822
 ## explicit; go 1.23.0
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.72.1
-## explicit; go 1.23
+# google.golang.org/grpc v1.73.0
+## explicit; go 1.23.0
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -1402,12 +1406,12 @@ modernc.org/mathutil
 # modernc.org/memory v1.11.0
 ## explicit; go 1.23.0
 modernc.org/memory
-# modernc.org/sqlite v1.37.1 => gitlab.com/NyaaaWhatsUpDoc/sqlite v1.38.0-concurrency-workaround
+# modernc.org/sqlite v1.38.0 => gitlab.com/NyaaaWhatsUpDoc/sqlite v1.38.0-concurrency-workaround
 ## explicit; go 1.23.0
 modernc.org/sqlite
 modernc.org/sqlite/lib
 # mvdan.cc/xurls/v2 v2.6.0
 ## explicit; go 1.22.0
 mvdan.cc/xurls/v2
-# github.com/go-swagger/go-swagger => codeberg.org/superseriousbusiness/go-swagger v0.31.0-gts-go1.23-fix
+# github.com/go-swagger/go-swagger => codeberg.org/superseriousbusiness/go-swagger v0.32.3-gts-go1.23-fix
 # modernc.org/sqlite => gitlab.com/NyaaaWhatsUpDoc/sqlite v1.38.0-concurrency-workaround