[chore] bump dependencies (#4406)

- codeberg.org/gruf/go-ffmpreg: v0.6.9 -> v0.6.10
- github.com/ncruces/go-sqlite3: v0.27.1 -> v0.28.0
- github.com/stretchr/testify: v1.10.0 -> v1.11.1
- github.com/tdewolff/minify/v2: v2.23.11 -> v2.24.2
- go.opentelemetry.io/otel{,/*}: v1.37.0 -> v1.38.0
- go.opentelemetry.io/contrib/*: v0.62.0 -> v0.63.0

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4406
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
commit 78defcd916
authored and committed by kim, 2025-09-04 15:29:27 +02:00
274 changed files with 9213 additions and 2368 deletions

go.mod (76 lines changed)

@ -22,7 +22,7 @@ require (
codeberg.org/gruf/go-errors/v2 v2.3.2 codeberg.org/gruf/go-errors/v2 v2.3.2
codeberg.org/gruf/go-fastcopy v1.1.3 codeberg.org/gruf/go-fastcopy v1.1.3
codeberg.org/gruf/go-fastpath/v2 v2.0.0 codeberg.org/gruf/go-fastpath/v2 v2.0.0
codeberg.org/gruf/go-ffmpreg v0.6.9 codeberg.org/gruf/go-ffmpreg v0.6.10
codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf
codeberg.org/gruf/go-kv/v2 v2.0.7 codeberg.org/gruf/go-kv/v2 v2.0.7
codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f
@ -54,16 +54,16 @@ require (
github.com/miekg/dns v1.1.68 github.com/miekg/dns v1.1.68
github.com/minio/minio-go/v7 v7.0.95 github.com/minio/minio-go/v7 v7.0.95
github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/mapstructure v1.5.0
github.com/ncruces/go-sqlite3 v0.27.1 github.com/ncruces/go-sqlite3 v0.28.0
github.com/oklog/ulid v1.3.1 github.com/oklog/ulid v1.3.1
github.com/pquerna/otp v1.5.0 github.com/pquerna/otp v1.5.0
github.com/rivo/uniseg v0.4.7 github.com/rivo/uniseg v0.4.7
github.com/spf13/cast v1.9.2 github.com/spf13/cast v1.9.2
github.com/spf13/cobra v1.9.1 github.com/spf13/cobra v1.10.1
github.com/spf13/pflag v1.0.7 github.com/spf13/pflag v1.0.10
github.com/spf13/viper v1.20.1 github.com/spf13/viper v1.20.1
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.11.1
github.com/tdewolff/minify/v2 v2.23.11 github.com/tdewolff/minify/v2 v2.24.2
github.com/technologize/otel-go-contrib v1.1.1 github.com/technologize/otel-go-contrib v1.1.1
github.com/temoto/robotstxt v1.1.2 github.com/temoto/robotstxt v1.1.2
github.com/tetratelabs/wazero v1.9.0 github.com/tetratelabs/wazero v1.9.0
@ -75,13 +75,13 @@ require (
github.com/uptrace/bun/extra/bunotel v1.2.15 github.com/uptrace/bun/extra/bunotel v1.2.15
github.com/wagslane/go-password-validator v0.3.0 github.com/wagslane/go-password-validator v0.3.0
github.com/yuin/goldmark v1.7.13 github.com/yuin/goldmark v1.7.13
go.opentelemetry.io/contrib/exporters/autoexport v0.62.0 go.opentelemetry.io/contrib/exporters/autoexport v0.63.0
go.opentelemetry.io/contrib/instrumentation/runtime v0.62.0 go.opentelemetry.io/contrib/instrumentation/runtime v0.63.0
go.opentelemetry.io/otel v1.37.0 go.opentelemetry.io/otel v1.38.0
go.opentelemetry.io/otel/metric v1.37.0 go.opentelemetry.io/otel/metric v1.38.0
go.opentelemetry.io/otel/sdk v1.37.0 go.opentelemetry.io/otel/sdk v1.38.0
go.opentelemetry.io/otel/sdk/metric v1.37.0 go.opentelemetry.io/otel/sdk/metric v1.38.0
go.opentelemetry.io/otel/trace v1.37.0 go.opentelemetry.io/otel/trace v1.38.0
go.uber.org/automaxprocs v1.6.0 go.uber.org/automaxprocs v1.6.0
golang.org/x/crypto v0.41.0 golang.org/x/crypto v0.41.0
golang.org/x/image v0.30.0 golang.org/x/image v0.30.0
@ -111,7 +111,7 @@ require (
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
github.com/bytedance/sonic v1.13.3 // indirect github.com/bytedance/sonic v1.13.3 // indirect
github.com/bytedance/sonic/loader v0.2.4 // indirect github.com/bytedance/sonic/loader v0.2.4 // indirect
github.com/cenkalti/backoff/v5 v5.0.2 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudwego/base64x v0.1.5 // indirect github.com/cloudwego/base64x v0.1.5 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@ -127,7 +127,7 @@ require (
github.com/gin-contrib/sse v1.1.0 // indirect github.com/gin-contrib/sse v1.1.0 // indirect
github.com/go-errors/errors v1.1.1 // indirect github.com/go-errors/errors v1.1.1 // indirect
github.com/go-ini/ini v1.67.0 // indirect github.com/go-ini/ini v1.67.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-jose/go-jose/v4 v4.1.1 // indirect
github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.23.0 // indirect github.com/go-openapi/analysis v0.23.0 // indirect
@ -155,7 +155,7 @@ require (
github.com/gorilla/securecookie v1.1.2 // indirect github.com/gorilla/securecookie v1.1.2 // indirect
github.com/gorilla/sessions v1.4.0 // indirect github.com/gorilla/sessions v1.4.0 // indirect
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/huandu/xstrings v1.4.0 // indirect github.com/huandu/xstrings v1.4.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
@ -187,10 +187,10 @@ require (
github.com/philhofer/fwd v1.2.0 // indirect github.com/philhofer/fwd v1.2.0 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.22.0 // indirect github.com/prometheus/client_golang v1.23.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.65.0 // indirect github.com/prometheus/common v0.65.0 // indirect
github.com/prometheus/otlptranslator v0.0.0-20250717125610-8549f4ab4f8f // indirect github.com/prometheus/otlptranslator v0.0.2 // indirect
github.com/prometheus/procfs v0.17.0 // indirect github.com/prometheus/procfs v0.17.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
@ -202,7 +202,7 @@ require (
github.com/sourcegraph/conc v0.3.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect github.com/spf13/afero v1.12.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect
github.com/tdewolff/parse/v2 v2.8.2-0.20250806174018-50048bb39781 // indirect github.com/tdewolff/parse/v2 v2.8.3 // indirect
github.com/tinylib/msgp v1.3.0 // indirect github.com/tinylib/msgp v1.3.0 // indirect
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
github.com/toqueteos/webbrowser v1.2.0 // indirect github.com/toqueteos/webbrowser v1.2.0 // indirect
@ -213,31 +213,31 @@ require (
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
go.mongodb.org/mongo-driver v1.17.3 // indirect go.mongodb.org/mongo-driver v1.17.3 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/bridges/prometheus v0.62.0 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.13.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.13.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.59.1 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.60.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect
go.opentelemetry.io/otel/log v0.13.0 // indirect go.opentelemetry.io/otel/log v0.14.0 // indirect
go.opentelemetry.io/otel/sdk/log v0.13.0 // indirect go.opentelemetry.io/otel/sdk/log v0.14.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.18.0 // indirect golang.org/x/arch v0.18.0 // indirect
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
golang.org/x/mod v0.26.0 // indirect golang.org/x/mod v0.26.0 // indirect
golang.org/x/sync v0.16.0 // indirect golang.org/x/sync v0.16.0 // indirect
golang.org/x/tools v0.35.0 // indirect golang.org/x/tools v0.35.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/grpc v1.73.0 // indirect google.golang.org/grpc v1.75.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect google.golang.org/protobuf v1.36.8 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
modernc.org/libc v1.66.3 // indirect modernc.org/libc v1.66.3 // indirect
modernc.org/mathutil v1.7.1 // indirect modernc.org/mathutil v1.7.1 // indirect

go.sum (generated, 160 lines changed)

@ -28,8 +28,8 @@ codeberg.org/gruf/go-fastcopy v1.1.3 h1:Jo9VTQjI6KYimlw25PPc7YLA3Xm+XMQhaHwKnM7x
codeberg.org/gruf/go-fastcopy v1.1.3/go.mod h1:GDDYR0Cnb3U/AIfGM3983V/L+GN+vuwVMvrmVABo21s= codeberg.org/gruf/go-fastcopy v1.1.3/go.mod h1:GDDYR0Cnb3U/AIfGM3983V/L+GN+vuwVMvrmVABo21s=
codeberg.org/gruf/go-fastpath/v2 v2.0.0 h1:iAS9GZahFhyWEH0KLhFEJR+txx1ZhMXxYzu2q5Qo9c0= codeberg.org/gruf/go-fastpath/v2 v2.0.0 h1:iAS9GZahFhyWEH0KLhFEJR+txx1ZhMXxYzu2q5Qo9c0=
codeberg.org/gruf/go-fastpath/v2 v2.0.0/go.mod h1:3pPqu5nZjpbRrOqvLyAK7puS1OfEtQvjd6342Cwz56Q= codeberg.org/gruf/go-fastpath/v2 v2.0.0/go.mod h1:3pPqu5nZjpbRrOqvLyAK7puS1OfEtQvjd6342Cwz56Q=
codeberg.org/gruf/go-ffmpreg v0.6.9 h1:EbadyKAekYwwUlKC+4VBZhhN0iPm2uP3T1nPFSWkFb4= codeberg.org/gruf/go-ffmpreg v0.6.10 h1:02QvdA4p7UExT/Df+x7omnZbQtdjUf6RmXt5NsvR+LE=
codeberg.org/gruf/go-ffmpreg v0.6.9/go.mod h1:tGqIMh/I2cizqauxxNAN+WGkICI0j5G3xwF1uBkyw1E= codeberg.org/gruf/go-ffmpreg v0.6.10/go.mod h1:tGqIMh/I2cizqauxxNAN+WGkICI0j5G3xwF1uBkyw1E=
codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf h1:84s/ii8N6lYlskZjHH+DG6jyia8w2mXMZlRwFn8Gs3A= codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf h1:84s/ii8N6lYlskZjHH+DG6jyia8w2mXMZlRwFn8Gs3A=
codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf/go.mod h1:zZAICsp5rY7+hxnws2V0ePrWxE0Z2Z/KXcN3p/RQCfk= codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf/go.mod h1:zZAICsp5rY7+hxnws2V0ePrWxE0Z2Z/KXcN3p/RQCfk=
codeberg.org/gruf/go-kv v1.6.5 h1:ttPf0NA8F79pDqBttSudPTVCZmGncumeNIxmeM9ztz0= codeberg.org/gruf/go-kv v1.6.5 h1:ttPf0NA8F79pDqBttSudPTVCZmGncumeNIxmeM9ztz0=
@ -96,8 +96,8 @@ github.com/bytedance/sonic v1.13.3/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY=
github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
@ -163,8 +163,8 @@ github.com/go-errors/errors v1.1.1 h1:ljK/pL5ltg3qoN+OtN6yCv9HWSfMwxSx90GJCZQxYN
github.com/go-errors/errors v1.1.1/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs= github.com/go-errors/errors v1.1.1/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@ -257,8 +257,8 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
@ -340,8 +340,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-sqlite3 v0.27.1 h1:suqlM7xhSyDVMV9RgX99MCPqt9mB6YOCzHZuiI36K34= github.com/ncruces/go-sqlite3 v0.28.0 h1:AQVTUPgfamONl09LS+4rGFbHmLKM8/QrJJJi1UukjEQ=
github.com/ncruces/go-sqlite3 v0.27.1/go.mod h1:gpF5s+92aw2MbDmZK0ZOnCdFlpe11BH20CTspVqri0c= github.com/ncruces/go-sqlite3 v0.28.0/go.mod h1:WqvLhYwtEiZzg1H8BIeahUv/DxbmR+3xG5jDHDiBAGk=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M= github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M=
@ -364,14 +364,14 @@ github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs=
github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/otlptranslator v0.0.0-20250717125610-8549f4ab4f8f h1:QQB6SuvGZjK8kdc2YaLJpYhV8fxauOsjE6jgcL6YJ8Q= github.com/prometheus/otlptranslator v0.0.2 h1:+1CdeLVrRQ6Psmhnobldo0kTp96Rj80DRXRd5OSnMEQ=
github.com/prometheus/otlptranslator v0.0.0-20250717125610-8549f4ab4f8f/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= github.com/prometheus/otlptranslator v0.0.2/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
@ -410,11 +410,11 @@ github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE=
github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@ -427,14 +427,14 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tdewolff/minify/v2 v2.23.11 h1:cZqTVCtuVvPC8/GbCvYgIcdAQGmoxEObZzKeKIUixTE= github.com/tdewolff/minify/v2 v2.24.2 h1:vnY3nTulEAbCAAlxTxPPDkzG24rsq31SOzp63yT+7mo=
github.com/tdewolff/minify/v2 v2.23.11/go.mod h1:vmkbfGQ5hp/eYB+TswNWKma67S0a+32HBL+mFWxjZ2Q= github.com/tdewolff/minify/v2 v2.24.2/go.mod h1:1JrCtoZXaDbqioQZfk3Jdmr0GPJKiU7c1Apmb+7tCeE=
github.com/tdewolff/parse/v2 v2.8.2-0.20250806174018-50048bb39781 h1:2qicgFovKg1XtX7Wf6GwexUdpb7q/jMIE2IgkYsVAvE= github.com/tdewolff/parse/v2 v2.8.3 h1:5VbvtJ83cfb289A1HzRA9sf02iT8YyUwN84ezjkdY1I=
github.com/tdewolff/parse/v2 v2.8.2-0.20250806174018-50048bb39781/go.mod h1:Hwlni2tiVNKyzR1o6nUs4FOF07URA+JLBLd6dlIXYqo= github.com/tdewolff/parse/v2 v2.8.3/go.mod h1:Hwlni2tiVNKyzR1o6nUs4FOF07URA+JLBLd6dlIXYqo=
github.com/tdewolff/test v1.0.11 h1:FdLbwQVHxqG16SlkGveC0JVyrJN62COWTRyUFzfbtBE= github.com/tdewolff/test v1.0.11 h1:FdLbwQVHxqG16SlkGveC0JVyrJN62COWTRyUFzfbtBE=
github.com/tdewolff/test v1.0.11/go.mod h1:XPuWBzvdUzhCuxWO1ojpXsyzsA5bFoS3tO/Q3kFuTG8= github.com/tdewolff/test v1.0.11/go.mod h1:XPuWBzvdUzhCuxWO1ojpXsyzsA5bFoS3tO/Q3kFuTG8=
github.com/technologize/otel-go-contrib v1.1.1 h1:wZH9aSPNWZWIkEh3vfaKfMb15AJ80jJ1aVj/4GZdqIw= github.com/technologize/otel-go-contrib v1.1.1 h1:wZH9aSPNWZWIkEh3vfaKfMb15AJ80jJ1aVj/4GZdqIw=
@ -516,52 +516,52 @@ go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeH
go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/bridges/prometheus v0.62.0 h1:0mfk3D3068LMGpIhxwc0BqRlBOBHVgTP9CygmnJM/TI= go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 h1:/Rij/t18Y7rUayNg7Id6rPrEnHgorxYabm2E6wUdPP4=
go.opentelemetry.io/contrib/bridges/prometheus v0.62.0/go.mod h1:hStk98NJy1wvlrXIqWsli+uELxRRseBMld+gfm2xPR4= go.opentelemetry.io/contrib/bridges/prometheus v0.63.0/go.mod h1:AdyDPn6pkbkt2w01n3BubRVk7xAsCRq1Yg1mpfyA/0E=
go.opentelemetry.io/contrib/exporters/autoexport v0.62.0 h1:aCpZ6vvmOj5GHg1eUygjS/05mlQaEBsQDdTw5yT8EsE= go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 h1:NLnZybb9KkfMXPwZhd5diBYJoVxiO9Qa06dacEA7ySY=
go.opentelemetry.io/contrib/exporters/autoexport v0.62.0/go.mod h1:1xHkmmL3bQm8m86HVoZTdgK/LIY5JpxdAWjog6cdtUs= go.opentelemetry.io/contrib/exporters/autoexport v0.63.0/go.mod h1:OvRg7gm5WRSCtxzGSsrFHbDLToYlStHNZQ+iPNIyD6g=
go.opentelemetry.io/contrib/instrumentation/runtime v0.62.0 h1:ZIt0ya9/y4WyRIzfLC8hQRRsWg0J9M9GyaGtIMiElZI= go.opentelemetry.io/contrib/instrumentation/runtime v0.63.0 h1:PeBoRj6af6xMI7qCupwFvTbbnd49V7n5YpG6pg8iDYQ=
go.opentelemetry.io/contrib/instrumentation/runtime v0.62.0/go.mod h1:F1aJ9VuiKWOlWwKdTYDUp1aoS0HzQxg38/VLxKmhm5U= go.opentelemetry.io/contrib/instrumentation/runtime v0.63.0/go.mod h1:ingqBCtMCe8I4vpz/UVzCW6sxoqgZB37nao91mLQ3Bw=
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.13.0 h1:z6lNIajgEBVtQZHjfw2hAccPEBDs+nx58VemmXWa2ec= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 h1:OMqPldHt79PqWKOMYIAQs3CxAi7RLgPxwfFSwr4ZxtM=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.13.0/go.mod h1:+kyc3bRx/Qkq05P6OCu3mTEIOxYRYzoIg+JsUp5X+PM= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0/go.mod h1:1biG4qiqTxKiUCtoWDPpL3fB3KxVwCiGw81j3nKMuHE=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.13.0 h1:zUfYw8cscHHLwaY8Xz3fiJu+R59xBnkgq2Zr1lwmK/0= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 h1:QQqYw3lkrzwVsoEX0w//EhH/TCnpRdEenKBOOEIMjWc=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.13.0/go.mod h1:514JLMCcFLQFS8cnTepOk6I09cKWJ5nGHBxHrMJ8Yfg= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0/go.mod h1:gSVQcr17jk2ig4jqJ2DX30IdWH251JcNAecvrqTxH1s=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0 h1:zG8GlgXCJQd5BU98C0hZnBbElszTmUgCNCfYneaDL0A= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0/go.mod h1:hOfBCz8kv/wuq73Mx2H2QnWokh/kHZxkh6SNF2bdKtw= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0 h1:9PgnL3QNlj10uGxExowIDIZu66aVBwWhXmbOp1pa6RA= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 h1:Oe2z/BCg5q7k4iXC3cqJxKYg0ieRiOqF0cecFYdPTwk=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0/go.mod h1:0ineDcLELf6JmKfuo0wvvhAVMuxWFYvkTin2iV4ydPQ= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0/go.mod h1:ZQM5lAJpOsKnYagGg/zV2krVqTtaVdYdDkhMoX6Oalg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
go.opentelemetry.io/otel/exporters/prometheus v0.59.1 h1:HcpSkTkJbggT8bjYP+BjyqPWlD17BH9C5CYNKeDzmcA= go.opentelemetry.io/otel/exporters/prometheus v0.60.0 h1:cGtQxGvZbnrWdC2GyjZi0PDKVSLWP/Jocix3QWfXtbo=
go.opentelemetry.io/otel/exporters/prometheus v0.59.1/go.mod h1:0FJL+gjuUoM07xzik3KPBaN+nz/CoB15kV6WLMiXZag= go.opentelemetry.io/otel/exporters/prometheus v0.60.0/go.mod h1:hkd1EekxNo69PTV4OWFGZcKQiIqg0RfuWExcPKFvepk=
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0 h1:yEX3aC9KDgvYPhuKECHbOlr5GLwH6KTjLJ1sBSkkxkc= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 h1:B/g+qde6Mkzxbry5ZZag0l7QrQBCtVm7lVjaLgmpje8=
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0/go.mod h1:/GXR0tBmmkxDaCUGahvksvp66mx4yh5+cFXgSlhg0vQ= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0/go.mod h1:mOJK8eMmgW6ocDJn6Bn11CcZ05gi3P8GylBXEkZtbgA=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0 h1:6VjV6Et+1Hd2iLZEPtdV7vie80Yyqf7oikJLjQ/myi0= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 h1:wm/Q0GAAykXv83wzcKzGGqAnnfLFyFe7RslekZuv+VI=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0/go.mod h1:u8hcp8ji5gaM/RfcOo8z9NMnf1pVLfVY7lBY2VOGuUU= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0/go.mod h1:ra3Pa40+oKjvYh+ZD3EdxFZZB0xdMfuileHAm4nNN7w=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 h1:SNhVp/9q4Go/XHBkQ1/d5u9P/U+L1yaGPoi0x+mStaI= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0/go.mod h1:tx8OOlGH6R4kLV67YaYO44GFXloEjGPZuMjEkaaqIp4= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE=
go.opentelemetry.io/otel/log v0.13.0 h1:yoxRoIZcohB6Xf0lNv9QIyCzQvrtGZklVbdCoyb7dls= go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM=
go.opentelemetry.io/otel/log v0.13.0/go.mod h1:INKfG4k1O9CL25BaM1qLe0zIedOpvlS5Z7XgSbmN83E= go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno=
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/log v0.13.0 h1:I3CGUszjM926OphK8ZdzF+kLqFvfRY/IIoFq/TjwfaQ= go.opentelemetry.io/otel/sdk/log v0.14.0 h1:JU/U3O7N6fsAXj0+CXz21Czg532dW2V4gG1HE/e8Zrg=
go.opentelemetry.io/otel/sdk/log v0.13.0/go.mod h1:lOrQyCCXmpZdN7NchXb6DOZZa1N5G1R2tm5GMMTpDBw= go.opentelemetry.io/otel/sdk/log v0.14.0/go.mod h1:imQvII+0ZylXfKU7/wtOND8Hn4OpT3YUoIgqJVksUkM=
go.opentelemetry.io/otel/sdk/log/logtest v0.13.0 h1:9yio6AFZ3QD9j9oqshV1Ibm9gPLlHNxurno5BreMtIA= go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 h1:Ijbtz+JKXl8T2MngiwqBlPaHqc4YCaP/i13Qrow6gAM=
go.opentelemetry.io/otel/sdk/log/logtest v0.13.0/go.mod h1:QOGiAJHl+fob8Nu85ifXfuQYmJTFAvcrxL6w5/tu168= go.opentelemetry.io/otel/sdk/log/logtest v0.14.0/go.mod h1:dCU8aEL6q+L9cYTqcVOk8rM9Tp8WdnHOPLiBgp0SGOA=
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@ -669,14 +669,16 @@ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxb
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

Binary file not shown.


@ -1,7 +1,7 @@
package backoff package backoff
import ( import (
"math/rand" "math/rand/v2"
"time" "time"
) )
@ -28,13 +28,7 @@ multiplied by the exponential, that is, between 2 and 6 seconds.
Note: MaxInterval caps the RetryInterval and not the randomized interval. Note: MaxInterval caps the RetryInterval and not the randomized interval.
If the time elapsed since an ExponentialBackOff instance is created goes past the Example: Given the following default arguments, for 9 tries the sequence will be:
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
The elapsed time can be reset by calling Reset().
Example: Given the following default arguments, for 10 tries the sequence will be,
and assuming we go over the MaxElapsedTime on the 10th try:
Request # RetryInterval (seconds) Randomized Interval (seconds) Request # RetryInterval (seconds) Randomized Interval (seconds)
@ -47,7 +41,6 @@ and assuming we go over the MaxElapsedTime on the 10th try:
7 5.692 [2.846, 8.538] 7 5.692 [2.846, 8.538]
8 8.538 [4.269, 12.807] 8 8.538 [4.269, 12.807]
9 12.807 [6.403, 19.210] 9 12.807 [6.403, 19.210]
10 19.210 backoff.Stop
Note: Implementation is not thread-safe. Note: Implementation is not thread-safe.
*/ */


@ -47,7 +47,7 @@ func WithNotify(n Notify) RetryOption {
} }
} }
// WithMaxTries limits the number of retry attempts. // WithMaxTries limits the number of all attempts.
func WithMaxTries(n uint) RetryOption { func WithMaxTries(n uint) RetryOption {
return func(args *retryOptions) { return func(args *retryOptions) {
args.MaxTries = n args.MaxTries = n
@ -97,7 +97,7 @@ func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOpti
// Handle permanent errors without retrying. // Handle permanent errors without retrying.
var permanent *PermanentError var permanent *PermanentError
if errors.As(err, &permanent) { if errors.As(err, &permanent) {
return res, err return res, permanent.Unwrap()
} }
// Stop retrying if context is cancelled. // Stop retrying if context is cancelled.
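
The retry.go hunk above carries two behavioural changes in cenkalti/backoff v5.0.3: WithMaxTries is now documented as capping all attempts rather than only retries, and a PermanentError is unwrapped before being returned to the caller. A minimal sketch of what a caller sees, assuming the package's Permanent helper and an Operation[T] of the form func() (T, error), both implied by the signatures in this diff but not shown:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v5"
)

var errBadRequest = errors.New("bad request")

func main() {
	attempts := 0
	op := func() (string, error) {
		attempts++
		// Permanent errors abort the retry loop on the spot; with v5.0.3 the
		// caller receives the unwrapped error back (see the hunk above).
		return "", backoff.Permanent(errBadRequest)
	}

	// Per the updated doc comment, WithMaxTries(3) caps all attempts at three.
	_, err := backoff.Retry(context.Background(), op, backoff.WithMaxTries(3))

	fmt.Println(attempts, errors.Is(err, errBadRequest)) // 1 true
}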


@ -1,3 +1,8 @@
## Changed
- Defined a custom error, ErrUnexpectedSignatureAlgorithm, returned when a JWS
header contains an unsupported signature algorithm.
# v4.0.4 # v4.0.4
## Fixed ## Fixed


@ -274,7 +274,7 @@ func validateAlgEnc(headers rawHeader, keyAlgorithms []KeyAlgorithm, contentEncr
if alg != "" && !containsKeyAlgorithm(keyAlgorithms, alg) { if alg != "" && !containsKeyAlgorithm(keyAlgorithms, alg) {
return fmt.Errorf("unexpected key algorithm %q; expected %q", alg, keyAlgorithms) return fmt.Errorf("unexpected key algorithm %q; expected %q", alg, keyAlgorithms)
} }
if alg != "" && !containsContentEncryption(contentEncryption, enc) { if enc != "" && !containsContentEncryption(contentEncryption, enc) {
return fmt.Errorf("unexpected content encryption algorithm %q; expected %q", enc, contentEncryption) return fmt.Errorf("unexpected content encryption algorithm %q; expected %q", enc, contentEncryption)
} }
return nil return nil
@ -288,11 +288,20 @@ func ParseEncryptedCompact(
keyAlgorithms []KeyAlgorithm, keyAlgorithms []KeyAlgorithm,
contentEncryption []ContentEncryption, contentEncryption []ContentEncryption,
) (*JSONWebEncryption, error) { ) (*JSONWebEncryption, error) {
// Five parts is four separators var parts [5]string
if strings.Count(input, ".") != 4 { var ok bool
return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts")
for i := range 4 {
parts[i], input, ok = strings.Cut(input, ".")
if !ok {
return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts")
}
} }
parts := strings.SplitN(input, ".", 5) // Validate that the last part does not contain more dots
if strings.ContainsRune(input, '.') {
return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts")
}
parts[4] = input
rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
if err != nil { if err != nil {


@ -239,7 +239,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
keyPub = key keyPub = key
} }
} else { } else {
return fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv) return fmt.Errorf("go-jose/go-jose: unknown curve '%s'", raw.Crv)
} }
default: default:
return fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty) return fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty)


@ -75,7 +75,14 @@ type Signature struct {
original *rawSignatureInfo original *rawSignatureInfo
} }
// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization. // ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization. Validation fails if
// the JWS is signed with an algorithm that isn't in the provided list of signature algorithms.
// Applications should decide for themselves which signature algorithms are acceptable. If you're
// not sure which signature algorithms your application might receive, consult the documentation of
// the program which provides them or the protocol that you are implementing. You can also try
// getting an example JWS and decoding it with a tool like https://jwt.io to see what its "alg"
// header parameter indicates. The signature on the JWS does not get validated during parsing. Call
// Verify() after parsing to validate the signature and obtain the payload.
// //
// https://datatracker.ietf.org/doc/html/rfc7515#section-7 // https://datatracker.ietf.org/doc/html/rfc7515#section-7
func ParseSigned( func ParseSigned(
@ -90,7 +97,14 @@ func ParseSigned(
return parseSignedCompact(signature, nil, signatureAlgorithms) return parseSignedCompact(signature, nil, signatureAlgorithms)
} }
// ParseSignedCompact parses a message in JWS Compact Serialization. // ParseSignedCompact parses a message in JWS Compact Serialization. Validation fails if the JWS is
// signed with an algorithm that isn't in the provided list of signature algorithms. Applications
// should decide for themselves which signature algorithms are acceptable.If you're not sure which
// signature algorithms your application might receive, consult the documentation of the program
// which provides them or the protocol that you are implementing. You can also try getting an
// example JWS and decoding it with a tool like https://jwt.io to see what its "alg" header
// parameter indicates. The signature on the JWS does not get validated during parsing. Call
// Verify() after parsing to validate the signature and obtain the payload.
// //
// https://datatracker.ietf.org/doc/html/rfc7515#section-7.1 // https://datatracker.ietf.org/doc/html/rfc7515#section-7.1
func ParseSignedCompact( func ParseSignedCompact(
@ -101,6 +115,15 @@ func ParseSignedCompact(
} }
// ParseDetached parses a signed message in compact serialization format with detached payload. // ParseDetached parses a signed message in compact serialization format with detached payload.
// Validation fails if the JWS is signed with an algorithm that isn't in the provided list of
// signature algorithms. Applications should decide for themselves which signature algorithms are
// acceptable. If you're not sure which signature algorithms your application might receive, consult
// the documentation of the program which provides them or the protocol that you are implementing.
// You can also try getting an example JWS and decoding it with a tool like https://jwt.io to see
// what its "alg" header parameter indicates. The signature on the JWS does not get validated during
// parsing. Call Verify() after parsing to validate the signature and obtain the payload.
//
// https://datatracker.ietf.org/doc/html/rfc7515#appendix-F
func ParseDetached( func ParseDetached(
signature string, signature string,
payload []byte, payload []byte,
@ -181,6 +204,25 @@ func containsSignatureAlgorithm(haystack []SignatureAlgorithm, needle SignatureA
return false return false
} }
// ErrUnexpectedSignatureAlgorithm is returned when the signature algorithm in
// the JWS header does not match one of the expected algorithms.
type ErrUnexpectedSignatureAlgorithm struct {
// Got is the signature algorithm found in the JWS header.
Got SignatureAlgorithm
expected []SignatureAlgorithm
}
func (e *ErrUnexpectedSignatureAlgorithm) Error() string {
return fmt.Sprintf("unexpected signature algorithm %q; expected %q", e.Got, e.expected)
}
func newErrUnexpectedSignatureAlgorithm(got SignatureAlgorithm, expected []SignatureAlgorithm) error {
return &ErrUnexpectedSignatureAlgorithm{
Got: got,
expected: expected,
}
}
// sanitized produces a cleaned-up JWS object from the raw JSON. // sanitized produces a cleaned-up JWS object from the raw JSON.
func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgorithm) (*JSONWebSignature, error) { func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgorithm) (*JSONWebSignature, error) {
if len(signatureAlgorithms) == 0 { if len(signatureAlgorithms) == 0 {
@ -236,8 +278,7 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo
alg := SignatureAlgorithm(signature.Header.Algorithm) alg := SignatureAlgorithm(signature.Header.Algorithm)
if !containsSignatureAlgorithm(signatureAlgorithms, alg) { if !containsSignatureAlgorithm(signatureAlgorithms, alg) {
return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q", return nil, newErrUnexpectedSignatureAlgorithm(alg, signatureAlgorithms)
alg, signatureAlgorithms)
} }
if signature.header != nil { if signature.header != nil {
@ -285,8 +326,7 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo
alg := SignatureAlgorithm(obj.Signatures[i].Header.Algorithm) alg := SignatureAlgorithm(obj.Signatures[i].Header.Algorithm)
if !containsSignatureAlgorithm(signatureAlgorithms, alg) { if !containsSignatureAlgorithm(signatureAlgorithms, alg) {
return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q", return nil, newErrUnexpectedSignatureAlgorithm(alg, signatureAlgorithms)
alg, signatureAlgorithms)
} }
if obj.Signatures[i].header != nil { if obj.Signatures[i].header != nil {
@ -321,35 +361,43 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo
return obj, nil return obj, nil
} }
const tokenDelim = "."
// parseSignedCompact parses a message in compact format. // parseSignedCompact parses a message in compact format.
func parseSignedCompact( func parseSignedCompact(
input string, input string,
payload []byte, payload []byte,
signatureAlgorithms []SignatureAlgorithm, signatureAlgorithms []SignatureAlgorithm,
) (*JSONWebSignature, error) { ) (*JSONWebSignature, error) {
// Three parts is two separators protected, s, ok := strings.Cut(input, tokenDelim)
if strings.Count(input, ".") != 2 { if !ok { // no period found
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
}
claims, sig, ok := strings.Cut(s, tokenDelim)
if !ok { // only one period found
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
}
if strings.ContainsRune(sig, '.') { // too many periods found
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
} }
parts := strings.SplitN(input, ".", 3)
if parts[1] != "" && payload != nil { if claims != "" && payload != nil {
return nil, fmt.Errorf("go-jose/go-jose: payload is not detached") return nil, fmt.Errorf("go-jose/go-jose: payload is not detached")
} }
rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) rawProtected, err := base64.RawURLEncoding.DecodeString(protected)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if payload == nil { if payload == nil {
payload, err = base64.RawURLEncoding.DecodeString(parts[1]) payload, err = base64.RawURLEncoding.DecodeString(claims)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
signature, err := base64.RawURLEncoding.DecodeString(parts[2]) signature, err := base64.RawURLEncoding.DecodeString(sig)
if err != nil { if err != nil {
return nil, err return nil, err
} }
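
The jws.go changes above replace the old string-formatted "unexpected signature algorithm" error with the exported ErrUnexpectedSignatureAlgorithm type, so callers can branch on it with errors.As. A hedged sketch of that pattern against go-jose v4.1.1; the compact token below is an illustrative placeholder whose protected header declares ES256:

package main

import (
	"errors"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	// Placeholder compact JWS: header {"alg":"ES256"}, payload {"sub":"demo"}.
	const token = "eyJhbGciOiJFUzI1NiJ9.eyJzdWIiOiJkZW1vIn0.c2ln"

	// Only RS256 is allowed here, so parsing fails on the algorithm check.
	_, err := jose.ParseSigned(token, []jose.SignatureAlgorithm{jose.RS256})

	var algErr *jose.ErrUnexpectedSignatureAlgorithm
	if errors.As(err, &algErr) {
		fmt.Println("rejected algorithm:", algErr.Got) // ES256
	}
}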


@ -30,8 +30,6 @@ import (
"hash" "hash"
"io" "io"
"golang.org/x/crypto/pbkdf2"
josecipher "github.com/go-jose/go-jose/v4/cipher" josecipher "github.com/go-jose/go-jose/v4/cipher"
) )
@ -330,7 +328,10 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie
// derive key // derive key
keyLen, h := getPbkdf2Params(alg) keyLen, h := getPbkdf2Params(alg)
key := pbkdf2.Key(ctx.key, salt, ctx.p2c, keyLen, h) key, err := pbkdf2Key(h, string(ctx.key), salt, ctx.p2c, keyLen)
if err != nil {
return recipientInfo{}, nil
}
// use AES cipher with derived key // use AES cipher with derived key
block, err := aes.NewCipher(key) block, err := aes.NewCipher(key)
@ -432,7 +433,10 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien
// derive key // derive key
keyLen, h := getPbkdf2Params(alg) keyLen, h := getPbkdf2Params(alg)
key := pbkdf2.Key(ctx.key, salt, p2c, keyLen, h) key, err := pbkdf2Key(h, string(ctx.key), salt, p2c, keyLen)
if err != nil {
return nil, err
}
// use AES cipher with derived key // use AES cipher with derived key
block, err := aes.NewCipher(key) block, err := aes.NewCipher(key)


@ -0,0 +1,28 @@
//go:build go1.24
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"crypto/pbkdf2"
"hash"
)
func pbkdf2Key(h func() hash.Hash, password string, salt []byte, iter, keyLen int) ([]byte, error) {
return pbkdf2.Key(h, password, salt, iter, keyLen)
}


@ -0,0 +1,29 @@
//go:build !go1.24
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"hash"
"golang.org/x/crypto/pbkdf2"
)
func pbkdf2Key(h func() hash.Hash, password string, salt []byte, iter, keyLen int) ([]byte, error) {
return pbkdf2.Key([]byte(password), salt, iter, keyLen, h), nil
}


@ -604,15 +604,6 @@ func (r resultRowsAffected) RowsAffected() (int64, error) {
return int64(r), nil return int64(r), nil
} }
type rows struct {
ctx context.Context
*stmt
names []string
types []string
nulls []bool
scans []scantype
}
type scantype byte type scantype byte
const ( const (
@ -648,10 +639,20 @@ func scanFromDecl(decl string) scantype {
return _ANY return _ANY
} }
type rows struct {
ctx context.Context
*stmt
names []string
types []string
nulls []bool
scans []scantype
}
var ( var (
// Ensure these interfaces are implemented: // Ensure these interfaces are implemented:
_ driver.RowsColumnTypeDatabaseTypeName = &rows{} _ driver.RowsColumnTypeDatabaseTypeName = &rows{}
_ driver.RowsColumnTypeNullable = &rows{} _ driver.RowsColumnTypeNullable = &rows{}
// _ driver.RowsColumnScanner = &rows{}
) )
func (r *rows) Close() error { func (r *rows) Close() error {
@ -740,7 +741,7 @@ func (r *rows) ColumnTypeScanType(index int) (typ reflect.Type) {
switch { switch {
case scan == _TIME && val != _BLOB && val != _NULL: case scan == _TIME && val != _BLOB && val != _NULL:
t := r.Stmt.ColumnTime(index, r.tmRead) t := r.Stmt.ColumnTime(index, r.tmRead)
useValType = t == time.Time{} useValType = t.IsZero()
case scan == _BOOL && val == _INT: case scan == _BOOL && val == _INT:
i := r.Stmt.ColumnInt64(index) i := r.Stmt.ColumnInt64(index)
useValType = i != 0 && i != 1 useValType = i != 0 && i != 1
@ -830,3 +831,23 @@ func (r *rows) Next(dest []driver.Value) error {
} }
return nil return nil
} }
func (r *rows) ScanColumn(dest any, index int) error {
// notest // Go 1.26
var ptr *time.Time
switch d := dest.(type) {
case *time.Time:
ptr = d
case *sql.NullTime:
ptr = &d.Time
case *sql.Null[time.Time]:
ptr = &d.V
default:
return driver.ErrSkip
}
if t := r.Stmt.ColumnTime(index, r.tmRead); !t.IsZero() {
*ptr = t
return nil
}
return driver.ErrSkip
}
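`ScanColumn` backs the `driver.RowsColumnScanner` interface noted above (still commented out until Go 1.26), letting declared time columns be scanned directly into `*time.Time`, `*sql.NullTime`, or `*sql.Null[time.Time]`. Callers see no API change; a hedged sketch of the usual `database/sql` flow this serves, with a hypothetical table and the module's documented driver/embed import paths assumed:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/ncruces/go-sqlite3/driver" // database/sql driver, registered as "sqlite3"
	_ "github.com/ncruces/go-sqlite3/embed"  // embedded Wasm build of SQLite
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Hypothetical schema, for illustration only.
	if _, err := db.Exec(`CREATE TABLE events (id INTEGER PRIMARY KEY, created_at TIMESTAMP)`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`INSERT INTO events (created_at) VALUES (?)`, time.Now()); err != nil {
		log.Fatal(err)
	}

	var id int64
	var createdAt time.Time // declared time columns scan straight into time.Time
	if err := db.QueryRow(`SELECT id, created_at FROM events`).Scan(&id, &createdAt); err != nil {
		log.Fatal(err)
	}
	fmt.Println(id, createdAt)
}
```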

View file

@ -1,6 +1,6 @@
# Embeddable Wasm build of SQLite # Embeddable Wasm build of SQLite
This folder includes an embeddable Wasm build of SQLite 3.50.3 for use with This folder includes an embeddable Wasm build of SQLite 3.50.4 for use with
[`github.com/ncruces/go-sqlite3`](https://pkg.go.dev/github.com/ncruces/go-sqlite3). [`github.com/ncruces/go-sqlite3`](https://pkg.go.dev/github.com/ncruces/go-sqlite3).
The following optional features are compiled in: The following optional features are compiled in:

View file

@ -17,7 +17,8 @@ trap 'rm -f sqlite3.tmp' EXIT
-mmutable-globals -mnontrapping-fptoint \ -mmutable-globals -mnontrapping-fptoint \
-msimd128 -mbulk-memory -msign-ext \ -msimd128 -mbulk-memory -msign-ext \
-mreference-types -mmultivalue \ -mreference-types -mmultivalue \
-fno-stack-protector -fno-stack-clash-protection \ -mno-extended-const \
-fno-stack-protector \
-Wl,--stack-first \ -Wl,--stack-first \
-Wl,--import-undefined \ -Wl,--import-undefined \
-Wl,--initial-memory=327680 \ -Wl,--initial-memory=327680 \

Binary file not shown.

View file

@ -59,7 +59,7 @@ func (c *Conn) CreateCollation(name string, fn CollatingFunction) error {
return c.error(rc) return c.error(rc)
} }
// Collating function is the type of a collation callback. // CollatingFunction is the type of a collation callback.
// Implementations must not retain a or b. // Implementations must not retain a or b.
type CollatingFunction func(a, b []byte) int type CollatingFunction func(a, b []byte) int
@ -132,7 +132,7 @@ func (c *Conn) CreateWindowFunction(name string, nArg int, flag FunctionFlag, fn
if win, ok := agg.(WindowFunction); ok { if win, ok := agg.(WindowFunction); ok {
return win return win
} }
return windowFunc{agg, name} return agg
})) }))
} }
rc := res_t(c.call("sqlite3_create_window_function_go", rc := res_t(c.call("sqlite3_create_window_function_go",
@ -307,13 +307,3 @@ func (a *aggregateFunc) Close() error {
a.stop() a.stop()
return nil return nil
} }
type windowFunc struct {
AggregateFunction
name string
}
func (w windowFunc) Inverse(ctx Context, arg ...Value) {
// Implementing inverse allows certain queries that don't really need it to succeed.
ctx.ResultError(util.ErrorString(w.name + ": may not be used as a window function"))
}

View file

@ -20,20 +20,6 @@ func ExportFuncVI[T0 i32](mod wazero.HostModuleBuilder, name string, fn func(con
Export(name) Export(name)
} }
type funcVII[T0, T1 i32] func(context.Context, api.Module, T0, T1)
func (fn funcVII[T0, T1]) Call(ctx context.Context, mod api.Module, stack []uint64) {
_ = stack[1] // prevent bounds check on every slice access
fn(ctx, mod, T0(stack[0]), T1(stack[1]))
}
func ExportFuncVII[T0, T1 i32](mod wazero.HostModuleBuilder, name string, fn func(context.Context, api.Module, T0, T1)) {
mod.NewFunctionBuilder().
WithGoModuleFunction(funcVII[T0, T1](fn),
[]api.ValueType{api.ValueTypeI32, api.ValueTypeI32}, nil).
Export(name)
}
type funcVIII[T0, T1, T2 i32] func(context.Context, api.Module, T0, T1, T2) type funcVIII[T0, T1, T2 i32] func(context.Context, api.Module, T0, T1, T2)
func (fn funcVIII[T0, T1, T2]) Call(ctx context.Context, mod api.Module, stack []uint64) { func (fn funcVIII[T0, T1, T2]) Call(ctx context.Context, mod api.Module, stack []uint64) {

View file

@ -0,0 +1,102 @@
package vfsutil
import (
"io"
"github.com/ncruces/go-sqlite3"
"github.com/ncruces/go-sqlite3/vfs"
)
// SliceFile implements [vfs.File] with a byte slice.
// It is suitable for temporary files (such as [vfs.OPEN_TEMP_JOURNAL]),
// but not concurrency safe.
type SliceFile []byte
var (
// Ensure these interfaces are implemented:
_ vfs.FileSizeHint = &SliceFile{}
)
// ReadAt implements [io.ReaderAt].
func (f *SliceFile) ReadAt(b []byte, off int64) (n int, err error) {
if d := *f; off < int64(len(d)) {
n = copy(b, d[off:])
}
if n < len(b) {
err = io.EOF
}
return
}
// WriteAt implements [io.WriterAt].
func (f *SliceFile) WriteAt(b []byte, off int64) (n int, err error) {
d := *f
if off > int64(len(d)) {
d = append(d, make([]byte, off-int64(len(d)))...)
}
d = append(d[:off], b...)
if len(d) > len(*f) {
*f = d
}
return len(b), nil
}
// Size implements [vfs.File].
func (f *SliceFile) Size() (int64, error) {
return int64(len(*f)), nil
}
// Truncate implements [vfs.File].
func (f *SliceFile) Truncate(size int64) error {
if d := *f; size < int64(len(d)) {
*f = d[:size]
}
return nil
}
// SizeHint implements [vfs.FileSizeHint].
func (f *SliceFile) SizeHint(size int64) error {
if d := *f; size > int64(len(d)) {
*f = append(d, make([]byte, size-int64(len(d)))...)
}
return nil
}
// Close implements [io.Closer].
func (*SliceFile) Close() error { return nil }
// Sync implements [vfs.File].
func (*SliceFile) Sync(flags vfs.SyncFlag) error { return nil }
// Lock implements [vfs.File].
func (*SliceFile) Lock(lock vfs.LockLevel) error {
// notest // not concurrency safe
return sqlite3.IOERR_LOCK
}
// Unlock implements [vfs.File].
func (*SliceFile) Unlock(lock vfs.LockLevel) error {
// notest // not concurrency safe
return sqlite3.IOERR_UNLOCK
}
// CheckReservedLock implements [vfs.File].
func (*SliceFile) CheckReservedLock() (bool, error) {
// notest // not concurrency safe
return false, sqlite3.IOERR_CHECKRESERVEDLOCK
}
// SectorSize implements [vfs.File].
func (*SliceFile) SectorSize() int {
// notest // safe default
return 0
}
// DeviceCharacteristics implements [vfs.File].
func (*SliceFile) DeviceCharacteristics() vfs.DeviceCharacteristic {
return vfs.IOCAP_ATOMIC |
vfs.IOCAP_SEQUENTIAL |
vfs.IOCAP_SAFE_APPEND |
vfs.IOCAP_POWERSAFE_OVERWRITE |
vfs.IOCAP_SUBPAGE_READ
}
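A quick sketch of `SliceFile`'s growth semantics, assuming the package is importable as `github.com/ncruces/go-sqlite3/util/vfsutil` (inferred from the package name and its sibling imports): writes past the current end zero-pad the slice, and short reads report `io.EOF`.

```go
package main

import (
	"fmt"

	"github.com/ncruces/go-sqlite3/util/vfsutil"
)

func main() {
	var f vfsutil.SliceFile

	// Writing past the end grows the file with zero padding.
	if _, err := f.WriteAt([]byte("hello"), 4096); err != nil {
		panic(err)
	}

	size, _ := f.Size()
	buf := make([]byte, 5)
	n, _ := f.ReadAt(buf, 4096)

	fmt.Println(size, n, string(buf)) // 4101 5 hello
}
```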

View file

@ -0,0 +1,185 @@
// Package vfsutil implements virtual filesystem utilities.
package vfsutil
import (
"github.com/ncruces/go-sqlite3"
"github.com/ncruces/go-sqlite3/vfs"
)
// UnwrapFile unwraps a [vfs.File],
// possibly implementing [vfs.FileUnwrap],
// to a concrete type.
func UnwrapFile[T vfs.File](f vfs.File) (_ T, _ bool) {
for {
switch t := f.(type) {
default:
return
case T:
return t, true
case vfs.FileUnwrap:
f = t.Unwrap()
}
}
}
// WrapOpen helps wrap [vfs.VFS].
func WrapOpen(f vfs.VFS, name string, flags vfs.OpenFlag) (file vfs.File, _ vfs.OpenFlag, err error) {
if f, ok := f.(vfs.VFSFilename); name == "" && ok {
return f.OpenFilename(nil, flags)
}
return f.Open(name, flags)
}
// WrapOpenFilename helps wrap [vfs.VFSFilename].
func WrapOpenFilename(f vfs.VFS, name *vfs.Filename, flags vfs.OpenFlag) (file vfs.File, _ vfs.OpenFlag, err error) {
if f, ok := f.(vfs.VFSFilename); ok {
return f.OpenFilename(name, flags)
}
return f.Open(name.String(), flags)
}
// WrapLockState helps wrap [vfs.FileLockState].
func WrapLockState(f vfs.File) vfs.LockLevel {
if f, ok := f.(vfs.FileLockState); ok {
return f.LockState()
}
return vfs.LOCK_EXCLUSIVE + 1 // UNKNOWN_LOCK
}
// WrapPersistWAL helps wrap [vfs.FilePersistWAL].
func WrapPersistWAL(f vfs.File) bool {
if f, ok := f.(vfs.FilePersistWAL); ok {
return f.PersistWAL()
}
return false
}
// WrapSetPersistWAL helps wrap [vfs.FilePersistWAL].
func WrapSetPersistWAL(f vfs.File, keepWAL bool) {
if f, ok := f.(vfs.FilePersistWAL); ok {
f.SetPersistWAL(keepWAL)
}
}
// WrapPowersafeOverwrite helps wrap [vfs.FilePowersafeOverwrite].
func WrapPowersafeOverwrite(f vfs.File) bool {
if f, ok := f.(vfs.FilePowersafeOverwrite); ok {
return f.PowersafeOverwrite()
}
return false
}
// WrapSetPowersafeOverwrite helps wrap [vfs.FilePowersafeOverwrite].
func WrapSetPowersafeOverwrite(f vfs.File, psow bool) {
if f, ok := f.(vfs.FilePowersafeOverwrite); ok {
f.SetPowersafeOverwrite(psow)
}
}
// WrapChunkSize helps wrap [vfs.FileChunkSize].
func WrapChunkSize(f vfs.File, size int) {
if f, ok := f.(vfs.FileChunkSize); ok {
f.ChunkSize(size)
}
}
// WrapSizeHint helps wrap [vfs.FileSizeHint].
func WrapSizeHint(f vfs.File, size int64) error {
if f, ok := f.(vfs.FileSizeHint); ok {
return f.SizeHint(size)
}
return sqlite3.NOTFOUND
}
// WrapHasMoved helps wrap [vfs.FileHasMoved].
func WrapHasMoved(f vfs.File) (bool, error) {
if f, ok := f.(vfs.FileHasMoved); ok {
return f.HasMoved()
}
return false, sqlite3.NOTFOUND
}
// WrapOverwrite helps wrap [vfs.FileOverwrite].
func WrapOverwrite(f vfs.File) error {
if f, ok := f.(vfs.FileOverwrite); ok {
return f.Overwrite()
}
return sqlite3.NOTFOUND
}
// WrapSyncSuper helps wrap [vfs.FileSync].
func WrapSyncSuper(f vfs.File, super string) error {
if f, ok := f.(vfs.FileSync); ok {
return f.SyncSuper(super)
}
return sqlite3.NOTFOUND
}
// WrapCommitPhaseTwo helps wrap [vfs.FileCommitPhaseTwo].
func WrapCommitPhaseTwo(f vfs.File) error {
if f, ok := f.(vfs.FileCommitPhaseTwo); ok {
return f.CommitPhaseTwo()
}
return sqlite3.NOTFOUND
}
// WrapBeginAtomicWrite helps wrap [vfs.FileBatchAtomicWrite].
func WrapBeginAtomicWrite(f vfs.File) error {
if f, ok := f.(vfs.FileBatchAtomicWrite); ok {
return f.BeginAtomicWrite()
}
return sqlite3.NOTFOUND
}
// WrapCommitAtomicWrite helps wrap [vfs.FileBatchAtomicWrite].
func WrapCommitAtomicWrite(f vfs.File) error {
if f, ok := f.(vfs.FileBatchAtomicWrite); ok {
return f.CommitAtomicWrite()
}
return sqlite3.NOTFOUND
}
// WrapRollbackAtomicWrite helps wrap [vfs.FileBatchAtomicWrite].
func WrapRollbackAtomicWrite(f vfs.File) error {
if f, ok := f.(vfs.FileBatchAtomicWrite); ok {
return f.RollbackAtomicWrite()
}
return sqlite3.NOTFOUND
}
// WrapCheckpointStart helps wrap [vfs.FileCheckpoint].
func WrapCheckpointStart(f vfs.File) {
if f, ok := f.(vfs.FileCheckpoint); ok {
f.CheckpointStart()
}
}
// WrapCheckpointDone helps wrap [vfs.FileCheckpoint].
func WrapCheckpointDone(f vfs.File) {
if f, ok := f.(vfs.FileCheckpoint); ok {
f.CheckpointDone()
}
}
// WrapPragma helps wrap [vfs.FilePragma].
func WrapPragma(f vfs.File, name, value string) (string, error) {
if f, ok := f.(vfs.FilePragma); ok {
return f.Pragma(name, value)
}
return "", sqlite3.NOTFOUND
}
// WrapBusyHandler helps wrap [vfs.FilePragma].
func WrapBusyHandler(f vfs.File, handler func() bool) {
if f, ok := f.(vfs.FileBusyHandler); ok {
f.BusyHandler(handler)
}
}
// WrapSharedMemory helps wrap [vfs.FileSharedMemory].
func WrapSharedMemory(f vfs.File) vfs.SharedMemory {
if f, ok := f.(vfs.FileSharedMemory); ok {
return f.SharedMemory()
}
return nil
}
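These `Wrap*` helpers exist so a wrapping VFS or file can forward optional interfaces without repeating the same type assertions. A sketch of how a hypothetical logging wrapper might lean on them (the `logvfs` package and `logFile` type are illustrative, not part of the module):

```go
package logvfs

import (
	"log"

	"github.com/ncruces/go-sqlite3/util/vfsutil"
	"github.com/ncruces/go-sqlite3/vfs"
)

// logFile is a hypothetical wrapper that forwards optional behaviour
// to the wrapped file via the vfsutil helpers.
type logFile struct {
	vfs.File
}

// Unwrap lets vfsutil.UnwrapFile see through the wrapper.
func (f *logFile) Unwrap() vfs.File { return f.File }

// SizeHint forwards the hint only if the wrapped file supports it.
func (f *logFile) SizeHint(size int64) error {
	log.Printf("size hint: %d", size)
	return vfsutil.WrapSizeHint(f.File, size)
}

// SharedMemory exposes WAL shared memory when the wrapped file provides it.
func (f *logFile) SharedMemory() vfs.SharedMemory {
	return vfsutil.WrapSharedMemory(f.File)
}
```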

View file

@ -31,9 +31,9 @@ func (v Value) Dup() *Value {
// Close frees an SQL value previously obtained by [Value.Dup]. // Close frees an SQL value previously obtained by [Value.Dup].
// //
// https://sqlite.org/c3ref/value_dup.html // https://sqlite.org/c3ref/value_dup.html
func (dup *Value) Close() error { func (v *Value) Close() error {
dup.c.call("sqlite3_value_free", stk_t(dup.handle)) v.c.call("sqlite3_value_free", stk_t(v.handle))
dup.handle = 0 v.handle = 0
return nil return nil
} }

View file

@ -36,9 +36,9 @@ type VFSFilename interface {
// //
// https://sqlite.org/c3ref/io_methods.html // https://sqlite.org/c3ref/io_methods.html
type File interface { type File interface {
Close() error io.Closer
ReadAt(p []byte, off int64) (n int, err error) io.ReaderAt
WriteAt(p []byte, off int64) (n int, err error) io.WriterAt
Truncate(size int64) error Truncate(size int64) error
Sync(flags SyncFlag) error Sync(flags SyncFlag) error
Size() (int64, error) Size() (int64, error)

View file

@ -5,7 +5,6 @@ import (
"context" "context"
_ "embed" _ "embed"
"encoding/binary" "encoding/binary"
"strconv"
"github.com/tetratelabs/wazero/api" "github.com/tetratelabs/wazero/api"
@ -13,48 +12,30 @@ import (
"github.com/ncruces/go-sqlite3/util/sql3util" "github.com/ncruces/go-sqlite3/util/sql3util"
) )
func cksmWrapFile(name *Filename, flags OpenFlag, file File) File { func cksmWrapFile(file File, flags OpenFlag) File {
// Checksum only main databases and WALs. // Checksum only main databases.
if flags&(OPEN_MAIN_DB|OPEN_WAL) == 0 { if flags&OPEN_MAIN_DB == 0 {
return file return file
} }
return &cksmFile{File: file}
cksm := cksmFile{File: file}
if flags&OPEN_WAL != 0 {
main, _ := name.DatabaseFile().(cksmFile)
cksm.cksmFlags = main.cksmFlags
} else {
cksm.cksmFlags = new(cksmFlags)
cksm.isDB = true
}
return cksm
} }
type cksmFile struct { type cksmFile struct {
File File
*cksmFlags
isDB bool
}
type cksmFlags struct {
computeCksm bool
verifyCksm bool verifyCksm bool
inCkpt bool computeCksm bool
pageSize int
} }
func (c cksmFile) ReadAt(p []byte, off int64) (n int, err error) { func (c *cksmFile) ReadAt(p []byte, off int64) (n int, err error) {
n, err = c.File.ReadAt(p, off) n, err = c.File.ReadAt(p, off)
p = p[:n] p = p[:n]
if isHeader(c.isDB, p, off) { if isHeader(p, off) {
c.init((*[100]byte)(p)) c.init((*[100]byte)(p))
} }
// Verify checksums. // Verify checksums.
if c.verifyCksm && !c.inCkpt && len(p) == c.pageSize { if c.verifyCksm && sql3util.ValidPageSize(len(p)) {
cksm1 := cksmCompute(p[:len(p)-8]) cksm1 := cksmCompute(p[:len(p)-8])
cksm2 := *(*[8]byte)(p[len(p)-8:]) cksm2 := *(*[8]byte)(p[len(p)-8:])
if cksm1 != cksm2 { if cksm1 != cksm2 {
@ -64,20 +45,20 @@ func (c cksmFile) ReadAt(p []byte, off int64) (n int, err error) {
return n, err return n, err
} }
func (c cksmFile) WriteAt(p []byte, off int64) (n int, err error) { func (c *cksmFile) WriteAt(p []byte, off int64) (n int, err error) {
if isHeader(c.isDB, p, off) { if isHeader(p, off) {
c.init((*[100]byte)(p)) c.init((*[100]byte)(p))
} }
// Compute checksums. // Compute checksums.
if c.computeCksm && !c.inCkpt && len(p) == c.pageSize { if c.computeCksm && sql3util.ValidPageSize(len(p)) {
*(*[8]byte)(p[len(p)-8:]) = cksmCompute(p[:len(p)-8]) *(*[8]byte)(p[len(p)-8:]) = cksmCompute(p[:len(p)-8])
} }
return c.File.WriteAt(p, off) return c.File.WriteAt(p, off)
} }
func (c cksmFile) Pragma(name string, value string) (string, error) { func (c *cksmFile) Pragma(name string, value string) (string, error) {
switch name { switch name {
case "checksum_verification": case "checksum_verification":
b, ok := sql3util.ParseBool(value) b, ok := sql3util.ParseBool(value)
@ -90,15 +71,15 @@ func (c cksmFile) Pragma(name string, value string) (string, error) {
return "1", nil return "1", nil
case "page_size": case "page_size":
if c.computeCksm { if c.computeCksm && value != "" {
// Do not allow page size changes on a checksum database. // Do not allow page size changes on a checksum database.
return strconv.Itoa(c.pageSize), nil return "", nil
} }
} }
return "", _NOTFOUND return "", _NOTFOUND
} }
func (c cksmFile) DeviceCharacteristics() DeviceCharacteristic { func (c *cksmFile) DeviceCharacteristics() DeviceCharacteristic {
ret := c.File.DeviceCharacteristics() ret := c.File.DeviceCharacteristics()
if c.verifyCksm { if c.verifyCksm {
ret &^= IOCAP_SUBPAGE_READ ret &^= IOCAP_SUBPAGE_READ
@ -106,13 +87,8 @@ func (c cksmFile) DeviceCharacteristics() DeviceCharacteristic {
return ret return ret
} }
func (c cksmFile) fileControl(ctx context.Context, mod api.Module, op _FcntlOpcode, pArg ptr_t) _ErrorCode { func (c *cksmFile) fileControl(ctx context.Context, mod api.Module, op _FcntlOpcode, pArg ptr_t) _ErrorCode {
switch op { if op == _FCNTL_PRAGMA {
case _FCNTL_CKPT_START:
c.inCkpt = true
case _FCNTL_CKPT_DONE:
c.inCkpt = false
case _FCNTL_PRAGMA:
rc := vfsFileControlImpl(ctx, mod, c, op, pArg) rc := vfsFileControlImpl(ctx, mod, c, op, pArg)
if rc != _NOTFOUND { if rc != _NOTFOUND {
return rc return rc
@ -121,24 +97,26 @@ func (c cksmFile) fileControl(ctx context.Context, mod api.Module, op _FcntlOpco
return vfsFileControlImpl(ctx, mod, c.File, op, pArg) return vfsFileControlImpl(ctx, mod, c.File, op, pArg)
} }
func (f *cksmFlags) init(header *[100]byte) { func (c *cksmFile) init(header *[100]byte) {
f.pageSize = 256 * int(binary.LittleEndian.Uint16(header[16:18])) if r := header[20] == 8; r != c.computeCksm {
if r := header[20] == 8; r != f.computeCksm { c.computeCksm = r
f.computeCksm = r c.verifyCksm = r
f.verifyCksm = r
}
if !sql3util.ValidPageSize(f.pageSize) {
f.computeCksm = false
f.verifyCksm = false
} }
} }
func isHeader(isDB bool, p []byte, off int64) bool { func (c *cksmFile) SharedMemory() SharedMemory {
check := sql3util.ValidPageSize(len(p)) if f, ok := c.File.(FileSharedMemory); ok {
if isDB { return f.SharedMemory()
check = off == 0 && len(p) >= 100
} }
return check && bytes.HasPrefix(p, []byte("SQLite format 3\000")) return nil
}
func (c *cksmFile) Unwrap() File {
return c.File
}
func isHeader(p []byte, off int64) bool {
return off == 0 && len(p) >= 100 && bytes.HasPrefix(p, []byte("SQLite format 3\000"))
} }
func cksmCompute(a []byte) (cksm [8]byte) { func cksmCompute(a []byte) (cksm [8]byte) {
@ -155,14 +133,3 @@ func cksmCompute(a []byte) (cksm [8]byte) {
binary.LittleEndian.PutUint32(cksm[4:8], s2) binary.LittleEndian.PutUint32(cksm[4:8], s2)
return return
} }
func (c cksmFile) SharedMemory() SharedMemory {
if f, ok := c.File.(FileSharedMemory); ok {
return f.SharedMemory()
}
return nil
}
func (c cksmFile) Unwrap() File {
return c.File
}

View file

@ -75,6 +75,9 @@ func (vfsOS) Access(name string, flags AccessFlag) (bool, error) {
func (vfsOS) Open(name string, flags OpenFlag) (File, OpenFlag, error) { func (vfsOS) Open(name string, flags OpenFlag) (File, OpenFlag, error) {
// notest // OpenFilename is called instead // notest // OpenFilename is called instead
if name == "" {
return vfsOS{}.OpenFilename(nil, flags)
}
return nil, 0, _CANTOPEN return nil, 0, _CANTOPEN
} }

View file

@ -56,7 +56,7 @@ func (n *Filename) Journal() string {
return n.path("sqlite3_filename_journal") return n.path("sqlite3_filename_journal")
} }
// Journal returns the name of the corresponding WAL file. // WAL returns the name of the corresponding WAL file.
// //
// https://sqlite.org/c3ref/filename_database.html // https://sqlite.org/c3ref/filename_database.html
func (n *Filename) WAL() string { func (n *Filename) WAL() string {

View file

@ -2,40 +2,39 @@ package memdb
import ( import (
"io" "io"
"strings"
"sync" "sync"
"time" "time"
"github.com/ncruces/go-sqlite3" "github.com/ncruces/go-sqlite3"
"github.com/ncruces/go-sqlite3/util/vfsutil"
"github.com/ncruces/go-sqlite3/vfs" "github.com/ncruces/go-sqlite3/vfs"
) )
const sectorSize = 65536 const sectorSize = 65536
// Ensure sectorSize is a multiple of 64K (the largest page size).
var _ [0]struct{} = [sectorSize & 65535]struct{}{}
type memVFS struct{} type memVFS struct{}
func (memVFS) Open(name string, flags vfs.OpenFlag) (vfs.File, vfs.OpenFlag, error) { func (memVFS) Open(name string, flags vfs.OpenFlag) (vfs.File, vfs.OpenFlag, error) {
// For simplicity, we do not support reading or writing data // For simplicity, we do not support reading or writing data
// across "sector" boundaries. // across "sector" boundaries.
// // This is not a problem for SQLite database files.
// This is not a problem for most SQLite file types: const databases = vfs.OPEN_MAIN_DB | vfs.OPEN_TEMP_DB | vfs.OPEN_TRANSIENT_DB
// - databases, which only do page aligned reads/writes;
// - temp journals, as used by the sorter, which does the same: // Temp journals, as used by the sorter, use SliceFile.
// https://github.com/sqlite/sqlite/blob/b74eb0/src/vdbesort.c#L409-L412 if flags&vfs.OPEN_TEMP_JOURNAL != 0 {
// return &vfsutil.SliceFile{}, flags | vfs.OPEN_MEMORY, nil
// We refuse to open all other file types, }
// but returning OPEN_MEMORY means SQLite won't ask us to.
const types = vfs.OPEN_MAIN_DB | vfs.OPEN_TEMP_DB | // Refuse to open all other file types.
vfs.OPEN_TRANSIENT_DB | vfs.OPEN_TEMP_JOURNAL // Returning OPEN_MEMORY means SQLite won't ask us to.
if flags&types == 0 { if flags&databases == 0 {
// notest // OPEN_MEMORY // notest // OPEN_MEMORY
return nil, flags, sqlite3.CANTOPEN return nil, flags, sqlite3.CANTOPEN
} }
// A shared database has a name that begins with "/". // A shared database has a name that begins with "/".
shared := len(name) > 1 && name[0] == '/' shared := strings.HasPrefix(name, "/")
var db *memDB var db *memDB
if shared { if shared {
@ -76,18 +75,16 @@ func (memVFS) FullPathname(name string) (string, error) {
type memDB struct { type memDB struct {
name string name string
// +checklocks:lockMtx
waiter *sync.Cond
// +checklocks:dataMtx // +checklocks:dataMtx
data []*[sectorSize]byte data []*[sectorSize]byte
// +checklocks:dataMtx
size int64
// +checklocks:memoryMtx size int64 // +checklocks:dataMtx
refs int32 refs int32 // +checklocks:memoryMtx
shared int32 // +checklocks:lockMtx
shared int32 // +checklocks:lockMtx pending bool // +checklocks:lockMtx
pending bool // +checklocks:lockMtx reserved bool // +checklocks:lockMtx
reserved bool // +checklocks:lockMtx
waiter *sync.Cond // +checklocks:lockMtx
lockMtx sync.Mutex lockMtx sync.Mutex
dataMtx sync.RWMutex dataMtx sync.RWMutex
@ -129,7 +126,7 @@ func (m *memFile) ReadAt(b []byte, off int64) (n int, err error) {
base := off / sectorSize base := off / sectorSize
rest := off % sectorSize rest := off % sectorSize
have := int64(sectorSize) have := int64(sectorSize)
if base == int64(len(m.data))-1 { if m.size < off+int64(len(b)) {
have = modRoundUp(m.size, sectorSize) have = modRoundUp(m.size, sectorSize)
} }
n = copy(b, (*m.data[base])[rest:have]) n = copy(b, (*m.data[base])[rest:have])
@ -150,22 +147,37 @@ func (m *memFile) WriteAt(b []byte, off int64) (n int, err error) {
m.data = append(m.data, new([sectorSize]byte)) m.data = append(m.data, new([sectorSize]byte))
} }
n = copy((*m.data[base])[rest:], b) n = copy((*m.data[base])[rest:], b)
if size := off + int64(n); size > m.size {
m.size = size
}
if n < len(b) { if n < len(b) {
// notest // assume writes are page aligned // notest // assume writes are page aligned
return n, io.ErrShortWrite return n, io.ErrShortWrite
} }
if size := off + int64(len(b)); size > m.size {
m.size = size
}
return n, nil return n, nil
} }
func (m *memFile) Size() (int64, error) {
m.dataMtx.RLock()
defer m.dataMtx.RUnlock()
return m.size, nil
}
func (m *memFile) Truncate(size int64) error { func (m *memFile) Truncate(size int64) error {
m.dataMtx.Lock() m.dataMtx.Lock()
defer m.dataMtx.Unlock() defer m.dataMtx.Unlock()
return m.truncate(size) return m.truncate(size)
} }
func (m *memFile) SizeHint(size int64) error {
m.dataMtx.Lock()
defer m.dataMtx.Unlock()
if size > m.size {
return m.truncate(size)
}
return nil
}
// +checklocks:m.dataMtx // +checklocks:m.dataMtx
func (m *memFile) truncate(size int64) error { func (m *memFile) truncate(size int64) error {
if size < m.size { if size < m.size {
@ -185,16 +197,6 @@ func (m *memFile) truncate(size int64) error {
return nil return nil
} }
func (m *memFile) Sync(flag vfs.SyncFlag) error {
return nil
}
func (m *memFile) Size() (int64, error) {
m.dataMtx.RLock()
defer m.dataMtx.RUnlock()
return m.size, nil
}
func (m *memFile) Lock(lock vfs.LockLevel) error { func (m *memFile) Lock(lock vfs.LockLevel) error {
if m.lock >= lock { if m.lock >= lock {
return nil return nil
@ -278,31 +280,24 @@ func (m *memFile) CheckReservedLock() (bool, error) {
return m.reserved, nil return m.reserved, nil
} }
func (m *memFile) SectorSize() int { func (m *memFile) LockState() vfs.LockLevel {
return m.lock
}
func (*memFile) Sync(flag vfs.SyncFlag) error { return nil }
func (*memFile) SectorSize() int {
// notest // IOCAP_POWERSAFE_OVERWRITE // notest // IOCAP_POWERSAFE_OVERWRITE
return sectorSize return sectorSize
} }
func (m *memFile) DeviceCharacteristics() vfs.DeviceCharacteristic { func (*memFile) DeviceCharacteristics() vfs.DeviceCharacteristic {
return vfs.IOCAP_ATOMIC | return vfs.IOCAP_ATOMIC |
vfs.IOCAP_SEQUENTIAL | vfs.IOCAP_SEQUENTIAL |
vfs.IOCAP_SAFE_APPEND | vfs.IOCAP_SAFE_APPEND |
vfs.IOCAP_POWERSAFE_OVERWRITE vfs.IOCAP_POWERSAFE_OVERWRITE
} }
func (m *memFile) SizeHint(size int64) error {
m.dataMtx.Lock()
defer m.dataMtx.Unlock()
if size > m.size {
return m.truncate(size)
}
return nil
}
func (m *memFile) LockState() vfs.LockLevel {
return m.lock
}
func divRoundUp(a, b int64) int64 { func divRoundUp(a, b int64) int64 {
return (a + b - 1) / b return (a + b - 1) / b
} }

View file

@ -148,7 +148,7 @@ func vfsOpen(ctx context.Context, mod api.Module, pVfs, zPath, pFile ptr_t, flag
if pOutFlags != 0 { if pOutFlags != 0 {
util.Write32(mod, pOutFlags, flags) util.Write32(mod, pOutFlags, flags)
} }
file = cksmWrapFile(name, flags, file) file = cksmWrapFile(file, flags)
vfsFileRegister(ctx, mod, pFile, file) vfsFileRegister(ctx, mod, pFile, file)
return _OK return _OK
} }

View file

@ -453,7 +453,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
} }
group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
} }
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') {
groups = append(groups, group) groups = append(groups, group)
} }
return groups return groups
@ -568,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
buf := bufio.NewWriter(writer) buf := bufio.NewWriter(writer)
defer buf.Flush() defer buf.Flush()
wf := func(format string, args ...interface{}) error { wf := func(format string, args ...interface{}) error {
_, err := buf.WriteString(fmt.Sprintf(format, args...)) _, err := fmt.Fprintf(buf, format, args...)
return err return err
} }
ws := func(s string) error { ws := func(s string) error {

View file

@ -186,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
case pb.Counter != nil: case pb.Counter != nil:
pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
case pb.Histogram != nil: case pb.Histogram != nil:
h := pb.Histogram
for _, e := range m.exemplars { for _, e := range m.exemplars {
// pb.Histogram.Bucket are sorted by UpperBound. if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 ||
i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) &&
return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() e.GetTimestamp() != nil {
h.Exemplars = append(h.Exemplars, e)
if len(h.Bucket) == 0 {
// Don't proceed to classic buckets if there are none.
continue
}
}
// h.Bucket are sorted by UpperBound.
i := sort.Search(len(h.Bucket), func(i int) bool {
return h.Bucket[i].GetUpperBound() >= e.GetValue()
}) })
if i < len(pb.Histogram.Bucket) { if i < len(h.Bucket) {
pb.Histogram.Bucket[i].Exemplar = e h.Bucket[i].Exemplar = e
} else { } else {
// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
b := &dto.Bucket{ b := &dto.Bucket{
CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), CumulativeCount: proto.Uint64(h.GetSampleCount()),
UpperBound: proto.Float64(math.Inf(1)), UpperBound: proto.Float64(math.Inf(1)),
Exemplar: e, Exemplar: e,
} }
pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) h.Bucket = append(h.Bucket, b)
} }
} }
default: default:
@ -227,6 +237,7 @@ type Exemplar struct {
// Only last applicable exemplar is injected from the list. // Only last applicable exemplar is injected from the list.
// For example for Counter it means last exemplar is injected. // For example for Counter it means last exemplar is injected.
// For Histogram, it means last applicable exemplar for each bucket is injected. // For Histogram, it means last applicable exemplar for each bucket is injected.
// For a Native Histogram, all valid exemplars are injected.
// //
// NewMetricWithExemplars works best with MustNewConstMetric and // NewMetricWithExemplars works best with MustNewConstMetric and
// MustNewConstHistogram, see example. // MustNewConstHistogram, see example.
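A sketch of the behaviour the amended comment describes, using client_golang's const-metric API: a classic histogram places each exemplar in the matching bucket, while a native (sparse) histogram now keeps every timestamped exemplar. Metric names and values here are illustrative.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("http_request_duration_seconds", "Request duration.", nil, nil)

	// A classic const histogram with two buckets (values are illustrative).
	h := prometheus.MustNewConstHistogram(desc, 3, 0.5,
		map[float64]uint64{0.1: 1, 0.5: 3})

	// Attach an exemplar; for classic buckets it is placed in the first
	// bucket whose upper bound is >= the exemplar value.
	m, err := prometheus.NewMetricWithExemplars(h, prometheus.Exemplar{
		Value:     0.42,
		Labels:    prometheus.Labels{"trace_id": "abc123"},
		Timestamp: time.Now(),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T\n", m)
}
```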

View file

@ -25,9 +25,9 @@ import (
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
// notImplementedErr is returned by stub functions that replace cgo functions, when cgo // errNotImplemented is returned by stub functions that replace cgo functions, when cgo
// isn't available. // isn't available.
var notImplementedErr = errors.New("not implemented") var errNotImplemented = errors.New("not implemented")
type memoryInfo struct { type memoryInfo struct {
vsize uint64 // Virtual memory size in bytes vsize uint64 // Virtual memory size in bytes
@ -101,7 +101,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
if memInfo, err := getMemory(); err == nil { if memInfo, err := getMemory(); err == nil {
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
} else if !errors.Is(err, notImplementedErr) { } else if !errors.Is(err, errNotImplemented) {
// Don't report an error when support is not compiled in. // Don't report an error when support is not compiled in.
c.reportError(ch, c.rss, err) c.reportError(ch, c.rss, err)
c.reportError(ch, c.vsize, err) c.reportError(ch, c.vsize, err)

View file

@ -16,7 +16,7 @@
package prometheus package prometheus
func getMemory() (*memoryInfo, error) { func getMemory() (*memoryInfo, error) {
return nil, notImplementedErr return nil, errNotImplemented
} }
// describe returns all descriptions of the collector for Darwin. // describe returns all descriptions of the collector for Darwin.

View file

@ -66,11 +66,11 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
if netstat, err := p.Netstat(); err == nil { if netstat, err := p.Netstat(); err == nil {
var inOctets, outOctets float64 var inOctets, outOctets float64
if netstat.IpExt.InOctets != nil { if netstat.InOctets != nil {
inOctets = *netstat.IpExt.InOctets inOctets = *netstat.InOctets
} }
if netstat.IpExt.OutOctets != nil { if netstat.OutOctets != nil {
outOctets = *netstat.IpExt.OutOctets outOctets = *netstat.OutOctets
} }
ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)

View file

@ -392,7 +392,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
labels := prometheus.Labels{} labels := prometheus.Labels{}
if !(code || method) { if !code && !method {
return labels return labels
} }

View file

@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
return false return false
} }
return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) return m.deleteByHashWithLabelValues(h, lvs, m.curry)
} }
// Delete deletes the metric where the variable labels are the same as those // Delete deletes the metric where the variable labels are the same as those
@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool {
return false return false
} }
return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) return m.deleteByHashWithLabels(h, labels, m.curry)
} }
// DeletePartialMatch deletes all metrics where the variable labels contain all of those // DeletePartialMatch deletes all metrics where the variable labels contain all of those
@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels, closer := constrainLabels(m.desc, labels) labels, closer := constrainLabels(m.desc, labels)
defer closer() defer closer()
return m.metricMap.deleteByLabels(labels, m.curry) return m.deleteByLabels(labels, m.curry)
} }
// Without explicit forwarding of Describe, Collect, Reset, those methods won't // Without explicit forwarding of Describe, Collect, Reset, those methods won't
@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
return nil, err return nil, err
} }
return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
} }
// GetMetricWith returns the Metric for the given Labels map (the label names // GetMetricWith returns the Metric for the given Labels map (the label names
@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
return nil, err return nil, err
} }
return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil
} }
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {

View file

@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
// metric names that are standardized across applications, as that would break // metric names that are standardized across applications, as that would break
// horizontal monitoring, for example the metrics provided by the Go collector // horizontal monitoring, for example the metrics provided by the Go collector
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In // (see NewGoCollector) and the process collector (see NewProcessCollector). (In
// fact, those metrics are already prefixed with “go_” or “process_”, // fact, those metrics are already prefixed with "go_" or "process_",
// respectively.) // respectively.)
// //
// Conflicts between Collectors registered through the original Registerer with // Conflicts between Collectors registered through the original Registerer with
@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
} }
} }
// WrapCollectorWith returns a Collector wrapping the provided Collector. The
// wrapped Collector will add the provided Labels to all Metrics it collects (as
// ConstLabels). The Metrics collected by the unmodified Collector must not
// duplicate any of those labels.
//
// WrapCollectorWith can be useful to work with multiple instances of a third
// party library that does not expose enough flexibility on the lifecycle of its
// registered metrics.
// For example, let's say you have a foo.New(reg Registerer) constructor that
// registers metrics but never unregisters them, and you want to create multiple
// instances of foo.Foo with different labels.
// The way to achieve that, is to create a new Registry, pass it to foo.New,
// then use WrapCollectorWith to wrap that Registry with the desired labels and
// register that as a collector in your main Registry.
// Then you can un-register the wrapped collector effectively un-registering the
// metrics registered by foo.New.
func WrapCollectorWith(labels Labels, c Collector) Collector {
return &wrappingCollector{
wrappedCollector: c,
labels: labels,
}
}
// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
//
// See the documentation of WrapCollectorWith for more details on the use case.
func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
return &wrappingCollector{
wrappedCollector: c,
prefix: prefix,
}
}
type wrappingRegisterer struct { type wrappingRegisterer struct {
wrappedRegisterer Registerer wrappedRegisterer Registerer
prefix string prefix string
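A minimal sketch of the lifecycle pattern the `WrapCollectorWith` comment describes, assuming (as that comment implies) that a `*Registry` can itself be registered as a `Collector`; `newComponent` stands in for a third-party constructor that registers metrics and never unregisters them:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

// newComponent stands in for a third-party constructor that registers
// metrics on the given Registerer and offers no way to unregister them.
func newComponent(reg prometheus.Registerer) {
	reg.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "component_operations_total",
		Help: "Operations performed by the component.",
	}))
}

func main() {
	root := prometheus.NewRegistry()

	// Give each instance its own inner registry...
	inner := prometheus.NewRegistry()
	newComponent(inner)

	// ...then expose it through the main registry with an identifying label.
	wrapped := prometheus.WrapCollectorWith(prometheus.Labels{"instance_name": "a"}, inner)
	root.MustRegister(wrapped)

	// Dropping the instance later is a single Unregister call.
	root.Unregister(wrapped)
}
```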

View file

@ -1,2 +1,120 @@
# otlp-prometheus-translator # OTLP Prometheus Translator
Library providing API to convert OTLP metric and attribute names to respectively Prometheus metric and label names.
A Go library for converting [OpenTelemetry Protocol (OTLP)](https://opentelemetry.io/docs/specs/otlp/) metric and attribute names to [Prometheus](https://prometheus.io/)-compliant formats.
Part of the [Prometheus](https://prometheus.io/) ecosystem, following the [OpenTelemetry to Prometheus compatibility specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/compatibility/prometheus_and_openmetrics.md).
## Features
- **Metric Name and Label Translation**: Convert OTLP metric names and attributes to Prometheus-compliant format
- **Unit Handling**: Translate OTLP units to Prometheus unit conventions
- **Type-Aware Suffixes**: Optionally append `_total`, `_ratio` based on metric type
- **Namespace Support**: Add configurable namespace prefixes
- **UTF-8 Support**: Choose between Prometheus legacy scheme compliant metric/label names (`[a-zA-Z0-9:_]`) or untranslated metric/label names
- **Translation Strategy Configuration**: Select a translation strategy with a standard set of strings.
## Installation
```bash
go get github.com/prometheus/otlptranslator
```
## Quick Start
```go
package main
import (
"fmt"
"github.com/prometheus/otlptranslator"
)
func main() {
// Create a metric namer using traditional Prometheus name translation, with suffixes added and UTF-8 disallowed.
strategy := otlptranslator.UnderscoreEscapingWithSuffixes
namer := otlptranslator.NewMetricNamer("myapp", strategy)
// Translate OTLP metric to Prometheus format
metric := otlptranslator.Metric{
Name: "http.server.request.duration",
Unit: "s",
Type: otlptranslator.MetricTypeHistogram,
}
fmt.Println(namer.Build(metric)) // Output: myapp_http_server_request_duration_seconds
// Translate label names
labelNamer := otlptranslator.LabelNamer{UTF8Allowed: false}
fmt.Println(labelNamer.Build("http.method")) // Output: http_method
}
```
## Usage Examples
### Metric Name Translation
```go
namer := otlptranslator.MetricNamer{WithMetricSuffixes: true, UTF8Allowed: false}
// Counter gets _total suffix
counter := otlptranslator.Metric{
Name: "requests.count", Unit: "1", Type: otlptranslator.MetricTypeMonotonicCounter,
}
fmt.Println(namer.Build(counter)) // requests_count_total
// Gauge with unit conversion
gauge := otlptranslator.Metric{
Name: "memory.usage", Unit: "By", Type: otlptranslator.MetricTypeGauge,
}
fmt.Println(namer.Build(gauge)) // memory_usage_bytes
// Dimensionless gauge gets _ratio suffix
ratio := otlptranslator.Metric{
Name: "cpu.utilization", Unit: "1", Type: otlptranslator.MetricTypeGauge,
}
fmt.Println(namer.Build(ratio)) // cpu_utilization_ratio
```
### Label Translation
```go
labelNamer := otlptranslator.LabelNamer{UTF8Allowed: false}
labelNamer.Build("http.method") // http_method
labelNamer.Build("123invalid") // key_123invalid
labelNamer.Build("_private") // key_private
labelNamer.Build("__reserved__") // __reserved__ (preserved)
labelNamer.Build("label@with$symbols") // label_with_symbols
```
### Unit Translation
```go
unitNamer := otlptranslator.UnitNamer{UTF8Allowed: false}
unitNamer.Build("s") // seconds
unitNamer.Build("By") // bytes
unitNamer.Build("requests/s") // requests_per_second
unitNamer.Build("1") // "" (dimensionless)
```
### Configuration Options
```go
// Prometheus-compliant mode - supports [a-zA-Z0-9:_]
compliantNamer := otlptranslator.MetricNamer{UTF8Allowed: false, WithMetricSuffixes: true}
// Transparent pass-through mode, aka "NoTranslation"
utf8Namer := otlptranslator.MetricNamer{UTF8Allowed: true, WithMetricSuffixes: false}
utf8Namer = otlptranslator.NewMetricNamer("", otlptranslator.NoTranslation)
// With namespace and suffixes
productionNamer := otlptranslator.MetricNamer{
Namespace: "myservice",
WithMetricSuffixes: true,
UTF8Allowed: false,
}
```
## License
Licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details.

24
vendor/github.com/prometheus/otlptranslator/doc.go generated vendored Normal file
View file

@ -0,0 +1,24 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package otlptranslator provides utilities for converting OpenTelemetry Protocol (OTLP)
// metric and attribute names to Prometheus-compliant formats.
//
// This package is designed to help users translate OpenTelemetry metrics to Prometheus
// metrics while following the official OpenTelemetry to Prometheus compatibility specification.
//
// Main components:
// - MetricNamer: Translates OTLP metric names to Prometheus metric names
// - LabelNamer: Translates OTLP attribute names to Prometheus label names
// - UnitNamer: Translates OTLP units to Prometheus unit conventions
package otlptranslator

View file

@ -0,0 +1,90 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/normalize_label.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The Prometheus Authors
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
package otlptranslator
import (
"fmt"
"strings"
"unicode"
)
// LabelNamer is a helper struct to build label names.
// It translates OpenTelemetry Protocol (OTLP) attribute names to Prometheus-compliant label names.
//
// Example usage:
//
// namer := LabelNamer{UTF8Allowed: false}
// result := namer.Build("http.method") // "http_method"
type LabelNamer struct {
UTF8Allowed bool
}
// Build normalizes the specified label to follow Prometheus label names standard.
//
// Translation rules:
// - Replaces invalid characters with underscores
// - Prefixes labels with invalid start characters (numbers or `_`) with "key"
// - Preserves double underscore labels (reserved names)
// - If UTF8Allowed is true, returns label as-is
//
// Examples:
//
// namer := LabelNamer{UTF8Allowed: false}
// namer.Build("http.method") // "http_method"
// namer.Build("123invalid") // "key_123invalid"
// namer.Build("__reserved__") // "__reserved__" (preserved)
func (ln *LabelNamer) Build(label string) (normalizedName string, err error) {
defer func() {
if len(normalizedName) == 0 {
err = fmt.Errorf("normalization for label name %q resulted in empty name", label)
return
}
if ln.UTF8Allowed || normalizedName == label {
return
}
// Check that the resulting normalized name contains at least one non-underscore character
for _, c := range normalizedName {
if c != '_' {
return
}
}
err = fmt.Errorf("normalization for label name %q resulted in invalid name %q", label, normalizedName)
normalizedName = ""
}()
// Trivial case.
if len(label) == 0 || ln.UTF8Allowed {
normalizedName = label
return
}
normalizedName = sanitizeLabelName(label)
// If label starts with a number, prepend with "key_".
if unicode.IsDigit(rune(normalizedName[0])) {
normalizedName = "key_" + normalizedName
} else if strings.HasPrefix(normalizedName, "_") && !strings.HasPrefix(normalizedName, "__") {
normalizedName = "key" + normalizedName
}
return
}
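The notable change is that `Build` now returns an error instead of silently emitting an unusable name. A small sketch of both outcomes, assuming `sanitizeLabelName` still maps every invalid rune to `_`:

```go
package main

import (
	"fmt"

	"github.com/prometheus/otlptranslator"
)

func main() {
	namer := otlptranslator.LabelNamer{UTF8Allowed: false}

	name, err := namer.Build("http.method")
	fmt.Println(name, err) // http_method <nil>

	// A label consisting only of invalid runes normalizes to underscores,
	// which Build now reports as an error rather than returning a bogus name.
	name, err = namer.Build("@#$")
	fmt.Println(name, err == nil) // (empty) false
}
```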

View file

@ -20,6 +20,7 @@
package otlptranslator package otlptranslator
import ( import (
"fmt"
"slices" "slices"
"strings" "strings"
"unicode" "unicode"
@ -81,13 +82,48 @@ var perUnitMap = map[string]string{
} }
// MetricNamer is a helper struct to build metric names. // MetricNamer is a helper struct to build metric names.
// It converts OpenTelemetry Protocol (OTLP) metric names to Prometheus-compliant metric names.
//
// Example usage:
//
// namer := MetricNamer{
// WithMetricSuffixes: true,
// UTF8Allowed: false,
// }
//
// metric := Metric{
// Name: "http.server.duration",
// Unit: "s",
// Type: MetricTypeHistogram,
// }
//
// result := namer.Build(metric) // "http_server_duration_seconds"
type MetricNamer struct { type MetricNamer struct {
Namespace string Namespace string
WithMetricSuffixes bool WithMetricSuffixes bool
UTF8Allowed bool UTF8Allowed bool
} }
// NewMetricNamer creates a MetricNamer with the specified namespace (can be
// blank) and the requested Translation Strategy.
func NewMetricNamer(namespace string, strategy TranslationStrategyOption) MetricNamer {
return MetricNamer{
Namespace: namespace,
WithMetricSuffixes: strategy.ShouldAddSuffixes(),
UTF8Allowed: !strategy.ShouldEscape(),
}
}
// Metric is a helper struct that holds information about a metric. // Metric is a helper struct that holds information about a metric.
// It represents an OpenTelemetry metric with its name, unit, and type.
//
// Example:
//
// metric := Metric{
// Name: "http.server.request.duration",
// Unit: "s",
// Type: MetricTypeHistogram,
// }
type Metric struct { type Metric struct {
Name string Name string
Unit string Unit string
@ -96,34 +132,70 @@ type Metric struct {
// Build builds a metric name for the specified metric. // Build builds a metric name for the specified metric.
// //
// If UTF8Allowed is true, the metric name is returned as is, only with the addition of type/unit suffixes and namespace preffix if required. // The method applies different transformations based on the MetricNamer configuration:
// Otherwise the metric name is normalized to be Prometheus-compliant. // - If UTF8Allowed is true, doesn't translate names - all characters must be valid UTF-8, however.
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels, // - If UTF8Allowed is false, translates metric names to comply with legacy Prometheus name scheme by escaping invalid characters to `_`.
// https://prometheus.io/docs/practices/naming/#metric-and-label-naming // - If WithMetricSuffixes is true, adds appropriate suffixes based on type and unit.
func (mn *MetricNamer) Build(metric Metric) string { //
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
//
// Examples:
//
// namer := MetricNamer{WithMetricSuffixes: true, UTF8Allowed: false}
//
// // Counter gets _total suffix
// counter := Metric{Name: "requests.count", Unit: "1", Type: MetricTypeMonotonicCounter}
// result := namer.Build(counter) // "requests_count_total"
//
// // Gauge with unit suffix
// gauge := Metric{Name: "memory.usage", Unit: "By", Type: MetricTypeGauge}
// result = namer.Build(gauge) // "memory_usage_bytes"
func (mn *MetricNamer) Build(metric Metric) (string, error) {
if mn.UTF8Allowed { if mn.UTF8Allowed {
return mn.buildMetricName(metric.Name, metric.Unit, metric.Type) return mn.buildMetricName(metric.Name, metric.Unit, metric.Type)
} }
return mn.buildCompliantMetricName(metric.Name, metric.Unit, metric.Type) return mn.buildCompliantMetricName(metric.Name, metric.Unit, metric.Type)
} }
func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType MetricType) string { func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType MetricType) (normalizedName string, err error) {
defer func() {
if len(normalizedName) == 0 {
err = fmt.Errorf("normalization for metric %q resulted in empty name", name)
return
}
if normalizedName == name {
return
}
// Check that the resulting normalized name contains at least one non-underscore character
for _, c := range normalizedName {
if c != '_' {
return
}
}
err = fmt.Errorf("normalization for metric %q resulted in invalid name %q", name, normalizedName)
normalizedName = ""
}()
// Full normalization following standard Prometheus naming conventions // Full normalization following standard Prometheus naming conventions
if mn.WithMetricSuffixes { if mn.WithMetricSuffixes {
return normalizeName(name, unit, metricType, mn.Namespace) normalizedName = normalizeName(name, unit, metricType, mn.Namespace)
return
} }
// Simple case (no full normalization, no units, etc.). // Simple case (no full normalization, no units, etc.).
metricName := strings.Join(strings.FieldsFunc(name, func(r rune) bool { metricName := strings.Join(strings.FieldsFunc(name, func(r rune) bool {
return invalidMetricCharRE.MatchString(string(r)) return !isValidCompliantMetricChar(r) && r != '_'
}), "_") }), "_")
// Namespace? // Namespace?
if mn.Namespace != "" { if mn.Namespace != "" {
namespace := strings.Join(strings.FieldsFunc(mn.Namespace, func(r rune) bool { namespace := strings.Join(strings.FieldsFunc(mn.Namespace, func(r rune) bool {
return invalidMetricCharRE.MatchString(string(r)) return !isValidCompliantMetricChar(r) && r != '_'
}), "_") }), "_")
return namespace + "_" + metricName normalizedName = namespace + "_" + metricName
return
} }
// Metric name starts with a digit? Prefix it with an underscore. // Metric name starts with a digit? Prefix it with an underscore.
@ -131,14 +203,11 @@ func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType Me
metricName = "_" + metricName metricName = "_" + metricName
} }
return metricName normalizedName = metricName
return
} }
var ( var multipleUnderscoresRE = regexp.MustCompile(`__+`)
// Regexp for metric name characters that should be replaced with _.
invalidMetricCharRE = regexp.MustCompile(`[^a-zA-Z0-9:_]`)
multipleUnderscoresRE = regexp.MustCompile(`__+`)
)
// isValidCompliantMetricChar checks if a rune is a valid metric name character (a-z, A-Z, 0-9, :). // isValidCompliantMetricChar checks if a rune is a valid metric name character (a-z, A-Z, 0-9, :).
func isValidCompliantMetricChar(r rune) bool { func isValidCompliantMetricChar(r rune) bool {
@ -243,33 +312,54 @@ func removeItem(slice []string, value string) []string {
return newSlice return newSlice
} }
func (mn *MetricNamer) buildMetricName(name, unit string, metricType MetricType) string { func (mn *MetricNamer) buildMetricName(inputName, unit string, metricType MetricType) (name string, err error) {
name = inputName
if mn.Namespace != "" { if mn.Namespace != "" {
name = mn.Namespace + "_" + name name = mn.Namespace + "_" + name
} }
if mn.WithMetricSuffixes { if mn.WithMetricSuffixes {
mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(unit)
if mainUnitSuffix != "" {
name = name + "_" + mainUnitSuffix
}
if perUnitSuffix != "" {
name = name + "_" + perUnitSuffix
}
// Append _total for Counters
if metricType == MetricTypeMonotonicCounter {
name += "_total"
}
// Append _ratio for metrics with unit "1" // Append _ratio for metrics with unit "1"
// Some OTel receivers improperly use unit "1" for counters of objects // Some OTel receivers improperly use unit "1" for counters of objects
// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions
// Until these issues have been fixed, we're appending `_ratio` for gauges ONLY // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY
// Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons)
if unit == "1" && metricType == MetricTypeGauge { if unit == "1" && metricType == MetricTypeGauge {
name += "_ratio" name = trimSuffixAndDelimiter(name, "ratio")
defer func() {
name += "_ratio"
}()
} }
// Append _total for Counters.
if metricType == MetricTypeMonotonicCounter {
name = trimSuffixAndDelimiter(name, "total")
defer func() {
name += "_total"
}()
}
mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(unit)
if perUnitSuffix != "" {
name = trimSuffixAndDelimiter(name, perUnitSuffix)
defer func() {
name = name + "_" + perUnitSuffix
}()
}
// We don't need to trim and re-append the suffix here because this is
// the inner-most suffix.
if mainUnitSuffix != "" && !strings.HasSuffix(name, mainUnitSuffix) {
name = name + "_" + mainUnitSuffix
}
}
return
}
// trimSuffixAndDelimiter trims a suffix, plus one extra character which is
// assumed to be a delimiter.
func trimSuffixAndDelimiter(name, suffix string) string {
if strings.HasSuffix(name, suffix) && len(name) > len(suffix)+1 {
return name[:len(name)-(len(suffix)+1)]
} }
return name return name
} }

View file

@ -1,57 +0,0 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/normalize_label.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The Prometheus Authors
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
package otlptranslator
import (
"strings"
"unicode"
)
// LabelNamer is a helper struct to build label names.
type LabelNamer struct {
UTF8Allowed bool
}
// Build normalizes the specified label to follow Prometheus label names standard.
//
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels.
//
// Labels that start with non-letter rune will be prefixed with "key_".
// An exception is made for double-underscores which are allowed.
//
// If UTF8Allowed is true, the label is returned as is. This option is provided just to
// keep a consistent interface with the MetricNamer.
func (ln *LabelNamer) Build(label string) string {
// Trivial case.
if len(label) == 0 || ln.UTF8Allowed {
return label
}
label = sanitizeLabelName(label)
// If label starts with a number, prepend with "key_".
if unicode.IsDigit(rune(label[0])) {
label = "key_" + label
} else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") {
label = "key" + label
}
return label
}

View file

@ -0,0 +1,86 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/3602785a89162ccc99a940fb9d862219a2d02241/config/config.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The Prometheus Authors
package otlptranslator
// TranslationStrategyOption is a constant that defines how metric and label
// names should be handled during translation. The recommended approach is to
// use either UnderscoreEscapingWithSuffixes for full Prometheus-style
// compatibility, or NoTranslation for Otel-style names.
type TranslationStrategyOption string
var (
// NoUTF8EscapingWithSuffixes will accept metric/label names as they are. Unit
// and type suffixes may be added to metric names, according to certain rules.
NoUTF8EscapingWithSuffixes TranslationStrategyOption = "NoUTF8EscapingWithSuffixes"
// UnderscoreEscapingWithSuffixes is the default option for translating OTLP
// to Prometheus. This option will translate metric name characters that are
// not alphanumerics/underscores/colons to underscores, and label name
// characters that are not alphanumerics/underscores to underscores. Unit and
// type suffixes may be appended to metric names, according to certain rules.
UnderscoreEscapingWithSuffixes TranslationStrategyOption = "UnderscoreEscapingWithSuffixes"
// UnderscoreEscapingWithoutSuffixes translates metric name characters that
// are not alphanumerics/underscores/colons to underscores, and label name
// characters that are not alphanumerics/underscores to underscores, but
// unlike UnderscoreEscapingWithSuffixes it does not append any suffixes to
// the names.
UnderscoreEscapingWithoutSuffixes TranslationStrategyOption = "UnderscoreEscapingWithoutSuffixes"
// NoTranslation (EXPERIMENTAL): disables all translation of incoming metric
// and label names. This offers a way for the OTLP users to use native metric
// names, reducing confusion.
//
// WARNING: This setting has significant known risks and limitations (see
// https://prometheus.io/docs/practices/naming/ for details): * Impaired UX
// when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling
// configuration). * Series collisions which in the best case may result in
// out-of-order (OOO) errors, and in the worst case a silently malformed time series. For
// instance, you may end up in a situation of ingesting a `foo.bar` series with
// unit `seconds` and a separate series `foo.bar` with unit `milliseconds`.
//
// As a result, this setting is experimental and currently, should not be used
// in production systems.
//
// TODO(ArthurSens): Mention `type-and-unit-labels` feature
// (https://github.com/prometheus/proposals/pull/39) once released, as
// potential mitigation of the above risks.
NoTranslation TranslationStrategyOption = "NoTranslation"
)
// ShouldEscape returns true if the translation strategy requires that metric
// names be escaped.
func (o TranslationStrategyOption) ShouldEscape() bool {
switch o {
case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes:
return true
case NoTranslation, NoUTF8EscapingWithSuffixes:
return false
default:
return false
}
}
// ShouldAddSuffixes returns a bool deciding whether the given translation
// strategy should have suffixes added.
func (o TranslationStrategyOption) ShouldAddSuffixes() bool {
switch o {
case UnderscoreEscapingWithSuffixes, NoUTF8EscapingWithSuffixes:
return true
case UnderscoreEscapingWithoutSuffixes, NoTranslation:
return false
default:
return false
}
}
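A minimal sketch of how the two predicate methods compose per strategy; the describeStrategy helper is illustrative, not part of the package:

package otlptranslator

import "fmt"

// describeStrategy is an illustrative helper showing how the two predicates
// above combine for each option.
func describeStrategy(o TranslationStrategyOption) string {
	return fmt.Sprintf("%s: escape=%v suffixes=%v", o, o.ShouldEscape(), o.ShouldAddSuffixes())
}

// Expected results, per the switch statements above:
//
//	describeStrategy(UnderscoreEscapingWithSuffixes)    // escape=true  suffixes=true
//	describeStrategy(UnderscoreEscapingWithoutSuffixes) // escape=true  suffixes=false
//	describeStrategy(NoUTF8EscapingWithSuffixes)        // escape=false suffixes=true
//	describeStrategy(NoTranslation)                     // escape=false suffixes=false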

View file

@ -15,14 +15,34 @@ package otlptranslator
import "strings" import "strings"
// UnitNamer is a helper for building compliant unit names. // UnitNamer is a helper for building compliant unit names.
// It processes OpenTelemetry Protocol (OTLP) unit strings and converts them
// to Prometheus-compliant unit names.
//
// Example usage:
//
// namer := UnitNamer{UTF8Allowed: false}
// result := namer.Build("s") // "seconds"
// result = namer.Build("By/s") // "bytes_per_second"
type UnitNamer struct { type UnitNamer struct {
UTF8Allowed bool UTF8Allowed bool
} }
// Build builds a unit name for the specified unit string. // Build builds a unit name for the specified unit string.
// It processes the unit by splitting it into main and per components, // It processes the unit by splitting it into main and per components,
// applying appropriate unit mappings, and cleaning up invalid characters // applying unit mappings, and cleaning up invalid characters when UTF8Allowed is false.
// when the whole UTF-8 character set is not allowed. //
// Unit mappings include:
// - Time: s→seconds, ms→milliseconds, h→hours
// - Bytes: By→bytes, KBy→kilobytes, MBy→megabytes
// - SI: m→meters, V→volts, W→watts
// - Special: 1→"" (empty), %→percent
//
// Examples:
//
// namer := UnitNamer{UTF8Allowed: false}
// namer.Build("s") // "seconds"
// namer.Build("requests/s") // "requests_per_second"
// namer.Build("1") // "" (dimensionless)
func (un *UnitNamer) Build(unit string) string { func (un *UnitNamer) Build(unit string) string {
mainUnit, perUnit := buildUnitSuffixes(unit) mainUnit, perUnit := buildUnitSuffixes(unit)
if !un.UTF8Allowed { if !un.UTF8Allowed {

View file

@ -12,14 +12,20 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
version: "2"
run: run:
deadline: 5m timeout: 5m
formatters:
enable:
- gofmt
- goimports
linters: linters:
disable-all: true default: none
enable: enable:
#- bodyclose #- bodyclose
# - deadcode ! deprecated since v1.49.0; replaced by 'unused'
#- depguard #- depguard
#- dogsled #- dogsled
#- dupl #- dupl
@ -30,28 +36,24 @@ linters:
- goconst - goconst
- gocritic - gocritic
#- gocyclo #- gocyclo
- gofmt
- goimports
#- gomnd
#- goprintffuncname #- goprintffuncname
- gosec - gosec
- gosimple
- govet - govet
- ineffassign - ineffassign
#- lll #- lll
- misspell - misspell
#- mnd
#- nakedret #- nakedret
#- noctx #- noctx
- nolintlint - nolintlint
#- rowserrcheck #- rowserrcheck
#- scopelint
- staticcheck - staticcheck
#- structcheck ! deprecated since v1.49.0; replaced by 'unused'
- stylecheck
#- typecheck
- unconvert - unconvert
#- unparam #- unparam
- unused - unused
# - varcheck ! deprecated since v1.49.0; replaced by 'unused'
#- whitespace #- whitespace
fast: false exclusions:
presets:
- common-false-positives
- legacy
- std-error-handling

View file

@ -1,8 +1,14 @@
<div align="center">
![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e) <a href="https://cobra.dev">
<img width="512" height="535" alt="cobra-logo" src="https://github.com/user-attachments/assets/c8bf9aad-b5ae-41d3-8899-d83baec10af8" />
</a>
</div>
Cobra is a library for creating powerful modern CLI applications. Cobra is a library for creating powerful modern CLI applications.
<a href="https://cobra.dev">Visit Cobra.dev for extensive documentation</a>
Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra. name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra.
@ -11,6 +17,20 @@ name a few. [This list](site/content/projects_using_cobra.md) contains a more ex
[![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra)
[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199)
<hr>
<div align="center" markdown="1">
<sup>Supported by:</sup>
<br>
<br>
<a href="https://www.warp.dev/cobra">
<img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/ab8dd143-b0fd-4904-bdc5-dd7ecac94eae">
</a>
### [Warp, the AI terminal for devs](https://www.warp.dev/cobra)
[Try Cobra in Warp today](https://www.warp.dev/cobra)<br>
</div>
<hr>
# Overview # Overview

105
vendor/github.com/spf13/cobra/SECURITY.md generated vendored Normal file
View file

@ -0,0 +1,105 @@
# Security Policy
## Reporting a Vulnerability
The `cobra` maintainers take security issues seriously and
we appreciate your efforts to _**responsibly**_ disclose your findings.
We will make every effort to swiftly respond and address concerns.
To report a security vulnerability:
1. **DO NOT** create a public GitHub issue for the vulnerability!
2. **DO NOT** create a public GitHub Pull Request with a fix for the vulnerability!
3. Send an email to `cobra-security@googlegroups.com`.
4. Include the following details in your report:
- Description of the vulnerability
- Steps to reproduce
- Potential impact of the vulnerability (to your downstream project, to the Go ecosystem, etc.)
- Any potential mitigations you've already identified
5. Allow up to 7 days for an initial response.
You should receive an acknowledgment of your report and an estimated timeline for a fix.
6. (Optional) If you have a fix and would like to contribute your patch, please work
directly with the maintainers via `cobra-security@googlegroups.com` to
coordinate pushing the patch to GitHub, cutting a new release, and disclosing the change.
## Response Process
When a security vulnerability report is received, the `cobra` maintainers will:
1. Confirm receipt of the vulnerability report within 7 days.
2. Assess the report to determine if it constitutes a security vulnerability.
3. If confirmed, assign the vulnerability a severity level and create a timeline for addressing it.
4. Develop and test a fix.
5. Patch the vulnerability and make a new GitHub release: the maintainers will coordinate disclosure with the reporter.
6. Create a new GitHub Security Advisory to inform the broader Go ecosystem
## Disclosure Policy
The `cobra` maintainers follow a coordinated disclosure process:
1. Security vulnerabilities will be addressed as quickly as possible.
2. A CVE (Common Vulnerabilities and Exposures) identifier will be requested for significant vulnerabilities
that are within `cobra` itself.
3. Once a fix is ready, the maintainers will:
- Release a new version containing the fix.
- Update the security advisory with details about the vulnerability.
- Credit the reporter (unless they wish to remain anonymous).
- Credit the fixer (unless they wish to remain anonymous, this may be the same as the reporter).
- Announce the vulnerability through appropriate channels
(GitHub Security Advisory, mailing lists, GitHub Releases, etc.)
## Supported Versions
Security fixes will typically only be released for the most recent major release.
## Upstream Security Issues
`cobra` generally will not accept vulnerability reports that originate in upstream
dependencies. I.e., if there is a problem in Go code that `cobra` depends on,
it is best to engage that project's maintainers and owners.
This security policy primarily pertains only to `cobra` itself but if you believe you've
identified a problem that originates in an upstream dependency and is being widely
distributed by `cobra`, please follow the disclosure procedure above: the `cobra`
maintainers will work with you to determine the severity and ecosystem impact.
## Security Updates and CVEs
Information about known security vulnerabilities and CVEs affecting `cobra` will
be published as GitHub Security Advisories at
https://github.com/spf13/cobra/security/advisories.
All users are encouraged to watch the repository and upgrade promptly when
security releases are published.
## `cobra` Security Best Practices for Users
When using `cobra` in your CLIs, the `cobra` maintainers recommend the following:
1. Always use the latest version of `cobra`.
2. [Use Go modules](https://go.dev/blog/using-go-modules) for dependency management.
3. Always use the latest possible version of Go.
## Security Best Practices for Contributors
When contributing to `cobra`:
1. Be mindful of security implications when adding new features or modifying existing ones.
2. Be aware of `cobra`'s extremely large reach: it is used in nearly every Go CLI
(like Kubernetes, Docker, Prometheus, etc. etc.)
3. Write tests that explicitly cover edge cases and potential issues.
4. If you discover a security issue while working on `cobra`, please report it
following the process above rather than opening a public pull request or issue that
addresses the vulnerability.
5. Take personal sec-ops seriously and secure your GitHub account: use [two-factor authentication](https://docs.github.com/en/authentication/securing-your-account-with-two-factor-authentication-2fa),
[sign your commits with a GPG or SSH key](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification),
etc.
## Acknowledgments
The `cobra` maintainers would like to thank all security researchers and
community members who help keep cobra, its users, and the entire Go ecosystem secure through responsible disclosures!!
---
*This security policy is inspired by the [Open Web Application Security Project (OWASP)](https://owasp.org/) guidelines and security best practices.*

View file

@ -39,7 +39,7 @@ const (
) )
// FParseErrWhitelist configures Flag parse errors to be ignored // FParseErrWhitelist configures Flag parse errors to be ignored
type FParseErrWhitelist flag.ParseErrorsWhitelist type FParseErrWhitelist flag.ParseErrorsAllowlist
// Group Structure to manage groups for commands // Group Structure to manage groups for commands
type Group struct { type Group struct {
@ -1296,6 +1296,11 @@ Simply type ` + c.DisplayName() + ` help [path to command] for full details.`,
c.Printf("Unknown help topic %#q\n", args) c.Printf("Unknown help topic %#q\n", args)
CheckErr(c.Root().Usage()) CheckErr(c.Root().Usage())
} else { } else {
// Flow the context down to be used in help text
if cmd.ctx == nil {
cmd.ctx = c.ctx
}
cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown
CheckErr(cmd.Help()) CheckErr(cmd.Help())
@ -1872,7 +1877,7 @@ func (c *Command) ParseFlags(args []string) error {
c.mergePersistentFlags() c.mergePersistentFlags()
// do it here after merging all flags and just before parse // do it here after merging all flags and just before parse
c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) c.Flags().ParseErrorsAllowlist = flag.ParseErrorsAllowlist(c.FParseErrWhitelist)
err := c.Flags().Parse(args) err := c.Flags().Parse(args)
// Print warnings if they occurred (e.g. deprecated flag messages). // Print warnings if they occurred (e.g. deprecated flag messages).
@ -2020,7 +2025,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error {
fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages())) fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages()))
} }
if c.HasHelpSubCommands() { if c.HasHelpSubCommands() {
fmt.Fprintf(w, "\n\nAdditional help topcis:") fmt.Fprintf(w, "\n\nAdditional help topics:")
for _, subcmd := range c.Commands() { for _, subcmd := range c.Commands() {
if subcmd.IsAdditionalHelpTopicCommand() { if subcmd.IsAdditionalHelpTopicCommand() {
fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short) fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short)

View file

@ -115,6 +115,13 @@ type CompletionOptions struct {
DisableDescriptions bool DisableDescriptions bool
// HiddenDefaultCmd makes the default 'completion' command hidden // HiddenDefaultCmd makes the default 'completion' command hidden
HiddenDefaultCmd bool HiddenDefaultCmd bool
// DefaultShellCompDirective sets the ShellCompDirective that is returned
// if no special directive can be determined
DefaultShellCompDirective *ShellCompDirective
}
func (receiver *CompletionOptions) SetDefaultShellCompDirective(directive ShellCompDirective) {
receiver.DefaultShellCompDirective = &directive
} }
// Completion is a string that can be used for completions // Completion is a string that can be used for completions
@ -375,7 +382,7 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo
// Error while attempting to parse flags // Error while attempting to parse flags
if flagErr != nil { if flagErr != nil {
// If error type is flagCompError and we don't want flagCompletion we should ignore the error // If error type is flagCompError and we don't want flagCompletion we should ignore the error
if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { if _, ok := flagErr.(*flagCompError); !ok || flagCompletion {
return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr
} }
} }
@ -480,6 +487,14 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo
} }
} else { } else {
directive = ShellCompDirectiveDefault directive = ShellCompDirectiveDefault
// check current and parent commands for a custom DefaultShellCompDirective
for cmd := finalCmd; cmd != nil; cmd = cmd.parent {
if cmd.CompletionOptions.DefaultShellCompDirective != nil {
directive = *cmd.CompletionOptions.DefaultShellCompDirective
break
}
}
if flag == nil { if flag == nil {
foundLocalNonPersistentFlag := false foundLocalNonPersistentFlag := false
// If TraverseChildren is true on the root command we don't check for // If TraverseChildren is true on the root command we don't check for
@ -773,7 +788,7 @@ See each sub-command's help for details on how to use the generated script.
// shell completion for it (prog __complete completion '') // shell completion for it (prog __complete completion '')
subCmd, cmdArgs, err := c.Find(args) subCmd, cmdArgs, err := c.Find(args)
if err != nil || subCmd.Name() != compCmdName && if err != nil || subCmd.Name() != compCmdName &&
!(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) { (subCmd.Name() != ShellCompRequestCmd || len(cmdArgs) <= 1 || cmdArgs[0] != compCmdName) {
// The completion command is not being called or being completed so we remove it. // The completion command is not being called or being completed so we remove it.
c.RemoveCommand(completionCmd) c.RemoveCommand(completionCmd)
return return
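A hedged sketch of opting into the new default directive from application code; the command definition is hypothetical, while SetDefaultShellCompDirective and ShellCompDirectiveNoFileComp are cobra's own:

package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{Use: "app"}

	// Without this, a completion that yields no special directive falls back to
	// ShellCompDirectiveDefault (filename completion). Because getCompletions now
	// also walks parent commands, setting it on the root covers subcommands too.
	rootCmd.CompletionOptions.SetDefaultShellCompDirective(cobra.ShellCompDirectiveNoFileComp)

	_ = rootCmd.Execute()
}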

View file

@ -137,12 +137,17 @@ const (
PanicOnError PanicOnError
) )
// ParseErrorsWhitelist defines the parsing errors that can be ignored // ParseErrorsAllowlist defines the parsing errors that can be ignored
type ParseErrorsWhitelist struct { type ParseErrorsAllowlist struct {
// UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags
UnknownFlags bool UnknownFlags bool
} }
// ParseErrorsWhitelist defines the parsing errors that can be ignored.
//
// Deprecated: use [ParseErrorsAllowlist] instead. This type will be removed in a future release.
type ParseErrorsWhitelist = ParseErrorsAllowlist
// NormalizedName is a flag name that has been normalized according to rules // NormalizedName is a flag name that has been normalized according to rules
// for the FlagSet (e.g. making '-' and '_' equivalent). // for the FlagSet (e.g. making '-' and '_' equivalent).
type NormalizedName string type NormalizedName string
@ -158,8 +163,13 @@ type FlagSet struct {
// help/usage messages. // help/usage messages.
SortFlags bool SortFlags bool
// ParseErrorsWhitelist is used to configure a whitelist of errors // ParseErrorsAllowlist is used to configure an allowlist of errors
ParseErrorsWhitelist ParseErrorsWhitelist ParseErrorsAllowlist ParseErrorsAllowlist
// ParseErrorsAllowlist is used to configure an allowlist of errors.
//
// Deprecated: use [FlagSet.ParseErrorsAllowlist] instead. This field will be removed in a future release.
ParseErrorsWhitelist ParseErrorsAllowlist
name string name string
parsed bool parsed bool
@ -928,7 +938,6 @@ func VarP(value Value, name, shorthand, usage string) {
// returns the error. // returns the error.
func (f *FlagSet) fail(err error) error { func (f *FlagSet) fail(err error) error {
if f.errorHandling != ContinueOnError { if f.errorHandling != ContinueOnError {
fmt.Fprintln(f.Output(), err)
f.usage() f.usage()
} }
return err return err
@ -986,6 +995,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
f.usage() f.usage()
return a, ErrHelp return a, ErrHelp
case f.ParseErrorsWhitelist.UnknownFlags: case f.ParseErrorsWhitelist.UnknownFlags:
fallthrough
case f.ParseErrorsAllowlist.UnknownFlags:
// --unknown=unknownval arg ... // --unknown=unknownval arg ...
// we do not want to lose arg in this case // we do not want to lose arg in this case
if len(split) >= 2 { if len(split) >= 2 {
@ -1044,6 +1055,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse
err = ErrHelp err = ErrHelp
return return
case f.ParseErrorsWhitelist.UnknownFlags: case f.ParseErrorsWhitelist.UnknownFlags:
fallthrough
case f.ParseErrorsAllowlist.UnknownFlags:
// '-f=arg arg ...' // '-f=arg arg ...'
// we do not want to lose arg in this case // we do not want to lose arg in this case
if len(shorthands) > 2 && shorthands[1] == '=' { if len(shorthands) > 2 && shorthands[1] == '=' {
@ -1158,12 +1171,12 @@ func (f *FlagSet) Parse(arguments []string) error {
} }
f.parsed = true f.parsed = true
f.args = make([]string, 0, len(arguments))
if len(arguments) == 0 { if len(arguments) == 0 {
return nil return nil
} }
f.args = make([]string, 0, len(arguments))
set := func(flag *Flag, value string) error { set := func(flag *Flag, value string) error {
return f.Set(flag.Name, value) return f.Set(flag.Name, value)
} }
@ -1174,7 +1187,10 @@ func (f *FlagSet) Parse(arguments []string) error {
case ContinueOnError: case ContinueOnError:
return err return err
case ExitOnError: case ExitOnError:
fmt.Println(err) if err == ErrHelp {
os.Exit(0)
}
fmt.Fprintln(f.Output(), err)
os.Exit(2) os.Exit(2)
case PanicOnError: case PanicOnError:
panic(err) panic(err)
@ -1200,6 +1216,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string)
case ContinueOnError: case ContinueOnError:
return err return err
case ExitOnError: case ExitOnError:
if err == ErrHelp {
os.Exit(0)
}
fmt.Fprintln(f.Output(), err)
os.Exit(2) os.Exit(2)
case PanicOnError: case PanicOnError:
panic(err) panic(err)
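For downstream code, the ParseErrorsWhitelist to ParseErrorsAllowlist rename above is mostly mechanical; a minimal sketch with hypothetical flags:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	verbose := fs.Bool("verbose", false, "enable verbose output")

	// New field name; the deprecated ParseErrorsWhitelist alias still compiles.
	fs.ParseErrorsAllowlist.UnknownFlags = true

	// "--not-a-flag" would normally abort parsing; with UnknownFlags set it is skipped.
	if err := fs.Parse([]string{"--verbose", "--not-a-flag"}); err != nil {
		fmt.Println("parse error:", err)
	}
	fmt.Println("verbose:", *verbose) // verbose: true
}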

View file

@ -8,6 +8,7 @@ import (
goflag "flag" goflag "flag"
"reflect" "reflect"
"strings" "strings"
"time"
) )
// go test flags prefixes // go test flags prefixes
@ -113,6 +114,38 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
} }
// CopyToGoFlagSet will add all current flags to the given Go flag set.
// Deprecation remarks get copied into the usage description.
// Whenever possible, a flag gets added for which Go flags shows
// a proper type in the help message.
func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) {
f.VisitAll(func(flag *Flag) {
usage := flag.Usage
if flag.Deprecated != "" {
usage += " (DEPRECATED: " + flag.Deprecated + ")"
}
switch value := flag.Value.(type) {
case *stringValue:
newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage)
case *intValue:
newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage)
case *int64Value:
newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage)
case *uintValue:
newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage)
case *uint64Value:
newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage)
case *durationValue:
newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage)
case *float64Value:
newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage)
default:
newSet.Var(flag.Value, flag.Name, usage)
}
})
}
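A hedged usage sketch for the new CopyToGoFlagSet helper; the flag names are hypothetical:

package main

import (
	goflag "flag"
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	pfs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	pfs.Int("retries", 3, "number of retries")
	pfs.String("endpoint", "localhost:8080", "service endpoint")

	// Mirror the pflag definitions into a standard library FlagSet so that code
	// expecting *flag.FlagSet (e.g. legacy libraries) sees properly typed flags.
	gfs := goflag.NewFlagSet("demo", goflag.ContinueOnError)
	pfs.CopyToGoFlagSet(gfs)

	_ = gfs.Parse([]string{"-retries", "5"})
	fmt.Println(gfs.Lookup("retries").Value.String()) // 5
}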
// ParseSkippedFlags explicitly Parses go test flags (i.e. the one starting with '-test.') with goflag.Parse(), // ParseSkippedFlags explicitly Parses go test flags (i.e. the one starting with '-test.') with goflag.Parse(),
// since by default those are skipped by pflag.Parse(). // since by default those are skipped by pflag.Parse().
// Typical usage example: `ParseGoTestFlags(os.Args[1:], goflag.CommandLine)` // Typical usage example: `ParseGoTestFlags(os.Args[1:], goflag.CommandLine)`
@ -125,3 +158,4 @@ func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error {
} }
return goFlagSet.Parse(skippedFlags) return goFlagSet.Parse(skippedFlags)
} }

View file

@ -4,6 +4,7 @@ import (
"bytes" "bytes"
"encoding/csv" "encoding/csv"
"fmt" "fmt"
"sort"
"strings" "strings"
) )
@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string {
} }
func (s *stringToStringValue) String() string { func (s *stringToStringValue) String() string {
keys := make([]string, 0, len(*s.value))
for k := range *s.value {
keys = append(keys, k)
}
sort.Strings(keys)
records := make([]string, 0, len(*s.value)>>1) records := make([]string, 0, len(*s.value)>>1)
for k, v := range *s.value { for _, k := range keys {
v := (*s.value)[k]
records = append(records, k+"="+v) records = append(records, k+"="+v)
} }
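The practical effect of sorting the keys is a deterministic String()/default rendering; a small sketch with a hypothetical flag:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.StringToString("label", map[string]string{"b": "2", "a": "1"}, "labels as key=value pairs")

	// With the sort added above the rendered default is stable, e.g. [a=1,b=2],
	// instead of varying with Go's randomized map iteration order.
	fmt.Println(fs.Lookup("label").DefValue)
}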

View file

@ -48,7 +48,13 @@ func (d *timeValue) Type() string {
return "time" return "time"
} }
func (d *timeValue) String() string { return d.Time.Format(time.RFC3339Nano) } func (d *timeValue) String() string {
if d.Time.IsZero() {
return ""
} else {
return d.Time.Format(time.RFC3339Nano)
}
}
// GetTime return the time value of a flag with the given name // GetTime return the time value of a flag with the given name
func (f *FlagSet) GetTime(name string) (time.Time, error) { func (f *FlagSet) GetTime(name string) (time.Time, error) {

View file

@ -390,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2)
return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...)
} }
// GreaterOrEqual asserts that the first element is greater than or equal to the second // GreaterOrEqual asserts that the first element is greater than or equal to the second
@ -403,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2)
return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...)
} }
// Less asserts that the first element is less than the second // Less asserts that the first element is less than the second
@ -415,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2)
return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...)
} }
// LessOrEqual asserts that the first element is less than or equal to the second // LessOrEqual asserts that the first element is less than or equal to the second
@ -428,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2)
return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...)
} }
// Positive asserts that the specified element is positive // Positive asserts that the specified element is positive
@ -440,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper() h.Helper()
} }
zero := reflect.Zero(reflect.TypeOf(e)) zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) failMessage := fmt.Sprintf("\"%v\" is not positive", e)
return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...)
} }
// Negative asserts that the specified element is negative // Negative asserts that the specified element is negative
@ -452,7 +457,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper() h.Helper()
} }
zero := reflect.Zero(reflect.TypeOf(e)) zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) failMessage := fmt.Sprintf("\"%v\" is not negative", e)
return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...)
} }
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
@ -468,11 +474,11 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare
compareResult, isComparable := compare(e1, e2, e1Kind) compareResult, isComparable := compare(e1, e2, e1Kind)
if !isComparable { if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...)
} }
if !containsValue(allowedComparesResults, compareResult) { if !containsValue(allowedComparesResults, compareResult) {
return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) return Fail(t, failMessage, msgAndArgs...)
} }
return true return true

View file

@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string
return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
} }
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Emptyf asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// assert.Emptyf(t, obj, "error message %s", "formatted") // assert.Emptyf(t, obj, "error message %s", "formatted")
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri
// Errorf asserts that a function returned an error (i.e. not `nil`). // Errorf asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if assert.Errorf(t, err, "error message %s", "formatted") { // assert.Errorf(t, err, "error message %s", "formatted")
// assert.Equal(t, expectedErrorf, err)
// }
func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf
return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
} }
// IsNotTypef asserts that the specified objects are not of the same type.
//
// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...)
}
// IsTypef asserts that the specified objects are of the same type. // IsTypef asserts that the specified objects are of the same type.
//
// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -585,8 +604,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str
return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
} }
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmptyf asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { // if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
@ -693,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
} }
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT // NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") // assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -782,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
return Same(t, expected, actual, append([]interface{}{msg}, args...)...) return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
} }
// Subsetf asserts that the specified list(array, slice...) or map contains all // Subsetf asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") // assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()

View file

@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st
return ElementsMatchf(a.t, listA, listB, msg, args...) return ElementsMatchf(a.t, listA, listB, msg, args...)
} }
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Empty asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// a.Empty(obj) // a.Empty(obj)
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
return Empty(a.t, object, msgAndArgs...) return Empty(a.t, object, msgAndArgs...)
} }
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Emptyf asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// a.Emptyf(obj, "error message %s", "formatted") // a.Emptyf(obj, "error message %s", "formatted")
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string
// Error asserts that a function returned an error (i.e. not `nil`). // Error asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if a.Error(err) { // a.Error(err)
// assert.Equal(t, expectedError, err)
// }
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter
// Errorf asserts that a function returned an error (i.e. not `nil`). // Errorf asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if a.Errorf(err, "error message %s", "formatted") { // a.Errorf(err, "error message %s", "formatted")
// assert.Equal(t, expectedErrorf, err)
// }
func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in
return IsNonIncreasingf(a.t, object, msg, args...) return IsNonIncreasingf(a.t, object, msg, args...)
} }
// IsNotType asserts that the specified objects are not of the same type.
//
// a.IsNotType(&NotMyStruct{}, &MyStruct{})
func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsNotType(a.t, theType, object, msgAndArgs...)
}
// IsNotTypef asserts that the specified objects are not of the same type.
//
// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsNotTypef(a.t, theType, object, msg, args...)
}
// IsType asserts that the specified objects are of the same type. // IsType asserts that the specified objects are of the same type.
//
// a.IsType(&MyStruct{}, &MyStruct{})
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd
} }
// IsTypef asserts that the specified objects are of the same type. // IsTypef asserts that the specified objects are of the same type.
//
// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1162,8 +1200,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg
return NotElementsMatchf(a.t, listA, listB, msg, args...) return NotElementsMatchf(a.t, listA, listB, msg, args...)
} }
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmpty asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if a.NotEmpty(obj) { // if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
@ -1175,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo
return NotEmpty(a.t, object, msgAndArgs...) return NotEmpty(a.t, object, msgAndArgs...)
} }
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmptyf asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if a.NotEmptyf(obj, "error message %s", "formatted") { // if a.NotEmptyf(obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
@ -1378,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
return NotSamef(a.t, expected, actual, msg, args...) return NotSamef(a.t, expected, actual, msg, args...)
} }
// NotSubset asserts that the specified list(array, slice...) or map does NOT // NotSubset asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset([1, 3, 4], [1, 2])
// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) // a.NotSubset({"x": 1, "y": 2}, {"z": 3})
// a.NotSubset([1, 3, 4], {1: "one", 2: "two"})
// a.NotSubset({"x": 1, "y": 2}, ["z"])
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1391,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
return NotSubset(a.t, list, subset, msgAndArgs...) return NotSubset(a.t, list, subset, msgAndArgs...)
} }
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT // NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1556,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
return Samef(a.t, expected, actual, msg, args...) return Samef(a.t, expected, actual, msg, args...)
} }
// Subset asserts that the specified list(array, slice...) or map contains all // Subset asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.Subset([1, 2, 3], [1, 2]) // a.Subset([1, 2, 3], [1, 2])
// a.Subset({"x": 1, "y": 2}, {"x": 1}) // a.Subset({"x": 1, "y": 2}, {"x": 1})
// a.Subset([1, 2, 3], {1: "one", 2: "two"})
// a.Subset({"x": 1, "y": 2}, ["x"])
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1568,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
return Subset(a.t, list, subset, msgAndArgs...) return Subset(a.t, list, subset, msgAndArgs...)
} }
// Subsetf asserts that the specified list(array, slice...) or map contains all // Subsetf asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()

View file

@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareR
compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
if !isComparable { if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...)
} }
if !containsValue(allowedComparesResults, compareResult) { if !containsValue(allowedComparesResults, compareResult) {

View file

@ -210,59 +210,77 @@ the problem actually occurred in calling code.*/
// of each stack frame leading from the current test to the assert call that // of each stack frame leading from the current test to the assert call that
// failed. // failed.
func CallerInfo() []string { func CallerInfo() []string {
var pc uintptr var pc uintptr
var ok bool
var file string var file string
var line int var line int
var name string var name string
const stackFrameBufferSize = 10
pcs := make([]uintptr, stackFrameBufferSize)
callers := []string{} callers := []string{}
for i := 0; ; i++ { offset := 1
pc, file, line, ok = runtime.Caller(i)
if !ok { for {
// The breaks below failed to terminate the loop, and we ran off the n := runtime.Callers(offset, pcs)
// end of the call stack.
if n == 0 {
break break
} }
// This is a huge edge case, but it will panic if this is the case, see #180 frames := runtime.CallersFrames(pcs[:n])
if file == "<autogenerated>" {
break
}
f := runtime.FuncForPC(pc) for {
if f == nil { frame, more := frames.Next()
break pc = frame.PC
} file = frame.File
name = f.Name() line = frame.Line
// testing.tRunner is the standard library function that calls // This is a huge edge case, but it will panic if this is the case, see #180
// tests. Subtests are called directly by tRunner, without going through if file == "<autogenerated>" {
// the Test/Benchmark/Example function that contains the t.Run calls, so break
// with subtests we should break when we hit tRunner, without adding it }
// to the list of callers.
if name == "testing.tRunner" {
break
}
parts := strings.Split(file, "/") f := runtime.FuncForPC(pc)
if len(parts) > 1 { if f == nil {
filename := parts[len(parts)-1] break
dir := parts[len(parts)-2] }
if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { name = f.Name()
callers = append(callers, fmt.Sprintf("%s:%d", file, line))
// testing.tRunner is the standard library function that calls
// tests. Subtests are called directly by tRunner, without going through
// the Test/Benchmark/Example function that contains the t.Run calls, so
// with subtests we should break when we hit tRunner, without adding it
// to the list of callers.
if name == "testing.tRunner" {
break
}
parts := strings.Split(file, "/")
if len(parts) > 1 {
filename := parts[len(parts)-1]
dir := parts[len(parts)-2]
if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
callers = append(callers, fmt.Sprintf("%s:%d", file, line))
}
}
// Drop the package
dotPos := strings.LastIndexByte(name, '.')
name = name[dotPos+1:]
if isTest(name, "Test") ||
isTest(name, "Benchmark") ||
isTest(name, "Example") {
break
}
if !more {
break
} }
} }
// Drop the package // Next batch
segments := strings.Split(name, ".") offset += cap(pcs)
name = segments[len(segments)-1]
if isTest(name, "Test") ||
isTest(name, "Benchmark") ||
isTest(name, "Example") {
break
}
} }
return callers return callers
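The move from repeated runtime.Caller calls to batched runtime.Callers plus runtime.CallersFrames follows the standard library's recommended pattern; a minimal standalone sketch, independent of testify:

package main

import (
	"fmt"
	"runtime"
)

// printStack walks the caller stack in fixed-size batches, mirroring the
// approach above: runtime.Callers fills a PC buffer, runtime.CallersFrames
// expands it (including inlined frames), and the offset advances per batch.
func printStack() {
	const batchSize = 10
	pcs := make([]uintptr, batchSize)
	offset := 1 // 1 skips the runtime.Callers frame itself

	for {
		n := runtime.Callers(offset, pcs)
		if n == 0 {
			break
		}
		frames := runtime.CallersFrames(pcs[:n])
		for {
			frame, more := frames.Next()
			fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
			if !more {
				break
			}
		}
		offset += cap(pcs) // next batch
	}
}

func main() { printStack() }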
@ -437,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{},
return true return true
} }
func isType(expectedType, object interface{}) bool {
return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType))
}
// IsType asserts that the specified objects are of the same type. // IsType asserts that the specified objects are of the same type.
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { //
// assert.IsType(t, &MyStruct{}, &MyStruct{})
func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool {
if isType(expectedType, object) {
return true
}
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...)
}
if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { // IsNotType asserts that the specified objects are not of the same type.
return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) //
// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{})
func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool {
if !isType(theType, object) {
return true
} }
if h, ok := t.(tHelper); ok {
return true h.Helper()
}
return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...)
} }
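A short example of the new IsNotType assertion next to IsType; the test and types are hypothetical:

package demo

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type Dog struct{}
type Cat struct{}

func TestAnimalTypes(t *testing.T) {
	var animal interface{} = &Dog{}

	assert.IsType(t, &Dog{}, animal)    // passes: both values are *Dog
	assert.IsNotType(t, &Cat{}, animal) // passes: animal is not a *Cat
}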
// Equal asserts that two objects are equal. // Equal asserts that two objects are equal.
@ -475,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
} }
return true return true
} }
// validateEqualArgs checks whether provided arguments can be safely used in the // validateEqualArgs checks whether provided arguments can be safely used in the
@ -510,8 +544,9 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
if !same { if !same {
// both are pointers but not the same type & pointing to the same address // both are pointers but not the same type & pointing to the same address
return Fail(t, fmt.Sprintf("Not same: \n"+ return Fail(t, fmt.Sprintf("Not same: \n"+
"expected: %p %#v\n"+ "expected: %p %#[1]v\n"+
"actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) "actual : %p %#[2]v",
expected, actual), msgAndArgs...)
} }
return true return true
@ -530,14 +565,14 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
same, ok := samePointers(expected, actual) same, ok := samePointers(expected, actual)
if !ok { if !ok {
//fails when the arguments are not pointers // fails when the arguments are not pointers
return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) return !(Fail(t, "Both arguments must be pointers", msgAndArgs...))
} }
if same { if same {
return Fail(t, fmt.Sprintf( return Fail(t, fmt.Sprintf(
"Expected and actual point to the same object: %p %#v", "Expected and actual point to the same object: %p %#[1]v",
expected, expected), msgAndArgs...) expected), msgAndArgs...)
} }
return true return true
} }
@ -549,7 +584,7 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
func samePointers(first, second interface{}) (same bool, ok bool) { func samePointers(first, second interface{}) (same bool, ok bool) {
firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
return false, false //not both are pointers return false, false // not both are pointers
} }
firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
@ -610,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa
} }
return true return true
} }
// EqualExportedValues asserts that the types of two objects are equal and their public // EqualExportedValues asserts that the types of two objects are equal and their public
@ -665,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
} }
return Equal(t, expected, actual, msgAndArgs...) return Equal(t, expected, actual, msgAndArgs...)
} }
// NotNil asserts that the specified object is not nil. // NotNil asserts that the specified object is not nil.
@ -715,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
// isEmpty gets whether the specified object is considered empty or not. // isEmpty gets whether the specified object is considered empty or not.
func isEmpty(object interface{}) bool { func isEmpty(object interface{}) bool {
// get nil case out of the way // get nil case out of the way
if object == nil { if object == nil {
return true return true
} }
objValue := reflect.ValueOf(object) return isEmptyValue(reflect.ValueOf(object))
switch objValue.Kind() {
// collection types are empty when they have no element
case reflect.Chan, reflect.Map, reflect.Slice:
return objValue.Len() == 0
// pointers are empty if nil or if the value they point to is empty
case reflect.Ptr:
if objValue.IsNil() {
return true
}
deref := objValue.Elem().Interface()
return isEmpty(deref)
// for all other types, compare against the zero value
// array types are empty when they match their zero-initialized state
default:
zero := reflect.Zero(objValue.Type())
return reflect.DeepEqual(object, zero.Interface())
}
} }
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // isEmptyValue gets whether the specified reflect.Value is considered empty or not.
// a slice or a channel with len == 0. func isEmptyValue(objValue reflect.Value) bool {
if objValue.IsZero() {
return true
}
// Special cases of non-zero values that we consider empty
switch objValue.Kind() {
// collection types are empty when they have no element
// Note: array types are empty when they match their zero-initialized state.
case reflect.Chan, reflect.Map, reflect.Slice:
return objValue.Len() == 0
// non-nil pointers are empty if the value they point to is empty
case reflect.Ptr:
return isEmptyValue(objValue.Elem())
}
return false
}
// Empty asserts that the given value is "empty".
//
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// assert.Empty(t, obj) // assert.Empty(t, obj)
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
pass := isEmpty(object) pass := isEmpty(object)
if !pass { if !pass {
@ -756,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
} }
return pass return pass
} }
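Illustrative example (not part of the diff) of the emptiness rules documented above: zero values, zero-length collections, pointers to empty values, and arrays of zero values all count as "empty".

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEmptySemantics(t *testing.T) {
	zero := 0

	assert.Empty(t, "")                // zero value string
	assert.Empty(t, []int{})           // zero-length slice
	assert.Empty(t, map[string]int{})  // zero-length map
	assert.Empty(t, &zero)             // pointer to an empty (zero) value
	assert.Empty(t, [2]int{0, 0})      // array whose elements are all zero values

	assert.NotEmpty(t, []int{0}) // non-zero length, even though the element is zero
}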
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmpty asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if assert.NotEmpty(t, obj) { // if assert.NotEmpty(t, obj) {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
@ -775,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
} }
return pass return pass
} }
// getLen tries to get the length of an object. // getLen tries to get the length of an object.
@ -819,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
} }
return true return true
} }
// False asserts that the specified value is false. // False asserts that the specified value is false.
@ -834,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
} }
return true return true
} }
// NotEqual asserts that the specified values are NOT equal. // NotEqual asserts that the specified values are NOT equal.
@ -857,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{
} }
return true return true
} }
// NotEqualValues asserts that two objects are not equal even when converted to the same type // NotEqualValues asserts that two objects are not equal even when converted to the same type
@ -880,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte
// return (true, false) if element was not found. // return (true, false) if element was not found.
// return (true, true) if element was found. // return (true, true) if element was found.
func containsElement(list interface{}, element interface{}) (ok, found bool) { func containsElement(list interface{}, element interface{}) (ok, found bool) {
listValue := reflect.ValueOf(list) listValue := reflect.ValueOf(list)
listType := reflect.TypeOf(list) listType := reflect.TypeOf(list)
if listType == nil { if listType == nil {
@ -915,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) {
} }
} }
return true, false return true, false
} }
// Contains asserts that the specified string, list(array, slice...) or map contains the // Contains asserts that the specified string, list(array, slice...) or map contains the
@ -938,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
} }
return true return true
} }
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
@ -961,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
} }
return true return true
} }
// Subset asserts that the specified list(array, slice...) or map contains all // Subset asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// assert.Subset(t, [1, 2, 3], [1, 2]) // assert.Subset(t, [1, 2, 3], [1, 2])
// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) // assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"})
// assert.Subset(t, {"x": 1, "y": 2}, ["x"])
func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -983,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
} }
subsetKind := reflect.TypeOf(subset).Kind() subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
} }
@ -1007,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
} }
subsetList := reflect.ValueOf(subset) subsetList := reflect.ValueOf(subset)
if subsetKind == reflect.Map {
keys := make([]interface{}, subsetList.Len())
for idx, key := range subsetList.MapKeys() {
keys[idx] = key.Interface()
}
subsetList = reflect.ValueOf(keys)
}
for i := 0; i < subsetList.Len(); i++ { for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface() element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element) ok, found := containsElement(list, element)
@ -1021,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true return true
} }
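Illustrative example (not part of the diff) of the subset semantics described above, including the new handling where a map subset compared against a slice is reduced to its keys.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetSemantics(t *testing.T) {
	// Slice against slice: every subset element must appear in the list.
	assert.Subset(t, []int{1, 2, 3}, []int{1, 2})

	// Map against map: key-value pairs must match.
	assert.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 1})

	// Map subset against a slice list: only the map keys are looked up.
	assert.Subset(t, []int{1, 2, 3}, map[int]string{1: "one", 2: "two"})

	// Slice subset against a map list: elements are matched against map keys.
	assert.Subset(t, map[string]int{"x": 1, "y": 2}, []string{"x"})
}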
// NotSubset asserts that the specified list(array, slice...) or map does NOT // NotSubset asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// assert.NotSubset(t, [1, 3, 4], [1, 2]) // assert.NotSubset(t, [1, 3, 4], [1, 2])
// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) // assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"})
// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"])
func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1041,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
} }
subsetKind := reflect.TypeOf(subset).Kind() subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
} }
@ -1065,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
} }
subsetList := reflect.ValueOf(subset) subsetList := reflect.ValueOf(subset)
if subsetKind == reflect.Map {
keys := make([]interface{}, subsetList.Len())
for idx, key := range subsetList.MapKeys() {
keys[idx] = key.Interface()
}
subsetList = reflect.ValueOf(keys)
}
for i := 0; i < subsetList.Len(); i++ { for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface() element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element) ok, found := containsElement(list, element)
if !ok { if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...)
} }
if !found { if !found {
return true return true
@ -1591,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
// Error asserts that a function returned an error (i.e. not `nil`). // Error asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if assert.Error(t, err) { // assert.Error(t, err)
// assert.Equal(t, expectedError, err)
// }
func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err == nil { if err == nil {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -1667,7 +1717,6 @@ func matchRegexp(rx interface{}, str interface{}) bool {
default: default:
return r.MatchString(fmt.Sprint(v)) return r.MatchString(fmt.Sprint(v))
} }
} }
// Regexp asserts that a specified regexp matches a string. // Regexp asserts that a specified regexp matches a string.
@ -1703,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf
} }
return !match return !match
} }
// Zero asserts that i is the zero value for its type. // Zero asserts that i is the zero value for its type.
@ -1814,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{
return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
} }
// Shortcut if same bytes
if actual == expected {
return true
}
if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
} }
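Illustrative example (not part of the diff): JSONEq compares decoded values, so key order and whitespace are irrelevant; with the shortcut above, byte-identical inputs now pass without decoding at all.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestJSONEq(t *testing.T) {
	// Passes: same object, different key order and spacing.
	assert.JSONEq(t, `{"a": 1, "b": [2, 3]}`, `{"b":[2,3],"a":1}`)
}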
@ -1832,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{
return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
} }
// Shortcut if same bytes
if actual == expected {
return true
}
if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
} }
@ -1933,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
} }
ch := make(chan bool, 1) ch := make(chan bool, 1)
checkCond := func() { ch <- condition() }
timer := time.NewTimer(waitFor) timer := time.NewTimer(waitFor)
defer timer.Stop() defer timer.Stop()
@ -1940,18 +1999,23 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
ticker := time.NewTicker(tick) ticker := time.NewTicker(tick)
defer ticker.Stop() defer ticker.Stop()
for tick := ticker.C; ; { var tickC <-chan time.Time
// Check the condition once first on the initial call.
go checkCond()
for {
select { select {
case <-timer.C: case <-timer.C:
return Fail(t, "Condition never satisfied", msgAndArgs...) return Fail(t, "Condition never satisfied", msgAndArgs...)
case <-tick: case <-tickC:
tick = nil tickC = nil
go func() { ch <- condition() }() go checkCond()
case v := <-ch: case v := <-ch:
if v { if v {
return true return true
} }
tick = ticker.C tickC = ticker.C
} }
} }
} }
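Illustrative example (not part of the diff) of the Eventually loop refactored above; per the new comment, the condition is now checked once immediately instead of only after the first tick.

package example_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventuallyExample(t *testing.T) {
	var counter int32

	go func() {
		time.Sleep(50 * time.Millisecond)
		atomic.StoreInt32(&counter, 1)
	}()

	// Polls the condition every 10ms, failing if it never holds within 1s.
	assert.Eventually(t, func() bool {
		return atomic.LoadInt32(&counter) == 1
	}, time.Second, 10*time.Millisecond)
}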
@ -1964,6 +2028,9 @@ type CollectT struct {
errors []error errors []error
} }
// Helper is like [testing.T.Helper] but does nothing.
func (CollectT) Helper() {}
// Errorf collects the error. // Errorf collects the error.
func (c *CollectT) Errorf(format string, args ...interface{}) { func (c *CollectT) Errorf(format string, args ...interface{}) {
c.errors = append(c.errors, fmt.Errorf(format, args...)) c.errors = append(c.errors, fmt.Errorf(format, args...))
@ -2021,35 +2088,42 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
var lastFinishedTickErrs []error var lastFinishedTickErrs []error
ch := make(chan *CollectT, 1) ch := make(chan *CollectT, 1)
checkCond := func() {
collect := new(CollectT)
defer func() {
ch <- collect
}()
condition(collect)
}
timer := time.NewTimer(waitFor) timer := time.NewTimer(waitFor)
defer timer.Stop() defer timer.Stop()
ticker := time.NewTicker(tick) ticker := time.NewTicker(tick)
defer ticker.Stop() defer ticker.Stop()
for tick := ticker.C; ; { var tickC <-chan time.Time
// Check the condition once first on the initial call.
go checkCond()
for {
select { select {
case <-timer.C: case <-timer.C:
for _, err := range lastFinishedTickErrs { for _, err := range lastFinishedTickErrs {
t.Errorf("%v", err) t.Errorf("%v", err)
} }
return Fail(t, "Condition never satisfied", msgAndArgs...) return Fail(t, "Condition never satisfied", msgAndArgs...)
case <-tick: case <-tickC:
tick = nil tickC = nil
go func() { go checkCond()
collect := new(CollectT)
defer func() {
ch <- collect
}()
condition(collect)
}()
case collect := <-ch: case collect := <-ch:
if !collect.failed() { if !collect.failed() {
return true return true
} }
// Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
lastFinishedTickErrs = collect.errors lastFinishedTickErrs = collect.errors
tick = ticker.C tickC = ticker.C
} }
} }
} }
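Illustrative example (not part of the diff) of EventuallyWithT, whose polling loop is restructured the same way above: each attempt runs the callback against a fresh *assert.CollectT, and only the errors from the last completed attempt are reported on timeout.

package example_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventuallyWithTExample(t *testing.T) {
	var value atomic.Value
	value.Store("starting")

	go func() {
		time.Sleep(50 * time.Millisecond)
		value.Store("ready")
	}()

	// Assertions made against the CollectT are collected per attempt rather
	// than failing the test immediately.
	assert.EventuallyWithT(t, func(c *assert.CollectT) {
		assert.Equal(c, "ready", value.Load())
	}, time.Second, 10*time.Millisecond)
}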
@ -2064,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
} }
ch := make(chan bool, 1) ch := make(chan bool, 1)
checkCond := func() { ch <- condition() }
timer := time.NewTimer(waitFor) timer := time.NewTimer(waitFor)
defer timer.Stop() defer timer.Stop()
@ -2071,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
ticker := time.NewTicker(tick) ticker := time.NewTicker(tick)
defer ticker.Stop() defer ticker.Stop()
for tick := ticker.C; ; { var tickC <-chan time.Time
// Check the condition once first on the initial call.
go checkCond()
for {
select { select {
case <-timer.C: case <-timer.C:
return true return true
case <-tick: case <-tickC:
tick = nil tickC = nil
go func() { ch <- condition() }() go checkCond()
case v := <-ch: case v := <-ch:
if v { if v {
return Fail(t, "Condition satisfied", msgAndArgs...) return Fail(t, "Condition satisfied", msgAndArgs...)
} }
tick = ticker.C tickC = ticker.C
} }
} }
} }
@ -2100,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
var expectedText string var expectedText string
if target != nil { if target != nil {
expectedText = target.Error() expectedText = target.Error()
if err == nil {
return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...)
}
} }
chain := buildErrorChainString(err) chain := buildErrorChainString(err, false)
return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
"expected: %q\n"+ "expected: %q\n"+
@ -2125,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
expectedText = target.Error() expectedText = target.Error()
} }
chain := buildErrorChainString(err) chain := buildErrorChainString(err, false)
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
"found: %q\n"+ "found: %q\n"+
@ -2143,11 +2226,17 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{
return true return true
} }
chain := buildErrorChainString(err) expectedType := reflect.TypeOf(target).Elem().String()
if err == nil {
return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+
"expected: %s", expectedType), msgAndArgs...)
}
chain := buildErrorChainString(err, true)
return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
"expected: %q\n"+ "expected: %s\n"+
"in chain: %s", target, chain, "in chain: %s", expectedType, chain,
), msgAndArgs...) ), msgAndArgs...)
} }
@ -2161,24 +2250,46 @@ func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interfa
return true return true
} }
chain := buildErrorChainString(err) chain := buildErrorChainString(err, true)
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
"found: %q\n"+ "found: %s\n"+
"in chain: %s", target, chain, "in chain: %s", reflect.TypeOf(target).Elem().String(), chain,
), msgAndArgs...) ), msgAndArgs...)
} }
func buildErrorChainString(err error) string { func unwrapAll(err error) (errs []error) {
errs = append(errs, err)
switch x := err.(type) {
case interface{ Unwrap() error }:
err = x.Unwrap()
if err == nil {
return
}
errs = append(errs, unwrapAll(err)...)
case interface{ Unwrap() []error }:
for _, err := range x.Unwrap() {
errs = append(errs, unwrapAll(err)...)
}
}
return
}
func buildErrorChainString(err error, withType bool) string {
if err == nil { if err == nil {
return "" return ""
} }
e := errors.Unwrap(err) var chain string
chain := fmt.Sprintf("%q", err.Error()) errs := unwrapAll(err)
for e != nil { for i := range errs {
chain += fmt.Sprintf("\n\t%q", e.Error()) if i != 0 {
e = errors.Unwrap(e) chain += "\n\t"
}
chain += fmt.Sprintf("%q", errs[i].Error())
if withType {
chain += fmt.Sprintf(" (%T)", errs[i])
}
} }
return chain return chain
} }
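Illustrative example (not part of the diff): with the new unwrapAll helper, the failure output of ErrorIs/ErrorAs now walks branching chains built with errors.Join and %w, and ErrorAs/NotErrorAs annotate each entry with its type. The timeoutError type is a hypothetical fixture.

package example_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

type timeoutError struct{ op string }

func (e *timeoutError) Error() string { return e.op + " timed out" }

var errNotFound = errors.New("not found")

func TestErrorChain(t *testing.T) {
	err := fmt.Errorf("loading config: %w",
		errors.Join(errNotFound, errors.New("permission denied")))

	// Passes: errNotFound sits somewhere in the branching chain.
	assert.ErrorIs(t, err, errNotFound)

	// Passes: no *timeoutError anywhere in the chain. On failure, the chain
	// would be printed with each error's concrete type appended.
	var te *timeoutError
	assert.NotErrorAs(t, err, &te)
}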

View file

@ -1,5 +1,9 @@
// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
// //
// # Note
//
// All functions in this package return a bool value indicating whether the assertion has passed.
//
// # Example Usage // # Example Usage
// //
// The following is a complete example using assert in a standard test function: // The following is a complete example using assert in a standard test function:

View file

@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
contains := strings.Contains(body, fmt.Sprint(str)) contains := strings.Contains(body, fmt.Sprint(str))
if !contains { if !contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
} }
return contains return contains
@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin
contains := strings.Contains(body, fmt.Sprint(str)) contains := strings.Contains(body, fmt.Sprint(str))
if contains { if contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
} }
return !contains return !contains

View file

@ -1,5 +1,4 @@
//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default //go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default
// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default
// Package yaml is an implementation of YAML functions that calls a pluggable implementation. // Package yaml is an implementation of YAML functions that calls a pluggable implementation.
// //

View file

@ -1,5 +1,4 @@
//go:build !testify_yaml_fail && !testify_yaml_custom //go:build !testify_yaml_fail && !testify_yaml_custom
// +build !testify_yaml_fail,!testify_yaml_custom
// Package yaml is just an indirection to handle YAML deserialization. // Package yaml is just an indirection to handle YAML deserialization.
// //

View file

@ -1,5 +1,4 @@
//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default //go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default
// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default
// Package yaml is an implementation of YAML functions that always fail. // Package yaml is an implementation of YAML functions that always fail.
// //

View file

@ -23,6 +23,8 @@
// //
// The `require` package have same global functions as in the `assert` package, // The `require` package have same global functions as in the `assert` package,
// but instead of returning a boolean result they call `t.FailNow()`. // but instead of returning a boolean result they call `t.FailNow()`.
// A consequence of this is that it must be called from the goroutine running
// the test function, not from other goroutines created during the test.
// //
// Every assertion function also takes an optional string message as the final argument, // Every assertion function also takes an optional string message as the final argument,
// allowing custom error messages to be appended to the message the assertion method outputs. // allowing custom error messages to be appended to the message the assertion method outputs.
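Illustrative example (not part of the diff) of the goroutine constraint noted above: t.FailNow only works from the goroutine running the test, so spawned goroutines should use assert (or collect errors) while the test goroutine itself may use require.

package example_test

import (
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestWorker(t *testing.T) {
	results := make(chan int, 1)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// In a spawned goroutine, prefer assert: require would call
		// t.FailNow from the wrong goroutine.
		assert.NotNil(t, results)
		results <- 42
	}()
	wg.Wait()

	// On the test goroutine itself, require is fine.
	require.Equal(t, 42, <-results)
}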

View file

@ -117,10 +117,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string
t.FailNow() t.FailNow()
} }
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Empty asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// require.Empty(t, obj) // require.Empty(t, obj)
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -131,10 +140,19 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
t.FailNow() t.FailNow()
} }
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Emptyf asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// require.Emptyf(t, obj, "error message %s", "formatted") // require.Emptyf(t, obj, "error message %s", "formatted")
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -279,10 +297,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar
// Error asserts that a function returned an error (i.e. not `nil`). // Error asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if require.Error(t, err) { // require.Error(t, err)
// require.Equal(t, expectedError, err)
// }
func Error(t TestingT, err error, msgAndArgs ...interface{}) { func Error(t TestingT, err error, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -373,10 +389,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface
// Errorf asserts that a function returned an error (i.e. not `nil`). // Errorf asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if require.Errorf(t, err, "error message %s", "formatted") { // require.Errorf(t, err, "error message %s", "formatted")
// require.Equal(t, expectedErrorf, err)
// }
func Errorf(t TestingT, err error, msg string, args ...interface{}) { func Errorf(t TestingT, err error, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1097,7 +1111,35 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf
t.FailNow() t.FailNow()
} }
// IsNotType asserts that the specified objects are not of the same type.
//
// require.IsNotType(t, &NotMyStruct{}, &MyStruct{})
func IsNotType(t TestingT, theType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.IsNotType(t, theType, object, msgAndArgs...) {
return
}
t.FailNow()
}
// IsNotTypef asserts that the specified objects are not of the same type.
//
// require.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.IsNotTypef(t, theType, object, msg, args...) {
return
}
t.FailNow()
}
// IsType asserts that the specified objects are of the same type. // IsType asserts that the specified objects are of the same type.
//
// require.IsType(t, &MyStruct{}, &MyStruct{})
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1109,6 +1151,8 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs
} }
// IsTypef asserts that the specified objects are of the same type. // IsTypef asserts that the specified objects are of the same type.
//
// require.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1469,8 +1513,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str
t.FailNow() t.FailNow()
} }
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmpty asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if require.NotEmpty(t, obj) { // if require.NotEmpty(t, obj) {
// require.Equal(t, "two", obj[1]) // require.Equal(t, "two", obj[1])
@ -1485,8 +1528,7 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
t.FailNow() t.FailNow()
} }
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmptyf asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if require.NotEmptyf(t, obj, "error message %s", "formatted") { // if require.NotEmptyf(t, obj, "error message %s", "formatted") {
// require.Equal(t, "two", obj[1]) // require.Equal(t, "two", obj[1])
@ -1745,12 +1787,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
t.FailNow() t.FailNow()
} }
// NotSubset asserts that the specified list(array, slice...) or map does NOT // NotSubset asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// require.NotSubset(t, [1, 3, 4], [1, 2]) // require.NotSubset(t, [1, 3, 4], [1, 2])
// require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) // require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
// require.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"})
// require.NotSubset(t, {"x": 1, "y": 2}, ["z"])
func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1761,12 +1806,15 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i
t.FailNow() t.FailNow()
} }
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT // NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
// require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") // require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
// require.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
// require.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1971,11 +2019,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
t.FailNow() t.FailNow()
} }
// Subset asserts that the specified list(array, slice...) or map contains all // Subset asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// require.Subset(t, [1, 2, 3], [1, 2]) // require.Subset(t, [1, 2, 3], [1, 2])
// require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) // require.Subset(t, {"x": 1, "y": 2}, {"x": 1})
// require.Subset(t, [1, 2, 3], {1: "one", 2: "two"})
// require.Subset(t, {"x": 1, "y": 2}, ["x"])
func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1986,11 +2038,15 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte
t.FailNow() t.FailNow()
} }
// Subsetf asserts that the specified list(array, slice...) or map contains all // Subsetf asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
// require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") // require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
// require.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
// require.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()

View file

@ -93,10 +93,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st
ElementsMatchf(a.t, listA, listB, msg, args...) ElementsMatchf(a.t, listA, listB, msg, args...)
} }
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Empty asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// a.Empty(obj) // a.Empty(obj)
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -104,10 +113,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
Empty(a.t, object, msgAndArgs...) Empty(a.t, object, msgAndArgs...)
} }
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Emptyf asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// a.Emptyf(obj, "error message %s", "formatted") // a.Emptyf(obj, "error message %s", "formatted")
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -225,10 +243,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string
// Error asserts that a function returned an error (i.e. not `nil`). // Error asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if a.Error(err) { // a.Error(err)
// assert.Equal(t, expectedError, err)
// }
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -298,10 +314,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter
// Errorf asserts that a function returned an error (i.e. not `nil`). // Errorf asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if a.Errorf(err, "error message %s", "formatted") { // a.Errorf(err, "error message %s", "formatted")
// assert.Equal(t, expectedErrorf, err)
// }
func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { func (a *Assertions) Errorf(err error, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -869,7 +883,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in
IsNonIncreasingf(a.t, object, msg, args...) IsNonIncreasingf(a.t, object, msg, args...)
} }
// IsNotType asserts that the specified objects are not of the same type.
//
// a.IsNotType(&NotMyStruct{}, &MyStruct{})
func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
IsNotType(a.t, theType, object, msgAndArgs...)
}
// IsNotTypef asserts that the specified objects are not of the same type.
//
// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
IsNotTypef(a.t, theType, object, msg, args...)
}
// IsType asserts that the specified objects are of the same type. // IsType asserts that the specified objects are of the same type.
//
// a.IsType(&MyStruct{}, &MyStruct{})
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -878,6 +914,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd
} }
// IsTypef asserts that the specified objects are of the same type. // IsTypef asserts that the specified objects are of the same type.
//
// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1163,8 +1201,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg
NotElementsMatchf(a.t, listA, listB, msg, args...) NotElementsMatchf(a.t, listA, listB, msg, args...)
} }
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmpty asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if a.NotEmpty(obj) { // if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
@ -1176,8 +1213,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
NotEmpty(a.t, object, msgAndArgs...) NotEmpty(a.t, object, msgAndArgs...)
} }
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmptyf asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if a.NotEmptyf(obj, "error message %s", "formatted") { // if a.NotEmptyf(obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
@ -1379,12 +1415,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
NotSamef(a.t, expected, actual, msg, args...) NotSamef(a.t, expected, actual, msg, args...)
} }
// NotSubset asserts that the specified list(array, slice...) or map does NOT // NotSubset asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset([1, 3, 4], [1, 2])
// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) // a.NotSubset({"x": 1, "y": 2}, {"z": 3})
// a.NotSubset([1, 3, 4], {1: "one", 2: "two"})
// a.NotSubset({"x": 1, "y": 2}, ["z"])
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1392,12 +1431,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
NotSubset(a.t, list, subset, msgAndArgs...) NotSubset(a.t, list, subset, msgAndArgs...)
} }
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT // NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1557,11 +1599,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
Samef(a.t, expected, actual, msg, args...) Samef(a.t, expected, actual, msg, args...)
} }
// Subset asserts that the specified list(array, slice...) or map contains all // Subset asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.Subset([1, 2, 3], [1, 2]) // a.Subset([1, 2, 3], [1, 2])
// a.Subset({"x": 1, "y": 2}, {"x": 1}) // a.Subset({"x": 1, "y": 2}, {"x": 1})
// a.Subset([1, 2, 3], {1: "one", 2: "two"})
// a.Subset({"x": 1, "y": 2}, ["x"])
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1569,11 +1615,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
Subset(a.t, list, subset, msgAndArgs...) Subset(a.t, list, subset, msgAndArgs...)
} }
// Subsetf asserts that the specified list(array, slice...) or map contains all // Subsetf asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()

View file

@ -16,26 +16,30 @@ type TestInformation struct {
} }
func newSuiteInformation() *SuiteInformation { func newSuiteInformation() *SuiteInformation {
testStats := make(map[string]*TestInformation)
return &SuiteInformation{ return &SuiteInformation{
TestStats: testStats, TestStats: make(map[string]*TestInformation),
} }
} }
func (s SuiteInformation) start(testName string) { func (s *SuiteInformation) start(testName string) {
if s == nil {
return
}
s.TestStats[testName] = &TestInformation{ s.TestStats[testName] = &TestInformation{
TestName: testName, TestName: testName,
Start: time.Now(), Start: time.Now(),
} }
} }
func (s SuiteInformation) end(testName string, passed bool) { func (s *SuiteInformation) end(testName string, passed bool) {
if s == nil {
return
}
s.TestStats[testName].End = time.Now() s.TestStats[testName].End = time.Now()
s.TestStats[testName].Passed = passed s.TestStats[testName].Passed = passed
} }
func (s SuiteInformation) Passed() bool { func (s *SuiteInformation) Passed() bool {
for _, stats := range s.TestStats { for _, stats := range s.TestStats {
if !stats.Passed { if !stats.Passed {
return false return false

View file

@ -7,6 +7,7 @@ import (
"reflect" "reflect"
"regexp" "regexp"
"runtime/debug" "runtime/debug"
"strings"
"sync" "sync"
"testing" "testing"
"time" "time"
@ -15,7 +16,6 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var allTestsFilter = func(_, _ string) (bool, error) { return true, nil }
var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run") var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run")
// Suite is a basic testing suite with methods for storing and // Suite is a basic testing suite with methods for storing and
@ -116,6 +116,11 @@ func (suite *Suite) Run(name string, subtest func()) bool {
}) })
} }
type test = struct {
name string
run func(t *testing.T)
}
// Run takes a testing suite and runs all of the tests attached // Run takes a testing suite and runs all of the tests attached
// to it. // to it.
func Run(t *testing.T, suite TestingSuite) { func Run(t *testing.T, suite TestingSuite) {
@ -124,45 +129,39 @@ func Run(t *testing.T, suite TestingSuite) {
suite.SetT(t) suite.SetT(t)
suite.SetS(suite) suite.SetS(suite)
var suiteSetupDone bool
var stats *SuiteInformation var stats *SuiteInformation
if _, ok := suite.(WithStats); ok { if _, ok := suite.(WithStats); ok {
stats = newSuiteInformation() stats = newSuiteInformation()
} }
tests := []testing.InternalTest{} var tests []test
methodFinder := reflect.TypeOf(suite) methodFinder := reflect.TypeOf(suite)
suiteName := methodFinder.Elem().Name() suiteName := methodFinder.Elem().Name()
for i := 0; i < methodFinder.NumMethod(); i++ { var matchMethodRE *regexp.Regexp
method := methodFinder.Method(i) if *matchMethod != "" {
var err error
ok, err := methodFilter(method.Name) matchMethodRE, err = regexp.Compile(*matchMethod)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err) fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err)
os.Exit(1) os.Exit(1)
} }
}
if !ok { for i := 0; i < methodFinder.NumMethod(); i++ {
method := methodFinder.Method(i)
if !strings.HasPrefix(method.Name, "Test") {
continue
}
// Apply -testify.m filter
if matchMethodRE != nil && !matchMethodRE.MatchString(method.Name) {
continue continue
} }
if !suiteSetupDone { test := test{
if stats != nil { name: method.Name,
stats.Start = time.Now() run: func(t *testing.T) {
}
if setupAllSuite, ok := suite.(SetupAllSuite); ok {
setupAllSuite.SetupSuite()
}
suiteSetupDone = true
}
test := testing.InternalTest{
Name: method.Name,
F: func(t *testing.T) {
parentT := suite.T() parentT := suite.T()
suite.SetT(t) suite.SetT(t)
defer recoverAndFailOnPanic(t) defer recoverAndFailOnPanic(t)
@ -171,10 +170,7 @@ func Run(t *testing.T, suite TestingSuite) {
r := recover() r := recover()
if stats != nil { stats.end(method.Name, !t.Failed() && r == nil)
passed := !t.Failed() && r == nil
stats.end(method.Name, passed)
}
if afterTestSuite, ok := suite.(AfterTest); ok { if afterTestSuite, ok := suite.(AfterTest); ok {
afterTestSuite.AfterTest(suiteName, method.Name) afterTestSuite.AfterTest(suiteName, method.Name)
@ -195,59 +191,47 @@ func Run(t *testing.T, suite TestingSuite) {
beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name) beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name)
} }
if stats != nil { stats.start(method.Name)
stats.start(method.Name)
}
method.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) method.Func.Call([]reflect.Value{reflect.ValueOf(suite)})
}, },
} }
tests = append(tests, test) tests = append(tests, test)
} }
if suiteSetupDone {
defer func() {
if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok {
tearDownAllSuite.TearDownSuite()
}
if suiteWithStats, measureStats := suite.(WithStats); measureStats { if len(tests) == 0 {
stats.End = time.Now() return
suiteWithStats.HandleStats(suiteName, stats)
}
}()
} }
if stats != nil {
stats.Start = time.Now()
}
if setupAllSuite, ok := suite.(SetupAllSuite); ok {
setupAllSuite.SetupSuite()
}
defer func() {
if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok {
tearDownAllSuite.TearDownSuite()
}
if suiteWithStats, measureStats := suite.(WithStats); measureStats {
stats.End = time.Now()
suiteWithStats.HandleStats(suiteName, stats)
}
}()
runTests(t, tests) runTests(t, tests)
} }
// Filtering method according to set regular expression func runTests(t *testing.T, tests []test) {
// specified command-line argument -m
func methodFilter(name string) (bool, error) {
if ok, _ := regexp.MatchString("^Test", name); !ok {
return false, nil
}
return regexp.MatchString(*matchMethod, name)
}
func runTests(t testing.TB, tests []testing.InternalTest) {
if len(tests) == 0 { if len(tests) == 0 {
t.Log("warning: no tests to run") t.Log("warning: no tests to run")
return return
} }
r, ok := t.(runner)
if !ok { // backwards compatibility with Go 1.6 and below
if !testing.RunTests(allTestsFilter, tests) {
t.Fail()
}
return
}
for _, test := range tests { for _, test := range tests {
r.Run(test.Name, test.F) t.Run(test.name, test.run)
} }
} }
type runner interface {
Run(name string, f func(t *testing.T)) bool
}
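Illustrative example (not part of the diff): the restructured Run above still discovers Test* methods via reflection and applies the -testify.m regexp filter, now compiled once up front. ExampleSuite is a hypothetical fixture; something like `go test -run TestExampleSuite -testify.m Two` would run only TestTwo.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type ExampleSuite struct {
	suite.Suite
	db map[string]string
}

func (s *ExampleSuite) SetupSuite() {
	// Runs once before the first Test* method, and not at all when no
	// methods match the -testify.m filter.
	s.db = map[string]string{"greeting": "hello"}
}

func (s *ExampleSuite) TestOne() {
	s.Equal("hello", s.db["greeting"])
}

func (s *ExampleSuite) TestTwo() {
	s.NotEmpty(s.db)
}

func TestExampleSuite(t *testing.T) {
	suite.Run(t, new(ExampleSuite))
}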

View file

@ -68,16 +68,26 @@ func (c *cmdMinifier) Minify(_ *M, w io.Writer, r io.Reader, _ map[string]string
if j := strings.Index(arg, "$in"); j != -1 { if j := strings.Index(arg, "$in"); j != -1 {
var err error var err error
ext := cmdArgExtension.FindString(arg[j+3:]) ext := cmdArgExtension.FindString(arg[j+3:])
if in, err = os.CreateTemp("", "minify-in-*"+ext); err != nil { if in != nil {
return fmt.Errorf("more than one input arguments")
} else if in, err = os.CreateTemp("", "minify-in-*"+ext); err != nil {
return err return err
} }
defer func() {
os.Remove(in.Name())
}()
cmd.Args[i] = arg[:j] + in.Name() + arg[j+3+len(ext):] cmd.Args[i] = arg[:j] + in.Name() + arg[j+3+len(ext):]
} else if j := strings.Index(arg, "$out"); j != -1 { } else if j := strings.Index(arg, "$out"); j != -1 {
var err error var err error
ext := cmdArgExtension.FindString(arg[j+4:]) ext := cmdArgExtension.FindString(arg[j+4:])
if out, err = os.CreateTemp("", "minify-out-*"+ext); err != nil { if out != nil {
return fmt.Errorf("more than one output arguments")
} else if out, err = os.CreateTemp("", "minify-out-*"+ext); err != nil {
return err return err
} }
defer func() {
os.Remove(out.Name())
}()
cmd.Args[i] = arg[:j] + out.Name() + arg[j+4+len(ext):] cmd.Args[i] = arg[:j] + out.Name() + arg[j+4+len(ext):]
} }
} }
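Illustrative sketch (not part of the diff), assuming the external-command API (M.AddCmd) described in the minify README: the $in/$out placeholders handled above are replaced with temp-file paths, and with this change a command may declare at most one of each, with the temp files now removed afterwards. The csso invocation is a hypothetical example.

package main

import (
	"log"
	"os"
	"os/exec"

	"github.com/tdewolff/minify/v2"
)

func main() {
	m := minify.New()

	// The wrapped command reads the temp file substituted for $in.css and
	// writes to the temp file substituted for $out.css.
	m.AddCmd("text/css", exec.Command("csso", "$in.css", "--output", "$out.css"))

	if err := m.Minify("text/css", os.Stdout, os.Stdin); err != nil {
		log.Fatal(err)
	}
}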

View file

@ -10,7 +10,8 @@ SHA256=`sha256sum v$VERSION.tar.gz`
SHA256=( $SHA256 ) SHA256=( $SHA256 )
GOMODCACHE="$PWD"/go-mod go mod download -modcacherw -x GOMODCACHE="$PWD"/go-mod go mod download -modcacherw -x
tar -caf minify-v$VERSION-deps.tar.xz go-mod tar -caf minify-deps.tar.xz go-mod
rm -rf go-mod
echo "" echo ""
echo "Releasing for AUR..." echo "Releasing for AUR..."

View file

@ -5,360 +5,15 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"math"
"os" "os"
) )
const PageSize = 4096 const PageSize = 4096
// BinaryReader is a binary big endian file format reader.
type BinaryReader struct {
Endianness binary.ByteOrder
buf []byte
pos uint32
eof bool
}
// NewBinaryReader returns a big endian binary file format reader.
func NewBinaryReader(buf []byte) *BinaryReader {
if math.MaxUint32 < uint(len(buf)) {
return &BinaryReader{binary.BigEndian, nil, 0, true}
}
return &BinaryReader{binary.BigEndian, buf, 0, false}
}
// NewBinaryReaderLE returns a little endian binary file format reader.
func NewBinaryReaderLE(buf []byte) *BinaryReader {
r := NewBinaryReader(buf)
r.Endianness = binary.LittleEndian
return r
}
// Seek set the reader position in the buffer.
func (r *BinaryReader) Seek(pos uint32) error {
if uint32(len(r.buf)) < pos {
r.eof = true
return io.EOF
}
r.pos = pos
r.eof = false
return nil
}
// Pos returns the reader's position.
func (r *BinaryReader) Pos() uint32 {
return r.pos
}
// Len returns the remaining length of the buffer.
func (r *BinaryReader) Len() uint32 {
return uint32(len(r.buf)) - r.pos
}
// SetLen sets the remaining length of the underlying buffer.
func (r *BinaryReader) SetLen(n uint32) {
r.buf = r.buf[: r.pos+n : r.pos+n]
}
// EOF returns true if we reached the end-of-file.
func (r *BinaryReader) EOF() bool {
return r.eof
}
// Read complies with io.Reader.
func (r *BinaryReader) Read(b []byte) (int, error) {
n := copy(b, r.buf[r.pos:])
r.pos += uint32(n)
if r.pos == uint32(len(r.buf)) {
r.eof = true
return n, io.EOF
}
return n, nil
}
// ReadBytes reads n bytes.
func (r *BinaryReader) ReadBytes(n uint32) []byte {
if r.eof || uint32(len(r.buf))-r.pos < n {
r.eof = true
return nil
}
buf := r.buf[r.pos : r.pos+n : r.pos+n]
r.pos += n
return buf
}
// ReadString reads a string of length n.
func (r *BinaryReader) ReadString(n uint32) string {
return string(r.ReadBytes(n))
}
// ReadByte reads a single byte.
func (r *BinaryReader) ReadByte() byte {
b := r.ReadBytes(1)
if b == nil {
return 0
}
return b[0]
}
// ReadUint8 reads a uint8.
func (r *BinaryReader) ReadUint8() uint8 {
return r.ReadByte()
}
// ReadUint16 reads a uint16.
func (r *BinaryReader) ReadUint16() uint16 {
b := r.ReadBytes(2)
if b == nil {
return 0
}
return r.Endianness.Uint16(b)
}
// ReadUint24 reads a uint24 into a uint32.
func (r *BinaryReader) ReadUint24() uint32 {
b := r.ReadBytes(3)
if b == nil {
return 0
} else if r.Endianness == binary.LittleEndian {
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16
} else {
return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}
}
// ReadUint32 reads a uint32.
func (r *BinaryReader) ReadUint32() uint32 {
b := r.ReadBytes(4)
if b == nil {
return 0
}
return r.Endianness.Uint32(b)
}
// ReadUint64 reads a uint64.
func (r *BinaryReader) ReadUint64() uint64 {
b := r.ReadBytes(8)
if b == nil {
return 0
}
return r.Endianness.Uint64(b)
}
// ReadInt8 reads an int8.
func (r *BinaryReader) ReadInt8() int8 {
return int8(r.ReadByte())
}
// ReadInt16 reads an int16.
func (r *BinaryReader) ReadInt16() int16 {
return int16(r.ReadUint16())
}
// ReadInt24 reads a int24 into an int32.
func (r *BinaryReader) ReadInt24() int32 {
return int32(r.ReadUint24())
}
// ReadInt32 reads an int32.
func (r *BinaryReader) ReadInt32() int32 {
return int32(r.ReadUint32())
}
// ReadInt64 reads an int64.
func (r *BinaryReader) ReadInt64() int64 {
return int64(r.ReadUint64())
}
type BinaryFileReader struct {
f *os.File
size uint64
offset uint64
Endianness binary.ByteOrder
buf []byte
pos int
}
func NewBinaryFileReader(f *os.File, chunk int) (*BinaryFileReader, error) {
var buf []byte
var size uint64
if chunk == 0 {
var err error
if buf, err = io.ReadAll(f); err != nil {
return nil, err
}
} else {
buf = make([]byte, 0, chunk)
}
if info, err := f.Stat(); err != nil {
return nil, err
} else {
size = uint64(info.Size())
}
return &BinaryFileReader{
f: f,
size: size,
Endianness: binary.BigEndian,
buf: buf,
}, nil
}
func (r *BinaryFileReader) buffer(pos, length uint64) error {
if pos < r.offset || r.offset+uint64(len(r.buf)) < pos+length {
if math.MaxInt64 < pos {
return fmt.Errorf("seek position too large")
} else if _, err := r.f.Seek(int64(pos), 0); err != nil {
return err
} else if n, err := r.f.Read(r.buf[:cap(r.buf)]); err != nil {
return err
} else {
r.offset = pos
r.buf = r.buf[:n]
r.pos = 0
}
}
return nil
}
// Seek set the reader position in the buffer.
func (r *BinaryFileReader) Seek(pos uint64) error {
if r.size <= pos {
return io.EOF
} else if err := r.buffer(pos, 0); err != nil {
return err
}
r.pos = int(pos - r.offset)
return nil
}
// Pos returns the reader's position.
func (r *BinaryFileReader) Pos() uint64 {
return r.offset + uint64(r.pos)
}
// Len returns the remaining length of the buffer.
func (r *BinaryFileReader) Len() uint64 {
return r.size - r.Pos()
}
// Offset returns the offset of the buffer.
func (r *BinaryFileReader) Offset() uint64 {
return r.offset
}
// BufferLen returns the length of the buffer.
func (r *BinaryFileReader) BufferLen() int {
return len(r.buf)
}
// Read complies with io.Reader.
func (r *BinaryFileReader) Read(b []byte) (int, error) {
if len(b) <= cap(r.buf) {
if err := r.buffer(r.offset+uint64(r.pos), uint64(len(b))); err != nil {
return 0, err
}
n := copy(b, r.buf[r.pos:])
r.pos += n
return n, nil
}
// read directly from file
if _, err := r.f.Seek(int64(r.offset)+int64(r.pos), 0); err != nil {
return 0, err
}
n, err := r.f.Read(b)
r.offset += uint64(r.pos + n)
r.pos = 0
r.buf = r.buf[:0]
return n, err
}
// ReadBytes reads n bytes.
func (r *BinaryFileReader) ReadBytes(n int) []byte {
if n < len(r.buf)-r.pos {
b := r.buf[r.pos : r.pos+n]
r.pos += n
return b
}
b := make([]byte, n)
if _, err := r.Read(b); err != nil {
return nil
}
return b
}
// ReadString reads a string of length n.
func (r *BinaryFileReader) ReadString(n int) string {
return string(r.ReadBytes(n))
}
// ReadByte reads a single byte.
func (r *BinaryFileReader) ReadByte() byte {
b := r.ReadBytes(1)
if b == nil {
return 0
}
return b[0]
}
// ReadUint8 reads a uint8.
func (r *BinaryFileReader) ReadUint8() uint8 {
return r.ReadByte()
}
// ReadUint16 reads a uint16.
func (r *BinaryFileReader) ReadUint16() uint16 {
b := r.ReadBytes(2)
if b == nil {
return 0
}
return r.Endianness.Uint16(b)
}
// ReadUint32 reads a uint32.
func (r *BinaryFileReader) ReadUint32() uint32 {
b := r.ReadBytes(4)
if b == nil {
return 0
}
return r.Endianness.Uint32(b)
}
// ReadUint64 reads a uint64.
func (r *BinaryFileReader) ReadUint64() uint64 {
b := r.ReadBytes(8)
if b == nil {
return 0
}
return r.Endianness.Uint64(b)
}
// ReadInt8 reads a int8.
func (r *BinaryFileReader) ReadInt8() int8 {
return int8(r.ReadByte())
}
// ReadInt16 reads a int16.
func (r *BinaryFileReader) ReadInt16() int16 {
return int16(r.ReadUint16())
}
// ReadInt32 reads a int32.
func (r *BinaryFileReader) ReadInt32() int32 {
return int32(r.ReadUint32())
}
// ReadInt64 reads a int64.
func (r *BinaryFileReader) ReadInt64() int64 {
return int64(r.ReadUint64())
}
type IBinaryReader interface { type IBinaryReader interface {
Bytes([]byte, int64, int64) ([]byte, error)
Len() int64
Close() error Close() error
Len() int
Bytes(int, int64) ([]byte, error)
} }
type binaryReaderFile struct { type binaryReaderFile struct {
@ -385,20 +40,21 @@ func (r *binaryReaderFile) Close() error {
} }
// Len returns the length of the underlying memory-mapped file. // Len returns the length of the underlying memory-mapped file.
func (r *binaryReaderFile) Len() int { func (r *binaryReaderFile) Len() int64 {
return int(r.size) return r.size
} }
func (r *binaryReaderFile) Bytes(n int, off int64) ([]byte, error) { func (r *binaryReaderFile) Bytes(b []byte, n, off int64) ([]byte, error) {
if _, err := r.f.Seek(off, 0); err != nil { if _, err := r.f.Seek(off, 0); err != nil {
return nil, err return nil, err
} else if b == nil {
b = make([]byte, n)
} }
b := make([]byte, n)
m, err := r.f.Read(b) m, err := r.f.Read(b)
if err != nil { if err != nil {
return nil, err return nil, err
} else if m != n { } else if int64(m) != n {
return nil, errors.New("file: could not read all bytes") return nil, errors.New("file: could not read all bytes")
} }
return b, nil return b, nil
@ -418,20 +74,26 @@ func (r *binaryReaderBytes) Close() error {
} }
// Len returns the length of the underlying memory-mapped file. // Len returns the length of the underlying memory-mapped file.
func (r *binaryReaderBytes) Len() int { func (r *binaryReaderBytes) Len() int64 {
return len(r.data) return int64(len(r.data))
} }
func (r *binaryReaderBytes) Bytes(n int, off int64) ([]byte, error) { func (r *binaryReaderBytes) Bytes(b []byte, n, off int64) ([]byte, error) {
if off < 0 || int64(len(r.data)) < off { if off < 0 || n < 0 || int64(len(r.data)) < off || int64(len(r.data))-off < n {
return nil, fmt.Errorf("bytes: invalid offset %d", off) return nil, fmt.Errorf("bytes: invalid range %d--%d", off, off+n)
} }
return r.data[off : off+int64(n) : off+int64(n)], nil
data := r.data[off : off+n : off+n]
if b == nil {
return data, nil
}
copy(b, data)
return b, nil
} }
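
The reshaped `Bytes(b []byte, n, off int64)` contract shown above has two modes: a nil buffer asks the reader for a view of its own data, while a caller-supplied buffer receives a copy. A self-contained toy implementation of that contract, not taken from this package:

package main

import "fmt"

// byteSource mimics the new IBinaryReader contract over an in-memory slice.
type byteSource struct{ data []byte }

func (s byteSource) Len() int64   { return int64(len(s.data)) }
func (s byteSource) Close() error { return nil }

// Bytes returns a zero-copy sub-slice when b is nil, otherwise it copies
// the requested range into b, mirroring binaryReaderBytes.Bytes above.
func (s byteSource) Bytes(b []byte, n, off int64) ([]byte, error) {
	if off < 0 || n < 0 || int64(len(s.data)) < off || int64(len(s.data))-off < n {
		return nil, fmt.Errorf("invalid range %d--%d", off, off+n)
	}
	data := s.data[off : off+n : off+n]
	if b == nil {
		return data, nil
	}
	copy(b, data)
	return b, nil
}

func main() {
	s := byteSource{data: []byte{0xDE, 0xAD, 0xBE, 0xEF}}

	view, _ := s.Bytes(nil, 2, 0) // shares s.data
	buf := make([]byte, 2)
	if _, err := s.Bytes(buf, 2, 2); err != nil { // copied into buf
		panic(err)
	}
	fmt.Printf("% X  % X\n", view, buf)
}
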
type binaryReaderReader struct { type binaryReaderReader struct {
r io.Reader r io.Reader
n int64 size int64
readerAt bool readerAt bool
seeker bool seeker bool
} }
@ -451,31 +113,33 @@ func (r *binaryReaderReader) Close() error {
} }
// Len returns the length of the underlying memory-mapped file. // Len returns the length of the underlying memory-mapped file.
func (r *binaryReaderReader) Len() int { func (r *binaryReaderReader) Len() int64 {
return int(r.n) return r.size
} }
func (r *binaryReaderReader) Bytes(n int, off int64) ([]byte, error) { func (r *binaryReaderReader) Bytes(b []byte, n, off int64) ([]byte, error) {
if b == nil {
b = make([]byte, n)
}
// seeker seems faster than readerAt by 10% // seeker seems faster than readerAt by 10%
if r.seeker { if r.seeker {
if _, err := r.r.(io.Seeker).Seek(off, 0); err != nil { if _, err := r.r.(io.Seeker).Seek(off, 0); err != nil {
return nil, err return nil, err
} }
b := make([]byte, n)
m, err := r.r.Read(b) m, err := r.r.Read(b)
if err != nil { if err != nil {
return nil, err return nil, err
} else if m != n { } else if int64(m) != n {
return nil, errors.New("file: could not read all bytes") return nil, errors.New("file: could not read all bytes")
} }
return b, nil return b, nil
} else if r.readerAt { } else if r.readerAt {
b := make([]byte, n)
m, err := r.r.(io.ReaderAt).ReadAt(b, off) m, err := r.r.(io.ReaderAt).ReadAt(b, off)
if err != nil { if err != nil {
return nil, err return nil, err
} else if m != n { } else if int64(m) != n {
return nil, errors.New("file: could not read all bytes") return nil, errors.New("file: could not read all bytes")
} }
return b, nil return b, nil
@ -483,22 +147,22 @@ func (r *binaryReaderReader) Bytes(n int, off int64) ([]byte, error) {
return nil, errors.New("io.Seeker and io.ReaderAt not implemented") return nil, errors.New("io.Seeker and io.ReaderAt not implemented")
} }
type BinaryReader2 struct { type BinaryReader struct {
f IBinaryReader f IBinaryReader
pos int64 pos int64
err error err error
Endian binary.ByteOrder ByteOrder binary.ByteOrder
} }
func NewBinaryReader2(f IBinaryReader) *BinaryReader2 { func NewBinaryReader(f IBinaryReader) *BinaryReader {
return &BinaryReader2{ return &BinaryReader{
f: f, f: f,
Endian: binary.BigEndian, ByteOrder: binary.BigEndian,
} }
} }
func NewBinaryReader2Reader(r io.Reader, n int64) (*BinaryReader2, error) { func NewBinaryReaderReader(r io.Reader, n int64) (*BinaryReader, error) {
_, isReaderAt := r.(io.ReaderAt) _, isReaderAt := r.(io.ReaderAt)
_, isSeeker := r.(io.Seeker) _, isSeeker := r.(io.Seeker)
@ -512,27 +176,44 @@ func NewBinaryReader2Reader(r io.Reader, n int64) (*BinaryReader2, error) {
} }
f = newBinaryReaderBytes(b) f = newBinaryReaderBytes(b)
} }
return NewBinaryReader2(f), nil return NewBinaryReader(f), nil
} }
func NewBinaryReader2Bytes(data []byte) *BinaryReader2 { func NewBinaryReaderBytes(data []byte) *BinaryReader {
f := newBinaryReaderBytes(data) f := newBinaryReaderBytes(data)
return NewBinaryReader2(f) return NewBinaryReader(f)
} }
func NewBinaryReader2File(filename string) (*BinaryReader2, error) { func NewBinaryReaderFile(filename string) (*BinaryReader, error) {
f, err := newBinaryReaderFile(filename) f, err := newBinaryReaderFile(filename)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return NewBinaryReader2(f), nil return NewBinaryReader(f), nil
} }
func (r *BinaryReader2) Err() error { func (r *BinaryReader) IBinaryReader() IBinaryReader {
return r.f
}
func (r *BinaryReader) Clone() *BinaryReader {
f := r.f
if cloner, ok := f.(interface{ Clone() IBinaryReader }); ok {
f = cloner.Clone()
}
return &BinaryReader{
f: f,
pos: r.pos,
err: r.err,
ByteOrder: r.ByteOrder,
}
}
func (r *BinaryReader) Err() error {
return r.err return r.err
} }
func (r *BinaryReader2) Close() error { func (r *BinaryReader) Close() error {
if err := r.f.Close(); err != nil { if err := r.f.Close(); err != nil {
return err return err
} }
@ -540,68 +221,90 @@ func (r *BinaryReader2) Close() error {
} }
// InPageCache returns true if the range is already in the page cache (for mmap). // InPageCache returns true if the range is already in the page cache (for mmap).
func (r *BinaryReader2) InPageCache(start, end int64) bool { func (r *BinaryReader) InPageCache(start, end int64) bool {
index := int64(r.Pos()) / PageSize index := r.Pos() / PageSize
return start/PageSize == index && end/PageSize == index return start/PageSize == index && end/PageSize == index
} }
// Free frees all previously read bytes, you cannot seek from before this position (for reader).
func (r *BinaryReader2) Free() {
}
// Pos returns the reader's position. // Pos returns the reader's position.
func (r *BinaryReader2) Pos() int64 { func (r *BinaryReader) Pos() int64 {
return r.pos return r.pos
} }
// Len returns the remaining length of the buffer. // Len returns the remaining length of the buffer.
func (r *BinaryReader2) Len() int { func (r *BinaryReader) Len() int64 {
return int(int64(r.f.Len()) - int64(r.pos)) return r.f.Len() - r.pos
} }
func (r *BinaryReader2) Seek(pos int64) { // Seek complies with io.Seeker.
r.pos = pos func (r *BinaryReader) Seek(off int64, whence int) (int64, error) {
if whence == 0 {
if off < 0 || r.f.Len() < off {
return 0, fmt.Errorf("invalid offset")
}
r.pos = off
} else if whence == 1 {
if r.pos+off < 0 || r.f.Len() < r.pos+off {
return 0, fmt.Errorf("invalid offset")
}
r.pos += off
} else if whence == 2 {
if off < -r.f.Len() || 0 < off {
return 0, fmt.Errorf("invalid offset")
}
r.pos = r.f.Len() - off
} else {
return 0, fmt.Errorf("invalid whence")
}
return r.pos, nil
} }
// Read complies with io.Reader. // Read complies with io.Reader.
func (r *BinaryReader2) Read(b []byte) (int, error) { func (r *BinaryReader) Read(b []byte) (int, error) {
data, err := r.f.Bytes(len(b), r.pos) data, err := r.f.Bytes(b, int64(len(b)), r.pos)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return 0, err return 0, err
} }
n := copy(b, data) r.pos += int64(len(data))
r.pos += int64(len(b)) return len(data), err
return n, err
} }
// ReadAt complies with io.ReaderAt. // ReadAt complies with io.ReaderAt.
func (r *BinaryReader2) ReadAt(b []byte, off int64) (int, error) { func (r *BinaryReader) ReadAt(b []byte, off int64) (int, error) {
data, err := r.f.Bytes(len(b), off) data, err := r.f.Bytes(b, int64(len(b)), off)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return 0, err return 0, err
} }
n := copy(b, data) return len(data), err
return n, err
} }
// ReadBytes reads n bytes. // ReadBytes reads n bytes.
func (r *BinaryReader2) ReadBytes(n int) []byte { func (r *BinaryReader) ReadBytes(n int64) []byte {
data, err := r.f.Bytes(n, r.pos) data, err := r.f.Bytes(nil, n, r.pos)
if err != nil { if err != nil {
r.err = err r.err = err
return nil return nil
} }
r.pos += int64(n) r.pos += n
return data return data
} }
// ReadString reads a string of length n. // ReadString reads a string of length n.
func (r *BinaryReader2) ReadString(n int) string { func (r *BinaryReader) ReadString(n int64) string {
return string(r.ReadBytes(n)) return string(r.ReadBytes(n))
} }
// ReadByte reads a single byte. // ReadByte reads a single byte.
func (r *BinaryReader2) ReadByte() byte { func (r *BinaryReader) ReadByte() (byte, error) {
data := r.ReadBytes(1)
if data == nil {
return 0, r.err
}
return data[0], nil
}
// ReadUint8 reads a uint8.
func (r *BinaryReader) ReadUint8() uint8 {
data := r.ReadBytes(1) data := r.ReadBytes(1)
if data == nil { if data == nil {
return 0 return 0
@ -609,81 +312,93 @@ func (r *BinaryReader2) ReadByte() byte {
return data[0] return data[0]
} }
// ReadUint8 reads a uint8.
func (r *BinaryReader2) ReadUint8() uint8 {
return r.ReadByte()
}
// ReadUint16 reads a uint16. // ReadUint16 reads a uint16.
func (r *BinaryReader2) ReadUint16() uint16 { func (r *BinaryReader) ReadUint16() uint16 {
data := r.ReadBytes(2) data := r.ReadBytes(2)
if data == nil { if data == nil {
return 0 return 0
} else if r.Endian == binary.LittleEndian { } else if r.ByteOrder == binary.LittleEndian {
return uint16(data[1])<<8 | uint16(data[0]) return uint16(data[1])<<8 | uint16(data[0])
} }
return uint16(data[0])<<8 | uint16(data[1]) return uint16(data[0])<<8 | uint16(data[1])
} }
// ReadUint24 reads a uint24 into a uint32.
func (r *BinaryReader) ReadUint24() uint32 {
b := r.ReadBytes(3)
if b == nil {
return 0
} else if r.ByteOrder == binary.LittleEndian {
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16
} else {
return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}
}
// ReadUint32 reads a uint32. // ReadUint32 reads a uint32.
func (r *BinaryReader2) ReadUint32() uint32 { func (r *BinaryReader) ReadUint32() uint32 {
data := r.ReadBytes(4) data := r.ReadBytes(4)
if data == nil { if data == nil {
return 0 return 0
} else if r.Endian == binary.LittleEndian { } else if r.ByteOrder == binary.LittleEndian {
return uint32(data[3])<<24 | uint32(data[2])<<16 | uint32(data[1])<<8 | uint32(data[0]) return uint32(data[3])<<24 | uint32(data[2])<<16 | uint32(data[1])<<8 | uint32(data[0])
} }
return uint32(data[0])<<24 | uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) return uint32(data[0])<<24 | uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
} }
// ReadUint64 reads a uint64. // ReadUint64 reads a uint64.
func (r *BinaryReader2) ReadUint64() uint64 { func (r *BinaryReader) ReadUint64() uint64 {
data := r.ReadBytes(8) data := r.ReadBytes(8)
if data == nil { if data == nil {
return 0 return 0
} else if r.Endian == binary.LittleEndian { } else if r.ByteOrder == binary.LittleEndian {
return uint64(data[7])<<56 | uint64(data[6])<<48 | uint64(data[5])<<40 | uint64(data[4])<<32 | uint64(data[3])<<24 | uint64(data[2])<<16 | uint64(data[1])<<8 | uint64(data[0]) return uint64(data[7])<<56 | uint64(data[6])<<48 | uint64(data[5])<<40 | uint64(data[4])<<32 | uint64(data[3])<<24 | uint64(data[2])<<16 | uint64(data[1])<<8 | uint64(data[0])
} }
return uint64(data[0])<<56 | uint64(data[1])<<48 | uint64(data[2])<<40 | uint64(data[3])<<32 | uint64(data[4])<<24 | uint64(data[5])<<16 | uint64(data[6])<<8 | uint64(data[7]) return uint64(data[0])<<56 | uint64(data[1])<<48 | uint64(data[2])<<40 | uint64(data[3])<<32 | uint64(data[4])<<24 | uint64(data[5])<<16 | uint64(data[6])<<8 | uint64(data[7])
} }
// ReadInt8 reads a int8. // ReadInt8 reads a int8.
func (r *BinaryReader2) ReadInt8() int8 { func (r *BinaryReader) ReadInt8() int8 {
return int8(r.ReadByte()) return int8(r.ReadUint8())
} }
// ReadInt16 reads a int16. // ReadInt16 reads a int16.
func (r *BinaryReader2) ReadInt16() int16 { func (r *BinaryReader) ReadInt16() int16 {
return int16(r.ReadUint16()) return int16(r.ReadUint16())
} }
// ReadInt24 reads a int24 into an int32.
func (r *BinaryReader) ReadInt24() int32 {
return int32(r.ReadUint24())
}
// ReadInt32 reads a int32. // ReadInt32 reads a int32.
func (r *BinaryReader2) ReadInt32() int32 { func (r *BinaryReader) ReadInt32() int32 {
return int32(r.ReadUint32()) return int32(r.ReadUint32())
} }
// ReadInt64 reads a int64. // ReadInt64 reads a int64.
func (r *BinaryReader2) ReadInt64() int64 { func (r *BinaryReader) ReadInt64() int64 {
return int64(r.ReadUint64()) return int64(r.ReadUint64())
} }
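
With the old buffer-based reader removed, the former `BinaryReader2` takes over the `BinaryReader` name and its `Endian` field becomes `ByteOrder`. A short usage sketch, under the assumption that this file lives in `github.com/tdewolff/parse/v2`; the header layout and values are illustrative:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/tdewolff/parse/v2" // assumed import path of this package
)

func main() {
	// A little-endian header: uint16 version followed by uint32 length.
	data := []byte{0x02, 0x00, 0x10, 0x00, 0x00, 0x00}

	r := parse.NewBinaryReaderBytes(data)
	defer r.Close()
	r.ByteOrder = binary.LittleEndian

	version := r.ReadUint16() // 2
	length := r.ReadUint32()  // 16
	if err := r.Err(); err != nil {
		panic(err)
	}
	fmt.Println(version, length)
}
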
// BinaryWriter is a big endian binary file format writer. // BinaryWriter is a big endian binary file format writer.
type BinaryWriter struct { type BinaryWriter struct {
buf []byte buf []byte
Endian binary.ByteOrder ByteOrder binary.AppendByteOrder
} }
// NewBinaryWriter returns a big endian binary file format writer. // NewBinaryWriter returns a big endian binary file format writer.
func NewBinaryWriter(buf []byte) *BinaryWriter { func NewBinaryWriter(buf []byte) *BinaryWriter {
return &BinaryWriter{ return &BinaryWriter{
buf: buf, buf: buf,
Endian: binary.BigEndian, ByteOrder: binary.BigEndian,
} }
} }
// Len returns the buffer's length in bytes. // Len returns the buffer's length in bytes.
func (w *BinaryWriter) Len() uint32 { func (w *BinaryWriter) Len() int64 {
return uint32(len(w.buf)) return int64(len(w.buf))
} }
// Bytes returns the buffer's bytes. // Bytes returns the buffer's bytes.
@ -719,23 +434,26 @@ func (w *BinaryWriter) WriteUint8(v uint8) {
// WriteUint16 writes the given uint16 to the buffer. // WriteUint16 writes the given uint16 to the buffer.
func (w *BinaryWriter) WriteUint16(v uint16) { func (w *BinaryWriter) WriteUint16(v uint16) {
pos := len(w.buf) w.buf = w.ByteOrder.AppendUint16(w.buf, v)
w.buf = append(w.buf, make([]byte, 2)...) }
w.Endian.PutUint16(w.buf[pos:], v)
// WriteUint24 writes the given uint32 as a uint24 to the buffer.
func (w *BinaryWriter) WriteUint24(v uint32) {
if w.ByteOrder == binary.LittleEndian {
w.buf = append(w.buf, byte(v), byte(v>>8), byte(v>>16))
} else {
w.buf = append(w.buf, byte(v>>16), byte(v>>8), byte(v))
}
} }
// WriteUint32 writes the given uint32 to the buffer. // WriteUint32 writes the given uint32 to the buffer.
func (w *BinaryWriter) WriteUint32(v uint32) { func (w *BinaryWriter) WriteUint32(v uint32) {
pos := len(w.buf) w.buf = w.ByteOrder.AppendUint32(w.buf, v)
w.buf = append(w.buf, make([]byte, 4)...)
w.Endian.PutUint32(w.buf[pos:], v)
} }
// WriteUint64 writes the given uint64 to the buffer. // WriteUint64 writes the given uint64 to the buffer.
func (w *BinaryWriter) WriteUint64(v uint64) { func (w *BinaryWriter) WriteUint64(v uint64) {
pos := len(w.buf) w.buf = w.ByteOrder.AppendUint64(w.buf, v)
w.buf = append(w.buf, make([]byte, 8)...)
w.Endian.PutUint64(w.buf[pos:], v)
} }
// WriteInt8 writes the given int8 to the buffer. // WriteInt8 writes the given int8 to the buffer.
@ -748,6 +466,11 @@ func (w *BinaryWriter) WriteInt16(v int16) {
w.WriteUint16(uint16(v)) w.WriteUint16(uint16(v))
} }
// WriteInt24 writes the given int32 as an int24 to the buffer.
func (w *BinaryWriter) WriteInt24(v int32) {
w.WriteUint24(uint32(v))
}
// WriteInt32 writes the given int32 to the buffer. // WriteInt32 writes the given int32 to the buffer.
func (w *BinaryWriter) WriteInt32(v int32) { func (w *BinaryWriter) WriteInt32(v int32) {
w.WriteUint32(uint32(v)) w.WriteUint32(uint32(v))
@ -794,7 +517,7 @@ func (r *BitmapReader) Read() bool {
// BitmapWriter is a binary bitmap writer. // BitmapWriter is a binary bitmap writer.
type BitmapWriter struct { type BitmapWriter struct {
buf []byte buf []byte
pos uint32 pos uint64
} }
// NewBitmapWriter returns a binary bitmap writer. // NewBitmapWriter returns a binary bitmap writer.
@ -803,8 +526,8 @@ func NewBitmapWriter(buf []byte) *BitmapWriter {
} }
// Len returns the buffer's length in bytes. // Len returns the buffer's length in bytes.
func (w *BitmapWriter) Len() uint32 { func (w *BitmapWriter) Len() int64 {
return uint32(len(w.buf)) return int64(len(w.buf))
} }
// Bytes returns the buffer's bytes. // Bytes returns the buffer's bytes.
@ -814,7 +537,7 @@ func (w *BitmapWriter) Bytes() []byte {
// Write writes the next bit. // Write writes the next bit.
func (w *BitmapWriter) Write(bit bool) { func (w *BitmapWriter) Write(bit bool) {
if uint32(len(w.buf)) <= (w.pos+1)/8 { if uint64(len(w.buf)) <= (w.pos+1)/8 {
w.buf = append(w.buf, 0) w.buf = append(w.buf, 0)
} }
if bit { if bit {


@ -5,7 +5,6 @@ package parse
import ( import (
"errors" "errors"
"fmt" "fmt"
"io"
"os" "os"
"runtime" "runtime"
"syscall" "syscall"
@ -13,6 +12,7 @@ import (
type binaryReaderMmap struct { type binaryReaderMmap struct {
data []byte data []byte
size int64
} }
func newBinaryReaderMmap(filename string) (*binaryReaderMmap, error) { func newBinaryReaderMmap(filename string) (*binaryReaderMmap, error) {
@ -47,7 +47,7 @@ func newBinaryReaderMmap(filename string) (*binaryReaderMmap, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
r := &binaryReaderMmap{data} r := &binaryReaderMmap{data, size}
runtime.SetFinalizer(r, (*binaryReaderMmap).Close) runtime.SetFinalizer(r, (*binaryReaderMmap).Close)
return r, nil return r, nil
} }
@ -67,25 +67,29 @@ func (r *binaryReaderMmap) Close() error {
} }
// Len returns the length of the underlying memory-mapped file. // Len returns the length of the underlying memory-mapped file.
func (r *binaryReaderMmap) Len() int { func (r *binaryReaderMmap) Len() int64 {
return len(r.data) return r.size
} }
func (r *binaryReaderMmap) Bytes(n int, off int64) ([]byte, error) { func (r *binaryReaderMmap) Bytes(b []byte, n, off int64) ([]byte, error) {
if r.data == nil { if r.data == nil {
return nil, errors.New("mmap: closed") return nil, errors.New("mmap: closed")
} else if off < 0 || int64(len(r.data)) < off { } else if off < 0 || n < 0 || int64(len(r.data)) < off || int64(len(r.data))-off < n {
return nil, fmt.Errorf("mmap: invalid offset %d", off) return nil, fmt.Errorf("mmap: invalid range %d--%d", off, off+n)
} else if int64(len(r.data)-n) < off {
return r.data[off:len(r.data):len(r.data)], io.EOF
} }
return r.data[off : off+int64(n) : off+int64(n)], nil
data := r.data[off : off+n : off+n]
if b == nil {
return data, nil
}
copy(b, data)
return b, nil
} }
func NewBinaryReader2Mmap(filename string) (*BinaryReader2, error) { func NewBinaryReaderMmap(filename string) (*BinaryReader, error) {
f, err := newBinaryReaderMmap(filename) f, err := newBinaryReaderMmap(filename)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return NewBinaryReader2(f), nil return NewBinaryReader(f), nil
} }
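
With the `2` suffix dropped, the mmap constructor becomes `NewBinaryReaderMmap` and hands back the same `*BinaryReader` as the other constructors. A hedged sketch of reading a file through it, assuming the `github.com/tdewolff/parse/v2` import path and a platform where the mmap-backed reader is built:

package main

import (
	"fmt"

	"github.com/tdewolff/parse/v2" // assumed import path of this package
)

func main() {
	r, err := parse.NewBinaryReaderMmap("font.ttf") // any existing file
	if err != nil {
		panic(err)
	}
	defer r.Close()

	size := r.Len()         // total bytes still to read
	magic := r.ReadBytes(4) // first four bytes of the mapped file
	if err := r.Err(); err != nil {
		panic(err)
	}
	fmt.Printf("size=%d magic=% X\n", size, magic)
}
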


@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
--------------------------------------------------------------------------------
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -13,7 +13,6 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/instrumentation"
@ -231,7 +230,7 @@ func convertExponentialBuckets(bucketSpans []*dto.BucketSpan, deltas []int64) me
// Increase the count index by the Offset to insert Offset zeroes // Increase the count index by the Offset to insert Offset zeroes
countIndex += bs.GetOffset() countIndex += bs.GetOffset()
} }
for j := uint32(0); j < bs.GetLength(); j++ { for range bs.GetLength() {
// Convert deltas to the cumulative number of observations // Convert deltas to the cumulative number of observations
count += deltas[deltaIndex] count += deltas[deltaIndex]
deltaIndex++ deltaIndex++
@ -367,11 +366,12 @@ func convertExemplar(exemplar *dto.Exemplar) metricdata.Exemplar[float64] {
var traceID, spanID []byte var traceID, spanID []byte
// find the trace ID and span ID in attributes, if it exists // find the trace ID and span ID in attributes, if it exists
for _, label := range exemplar.GetLabel() { for _, label := range exemplar.GetLabel() {
if label.GetName() == traceIDLabel { switch label.GetName() {
case traceIDLabel:
traceID = []byte(label.GetValue()) traceID = []byte(label.GetValue())
} else if label.GetName() == spanIDLabel { case spanIDLabel:
spanID = []byte(label.GetValue()) spanID = []byte(label.GetValue())
} else { default:
attrs = append(attrs, attribute.String(label.GetName(), label.GetValue())) attrs = append(attrs, attribute.String(label.GetName(), label.GetValue()))
} }
} }
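
The loop above now routes exemplar labels with a switch: the trace and span ID labels are peeled off, and everything else becomes an attribute. A small self-contained sketch of the same routing; the label names here are illustrative, the bridge uses its own constants:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// splitLabels separates well-known exemplar labels from ordinary attributes.
func splitLabels(labels map[string]string) (traceID, spanID []byte, attrs []attribute.KeyValue) {
	for name, value := range labels {
		switch name {
		case "trace_id":
			traceID = []byte(value)
		case "span_id":
			spanID = []byte(value)
		default:
			attrs = append(attrs, attribute.String(name, value))
		}
	}
	return traceID, spanID, attrs
}

func main() {
	tid, sid, attrs := splitLabels(map[string]string{
		"trace_id": "4bf92f3577b34da6a3ce929d0e0e4736",
		"span_id":  "00f067aa0ba902b7",
		"tenant":   "acme",
	})
	fmt.Println(string(tid), string(sid), attrs)
}
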


@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
--------------------------------------------------------------------------------
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -84,10 +84,10 @@ func init() {
return nil, errInvalidOTLPProtocol return nil, errInvalidOTLPProtocol
} }
}) })
RegisterLogExporter("console", func(ctx context.Context) (log.Exporter, error) { RegisterLogExporter("console", func(context.Context) (log.Exporter, error) {
return stdoutlog.New() return stdoutlog.New()
}) })
RegisterLogExporter("none", func(ctx context.Context) (log.Exporter, error) { RegisterLogExporter("none", func(context.Context) (log.Exporter, error) {
return noopLogExporter{}, nil return noopLogExporter{}, nil
}) })
} }


@ -15,14 +15,14 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
prometheusbridge "go.opentelemetry.io/contrib/bridges/prometheus"
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
promexporter "go.opentelemetry.io/otel/exporters/prometheus" promexporter "go.opentelemetry.io/otel/exporters/prometheus"
"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
"go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric"
prometheusbridge "go.opentelemetry.io/contrib/bridges/prometheus"
) )
const otelExporterOTLPMetricsProtoEnvKey = "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL" const otelExporterOTLPMetricsProtoEnvKey = "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL"
@ -154,7 +154,7 @@ func init() {
} }
return metric.NewPeriodicReader(r, readerOpts...), nil return metric.NewPeriodicReader(r, readerOpts...), nil
}) })
RegisterMetricReader("none", func(ctx context.Context) (metric.Reader, error) { RegisterMetricReader("none", func(context.Context) (metric.Reader, error) {
return newNoopMetricReader(), nil return newNoopMetricReader(), nil
}) })
RegisterMetricReader("prometheus", func(ctx context.Context) (metric.Reader, error) { RegisterMetricReader("prometheus", func(ctx context.Context) (metric.Reader, error) {
@ -211,10 +211,10 @@ func init() {
return readerWithServer{lis.Addr(), reader, &server}, nil return readerWithServer{lis.Addr(), reader, &server}, nil
}) })
RegisterMetricProducer("prometheus", func(ctx context.Context) (metric.Producer, error) { RegisterMetricProducer("prometheus", func(context.Context) (metric.Producer, error) {
return prometheusbridge.NewMetricProducer(), nil return prometheusbridge.NewMetricProducer(), nil
}) })
RegisterMetricProducer("none", func(ctx context.Context) (metric.Producer, error) { RegisterMetricProducer("none", func(context.Context) (metric.Producer, error) {
return newNoopMetricProducer(), nil return newNoopMetricProducer(), nil
}) })
} }


@ -18,12 +18,12 @@ type noopSpanExporter struct{}
var _ trace.SpanExporter = noopSpanExporter{} var _ trace.SpanExporter = noopSpanExporter{}
// ExportSpans is part of trace.SpanExporter interface. // ExportSpans is part of trace.SpanExporter interface.
func (e noopSpanExporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { func (noopSpanExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error {
return nil return nil
} }
// Shutdown is part of trace.SpanExporter interface. // Shutdown is part of trace.SpanExporter interface.
func (e noopSpanExporter) Shutdown(ctx context.Context) error { func (noopSpanExporter) Shutdown(context.Context) error {
return nil return nil
} }
@ -51,7 +51,7 @@ func IsNoneMetricReader(e metric.Reader) bool {
type noopMetricProducer struct{} type noopMetricProducer struct{}
func (e noopMetricProducer) Produce(ctx context.Context) ([]metricdata.ScopeMetrics, error) { func (noopMetricProducer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) {
return nil, nil return nil, nil
} }
@ -65,17 +65,17 @@ type noopLogExporter struct{}
var _ log.Exporter = noopLogExporter{} var _ log.Exporter = noopLogExporter{}
// ExportSpans is part of log.Exporter interface. // ExportSpans is part of log.Exporter interface.
func (e noopLogExporter) Export(ctx context.Context, records []log.Record) error { func (noopLogExporter) Export(context.Context, []log.Record) error {
return nil return nil
} }
// Shutdown is part of log.Exporter interface. // Shutdown is part of log.Exporter interface.
func (e noopLogExporter) Shutdown(ctx context.Context) error { func (noopLogExporter) Shutdown(context.Context) error {
return nil return nil
} }
// ForceFlush is part of log.Exporter interface. // ForceFlush is part of log.Exporter interface.
func (e noopLogExporter) ForceFlush(ctx context.Context) error { func (noopLogExporter) ForceFlush(context.Context) error {
return nil return nil
} }


@ -89,10 +89,10 @@ func init() {
return nil, errInvalidOTLPProtocol return nil, errInvalidOTLPProtocol
} }
}) })
RegisterSpanExporter("console", func(ctx context.Context) (trace.SpanExporter, error) { RegisterSpanExporter("console", func(context.Context) (trace.SpanExporter, error) {
return stdouttrace.New() return stdouttrace.New()
}) })
RegisterSpanExporter("none", func(ctx context.Context) (trace.SpanExporter, error) { RegisterSpanExporter("none", func(context.Context) (trace.SpanExporter, error) {
return noopSpanExporter{}, nil return noopSpanExporter{}, nil
}) })
} }


@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
--------------------------------------------------------------------------------
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -56,7 +56,7 @@ func (r *runtime) register() error {
} }
_, err = r.meter.RegisterCallback( _, err = r.meter.RegisterCallback(
func(ctx context.Context, o metric.Observer) error { func(_ context.Context, o metric.Observer) error {
o.ObserveInt64(uptime, time.Since(startTime).Milliseconds()) o.ObserveInt64(uptime, time.Since(startTime).Milliseconds())
o.ObserveInt64(goroutines, int64(goruntime.NumGoroutine())) o.ObserveInt64(goroutines, int64(goruntime.NumGoroutine()))
o.ObserveInt64(cgoCalls, goruntime.NumCgoCall()) o.ObserveInt64(cgoCalls, goruntime.NumCgoCall())
@ -179,7 +179,7 @@ func (r *runtime) registerMemStats() error {
// observation interval is too slow. // observation interval is too slow.
if pauseTotalNs, err = r.meter.Int64ObservableCounter( if pauseTotalNs, err = r.meter.Int64ObservableCounter(
"process.runtime.go.gc.pause_total_ns", "process.runtime.go.gc.pause_total_ns",
// TODO: nanoseconds units metric.WithUnit("ns"),
metric.WithDescription("Cumulative nanoseconds in GC stop-the-world pauses since the program started"), metric.WithDescription("Cumulative nanoseconds in GC stop-the-world pauses since the program started"),
); err != nil { ); err != nil {
return err return err
@ -187,7 +187,7 @@ func (r *runtime) registerMemStats() error {
if gcPauseNs, err = r.meter.Int64Histogram( if gcPauseNs, err = r.meter.Int64Histogram(
"process.runtime.go.gc.pause_ns", "process.runtime.go.gc.pause_ns",
// TODO: nanoseconds units metric.WithUnit("ns"),
metric.WithDescription("Amount of nanoseconds in GC stop-the-world pauses"), metric.WithDescription("Amount of nanoseconds in GC stop-the-world pauses"),
); err != nil { ); err != nil {
return err return err
@ -244,7 +244,7 @@ func clampUint64(v uint64) int64 {
if v > math.MaxInt64 { if v > math.MaxInt64 {
return math.MaxInt64 return math.MaxInt64
} }
return int64(v) // nolint: gosec // Overflow checked above. return int64(v)
} }
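
The GC-pause hunks above drop the `// TODO: nanoseconds units` comments in favour of an explicit `metric.WithUnit("ns")` option on the instruments. For reference, a minimal sketch of declaring such an instrument with the OpenTelemetry metric API; the instrument name and callback are illustrative:

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example/runtime")
	start := time.Now()

	uptime, err := meter.Int64ObservableCounter(
		"example.uptime_ns",
		metric.WithUnit("ns"), // explicit unit instead of a TODO comment
		metric.WithDescription("Nanoseconds since the program started"),
	)
	if err != nil {
		panic(err)
	}

	if _, err := meter.RegisterCallback(
		func(_ context.Context, o metric.Observer) error {
			o.ObserveInt64(uptime, time.Since(start).Nanoseconds())
			return nil
		},
		uptime,
	); err != nil {
		panic(err)
	}
}
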
func computeGCPauses( func computeGCPauses(
@ -271,7 +271,7 @@ func computeGCPauses(
return return
} }
length := uint64(n) // nolint: gosec // n >= 0 length := uint64(n)
i := uint64(lastNumGC) % length i := uint64(lastNumGC) % length
j := uint64(currentNumGC) % length j := uint64(currentNumGC) % length


@ -12,7 +12,7 @@ import (
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/semconv/v1.34.0/goconv" "go.opentelemetry.io/otel/semconv/v1.37.0/goconv"
"go.opentelemetry.io/contrib/instrumentation/runtime/internal/deprecatedruntime" "go.opentelemetry.io/contrib/instrumentation/runtime/internal/deprecatedruntime"
"go.opentelemetry.io/contrib/instrumentation/runtime/internal/x" "go.opentelemetry.io/contrib/instrumentation/runtime/internal/x"
@ -90,7 +90,7 @@ func Start(opts ...Option) error {
collector := newCollector(c.MinimumReadMemStatsInterval, runtimeMetrics) collector := newCollector(c.MinimumReadMemStatsInterval, runtimeMetrics)
var lock sync.Mutex var lock sync.Mutex
_, err = meter.RegisterCallback( _, err = meter.RegisterCallback(
func(ctx context.Context, o metric.Observer) error { func(_ context.Context, o metric.Observer) error {
lock.Lock() lock.Lock()
defer lock.Unlock() defer lock.Unlock()
collector.refresh() collector.refresh()
@ -187,7 +187,7 @@ func (g *goCollector) getInt(name string) int64 {
if v > math.MaxInt64 { if v > math.MaxInt64 {
return math.MaxInt64 return math.MaxInt64
} }
return int64(v) // nolint: gosec // Overflow checked above. return int64(v)
} }
return 0 return 0
} }


@ -5,6 +5,6 @@ package runtime // import "go.opentelemetry.io/contrib/instrumentation/runtime"
// Version is the current release version of the runtime instrumentation. // Version is the current release version of the runtime instrumentation.
func Version() string { func Version() string {
return "0.62.0" return "0.63.0"
// This string is updated by the pre_release.sh script during release // This string is updated by the pre_release.sh script during release
} }


@ -7,3 +7,4 @@ ans
nam nam
valu valu
thirdparty thirdparty
addOpt


@ -10,6 +10,7 @@ linters:
- depguard - depguard
- errcheck - errcheck
- errorlint - errorlint
- gocritic
- godot - godot
- gosec - gosec
- govet - govet
@ -86,6 +87,18 @@ linters:
deny: deny:
- pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal
desc: Do not use cross-module internal packages. desc: Do not use cross-module internal packages.
gocritic:
disabled-checks:
- appendAssign
- commentedOutCode
- dupArg
- hugeParam
- importShadow
- preferDecodeRune
- rangeValCopy
- unnamedResult
- whyNoLint
enable-all: true
godot: godot:
exclude: exclude:
# Exclude links. # Exclude links.
@ -167,7 +180,10 @@ linters:
- fmt.Print - fmt.Print
- fmt.Printf - fmt.Printf
- fmt.Println - fmt.Println
- name: unused-parameter
- name: unused-receiver
- name: unnecessary-stmt - name: unnecessary-stmt
- name: use-any
- name: useless-break - name: useless-break
- name: var-declaration - name: var-declaration
- name: var-naming - name: var-naming
@ -224,10 +240,6 @@ linters:
- linters: - linters:
- gosec - gosec
text: 'G402: TLS MinVersion too low.' text: 'G402: TLS MinVersion too low.'
paths:
- third_party$
- builtin$
- examples$
issues: issues:
max-issues-per-linter: 0 max-issues-per-linter: 0
max-same-issues: 0 max-same-issues: 0
@ -237,14 +249,12 @@ formatters:
- goimports - goimports
- golines - golines
settings: settings:
gofumpt:
extra-rules: true
goimports: goimports:
local-prefixes: local-prefixes:
- go.opentelemetry.io - go.opentelemetry.io/otel
golines: golines:
max-len: 120 max-len: 120
exclusions: exclusions:
generated: lax generated: lax
paths:
- third_party$
- builtin$
- examples$


@ -2,5 +2,8 @@ http://localhost
http://jaeger-collector http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/milestone/
https://github.com/open-telemetry/opentelemetry-go/projects https://github.com/open-telemetry/opentelemetry-go/projects
# Weaver model URL for semantic-conventions repository.
https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+]
file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
http://4.3.2.1:78/user/123


@ -11,6 +11,93 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
<!-- Released section --> <!-- Released section -->
<!-- Don't change this section unless doing release --> <!-- Don't change this section unless doing release -->
## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29
This release is the last to support [Go 1.23].
The next release will require at least [Go 1.24].
### Added
- Add native histogram exemplar support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6772)
- Add template attribute functions to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6939)
- `ContainerLabel`
- `DBOperationParameter`
- `DBSystemParameter`
- `HTTPRequestHeader`
- `HTTPResponseHeader`
- `K8SCronJobAnnotation`
- `K8SCronJobLabel`
- `K8SDaemonSetAnnotation`
- `K8SDaemonSetLabel`
- `K8SDeploymentAnnotation`
- `K8SDeploymentLabel`
- `K8SJobAnnotation`
- `K8SJobLabel`
- `K8SNamespaceAnnotation`
- `K8SNamespaceLabel`
- `K8SNodeAnnotation`
- `K8SNodeLabel`
- `K8SPodAnnotation`
- `K8SPodLabel`
- `K8SReplicaSetAnnotation`
- `K8SReplicaSetLabel`
- `K8SStatefulSetAnnotation`
- `K8SStatefulSetLabel`
- `ProcessEnvironmentVariable`
- `RPCConnectRPCRequestMetadata`
- `RPCConnectRPCResponseMetadata`
- `RPCGRPCRequestMetadata`
- `RPCGRPCResponseMetadata`
- Add `ErrorType` attribute helper function to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6962)
- Add `WithAllowKeyDuplication` in `go.opentelemetry.io/otel/sdk/log` which can be used to disable deduplication for log records. (#6968)
- Add `WithCardinalityLimit` option to configure the cardinality limit in `go.opentelemetry.io/otel/sdk/metric`. (#6996, #7065, #7081, #7164, #7165, #7179)
- Add `Clone` method to `Record` in `go.opentelemetry.io/otel/log` that returns a copy of the record with no shared state. (#7001)
- Add experimental self-observability span and batch span processor metrics in `go.opentelemetry.io/otel/sdk/trace`.
Check the `go.opentelemetry.io/otel/sdk/trace/internal/x` package documentation for more information. (#7027, #6393, #7209)
- The `go.opentelemetry.io/otel/semconv/v1.36.0` package.
The package contains semantic conventions from the `v1.36.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.36.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.34.0.`(#7032, #7041)
- Add support for configuring Prometheus name translation using `WithTranslationStrategy` option in `go.opentelemetry.io/otel/exporters/prometheus`. The current default translation strategy when UTF-8 mode is enabled is `NoUTF8EscapingWithSuffixes`, but a future release will change the default strategy to `UnderscoreEscapingWithSuffixes` for compliance with the specification. (#7111)
- Add experimental self-observability log metrics in `go.opentelemetry.io/otel/sdk/log`.
Check the `go.opentelemetry.io/otel/sdk/log/internal/x` package documentation for more information. (#7121)
- Add experimental self-observability trace exporter metrics in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`.
Check the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x` package documentation for more information. (#7133)
- Support testing of [Go 1.25]. (#7187)
- The `go.opentelemetry.io/otel/semconv/v1.37.0` package.
The package contains semantic conventions from the `v1.37.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.37.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.36.0.`(#7254)
### Changed
- Optimize `TraceIDFromHex` and `SpanIDFromHex` in `go.opentelemetry.io/otel/sdk/trace`. (#6791)
- Change `AssertEqual` in `go.opentelemetry.io/otel/log/logtest` to accept `TestingT` in order to support benchmarks and fuzz tests. (#6908)
- Change `DefaultExemplarReservoirProviderSelector` in `go.opentelemetry.io/otel/sdk/metric` to use `runtime.GOMAXPROCS(0)` instead of `runtime.NumCPU()` for the `FixedSizeReservoirProvider` default size. (#7094)
### Fixed
- `SetBody` method of `Record` in `go.opentelemetry.io/otel/sdk/log` now deduplicates key-value collections (`log.Value` of `log.KindMap` from `go.opentelemetry.io/otel/log`). (#7002)
- Fix `go.opentelemetry.io/otel/exporters/prometheus` to not append a suffix if it's already present in metric name. (#7088)
- Fix the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` self-observability component type and name. (#7195)
- Fix partial export count metric in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7199)
### Deprecated
- Deprecate `WithoutUnits` and `WithoutCounterSuffixes` options, preferring `WithTranslationStrategy` instead. (#7111)
- Deprecate support for `OTEL_GO_X_CARDINALITY_LIMIT` environment variable in `go.opentelemetry.io/otel/sdk/metric`. Use `WithCardinalityLimit` option instead. (#7166)
## [0.59.1] 2025-07-21
### Changed
- Retract `v0.59.0` release of `go.opentelemetry.io/otel/exporters/prometheus` module which appends incorrect unit suffixes. (#7046)
- Change `go.opentelemetry.io/otel/exporters/prometheus` to no longer deduplicate suffixes when UTF8 is enabled.
It is recommended to disable unit and counter suffixes in the exporter, and manually add suffixes if you rely on the existing behavior. (#7044)
### Fixed
- Fix `go.opentelemetry.io/otel/exporters/prometheus` to properly handle unit suffixes when the unit is in brackets.
E.g. `{spans}`. (#7044)
## [1.37.0/0.59.0/0.13.0] 2025-06-25 ## [1.37.0/0.59.0/0.13.0] 2025-06-25
### Added ### Added
@ -3343,7 +3430,8 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files. - CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project. - CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD [Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD
[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0
[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 [1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 [0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 [0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
@ -3439,6 +3527,7 @@ It contains api and sdk for trace and meter.
<!-- Released section ended --> <!-- Released section ended -->
[Go 1.25]: https://go.dev/doc/go1.25
[Go 1.24]: https://go.dev/doc/go1.24 [Go 1.24]: https://go.dev/doc/go1.24
[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.23]: https://go.dev/doc/go1.23
[Go 1.22]: https://go.dev/doc/go1.22 [Go 1.22]: https://go.dev/doc/go1.22


@ -12,6 +12,6 @@
# https://help.github.com/en/articles/about-code-owners # https://help.github.com/en/articles/about-code-owners
# #
* @MrAlias @XSAM @dashpole @pellared @dmathieu * @MrAlias @XSAM @dashpole @pellared @dmathieu @flc1125
CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu


@ -192,6 +192,35 @@ should have `go test -bench` output in their description.
should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
output in their description. output in their description.
## Dependencies
This project uses [Go Modules] for dependency management. All modules will use
`go.mod` to explicitly list all direct and indirect dependencies, ensuring a
clear dependency graph. The `go.sum` file for each module will be committed to
the repository and used to verify the integrity of downloaded modules,
preventing malicious tampering.
This project uses automated dependency update tools (i.e. dependabot,
renovatebot) to manage updates to dependencies. This ensures that dependencies
are kept up-to-date with the latest security patches and features and are
reviewed before being merged. If you would like to propose a change to a
dependency it should be done through a pull request that updates the `go.mod`
file and includes a description of the change.
See the [versioning and compatibility](./VERSIONING.md) policy for more details
about dependency compatibility.
[Go Modules]: https://pkg.go.dev/cmd/go#hdr-Modules__module_versions__and_more
### Environment Dependencies
This project does not partition dependencies based on the environment (i.e.
`development`, `staging`, `production`).
Only the dependencies explicitly included in the released modules have been
tested and verified to work with the released code. No other guarantee is made
about the compatibility of other dependencies.
## Documentation ## Documentation
Each (non-internal, non-test) package must be documented using Each (non-internal, non-test) package must be documented using
@ -233,6 +262,10 @@ For a non-comprehensive but foundational overview of these best practices
the [Effective Go](https://golang.org/doc/effective_go.html) documentation the [Effective Go](https://golang.org/doc/effective_go.html) documentation
is an excellent starting place. is an excellent starting place.
We also recommend following the
[Go Code Review Comments](https://go.dev/wiki/CodeReviewComments)
that collects common comments made during reviews of Go code.
As a convenience for developers building this project the `make precommit` As a convenience for developers building this project the `make precommit`
will format, lint, validate, and in some cases fix the changes you plan to will format, lint, validate, and in some cases fix the changes you plan to
submit. This check will need to pass for your changes to be able to be submit. This check will need to pass for your changes to be able to be
@ -586,6 +619,10 @@ See also:
### Testing ### Testing
We allow using [`testify`](https://github.com/stretchr/testify) even though
it is seen as non-idiomatic according to
the [Go Test Comments](https://go.dev/wiki/TestComments#assert-libraries) page.
The tests should never leak goroutines. The tests should never leak goroutines.
Use the term `ConcurrentSafe` in the test name when it aims to verify the Use the term `ConcurrentSafe` in the test name when it aims to verify the
@ -640,13 +677,6 @@ should be canceled.
## Approvers and Maintainers ## Approvers and Maintainers
### Triagers
- [Alex Kats](https://github.com/akats7), Capital One
- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
### Approvers
### Maintainers ### Maintainers
- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832)) - [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832))
@ -655,6 +685,21 @@ should be canceled.
- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA)) - [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA))
- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A)) - [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A))
For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer).
### Approvers
- [Flc](https://github.com/flc1125), Independent
For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).
### Triagers
- [Alex Kats](https://github.com/akats7), Capital One
- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager).
### Emeritus ### Emeritus
- [Aaron Clawson](https://github.com/MadVikingGod) - [Aaron Clawson](https://github.com/MadVikingGod)
@ -665,6 +710,8 @@ should be canceled.
- [Josh MacDonald](https://github.com/jmacd) - [Josh MacDonald](https://github.com/jmacd)
- [Liz Fong-Jones](https://github.com/lizthegrey) - [Liz Fong-Jones](https://github.com/lizthegrey)
For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community

View file

@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -34,9 +34,6 @@ $(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS)
MULTIMOD = $(TOOLS)/multimod
$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
SEMCONVGEN = $(TOOLS)/semconvgen
$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
CROSSLINK = $(TOOLS)/crosslink
$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
@ -71,7 +68,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
.PHONY: tools
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
# Virtualized python tools via docker
@ -284,7 +281,7 @@ semconv-generate: $(SEMCONVKIT)
docker run --rm \
-u $(DOCKER_USER) \
--env HOME=/tmp/weaver \
--mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \ --mount 'type=bind,source=$(PWD)/semconv/templates,target=/home/weaver/templates,readonly' \
--mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \
--mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \
$(WEAVER_IMAGE) registry generate \

View file

@ -53,18 +53,25 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture |
|----------|------------|--------------|
| Ubuntu | 1.25 | amd64 |
| Ubuntu | 1.24 | amd64 |
| Ubuntu | 1.23 | amd64 |
| Ubuntu | 1.25 | 386 |
| Ubuntu | 1.24 | 386 |
| Ubuntu | 1.23 | 386 |
| Ubuntu | 1.25 | arm64 |
| Ubuntu | 1.24 | arm64 |
| Ubuntu | 1.23 | arm64 |
| macOS 13 | 1.25 | amd64 |
| macOS 13 | 1.24 | amd64 |
| macOS 13 | 1.23 | amd64 |
| macOS | 1.25 | arm64 |
| macOS | 1.24 | arm64 |
| macOS | 1.23 | arm64 |
| Windows | 1.25 | amd64 |
| Windows | 1.24 | amd64 |
| Windows | 1.23 | amd64 |
| Windows | 1.25 | 386 |
| Windows | 1.24 | 386 |
| Windows | 1.23 | 386 |

203
vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml generated vendored Normal file
View file

@ -0,0 +1,203 @@
header:
schema-version: "1.0.0"
expiration-date: "2026-08-04T00:00:00.000Z"
last-updated: "2025-08-04"
last-reviewed: "2025-08-04"
commit-hash: 69e81088ad40f45a0764597326722dea8f3f00a8
project-url: https://github.com/open-telemetry/opentelemetry-go
project-release: "v1.37.0"
changelog: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CHANGELOG.md
license: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/LICENSE
project-lifecycle:
status: active
bug-fixes-only: false
core-maintainers:
- https://github.com/dmathieu
- https://github.com/dashpole
- https://github.com/pellared
- https://github.com/XSAM
- https://github.com/MrAlias
release-process: |
See https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/RELEASING.md
contribution-policy:
accepts-pull-requests: true
accepts-automated-pull-requests: true
automated-tools-list:
- automated-tool: dependabot
action: allowed
comment: Automated dependency updates are accepted.
- automated-tool: renovatebot
action: allowed
comment: Automated dependency updates are accepted.
- automated-tool: opentelemetrybot
action: allowed
comment: Automated OpenTelemetry actions are accepted.
contributing-policy: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
code-of-conduct: https://github.com/open-telemetry/.github/blob/ffa15f76b65ec7bcc41f6a0b277edbb74f832206/CODE_OF_CONDUCT.md
documentation:
- https://pkg.go.dev/go.opentelemetry.io/otel
- https://opentelemetry.io/docs/instrumentation/go/
distribution-points:
- pkg:golang/go.opentelemetry.io/otel
- pkg:golang/go.opentelemetry.io/otel/bridge/opencensus
- pkg:golang/go.opentelemetry.io/otel/bridge/opencensus/test
- pkg:golang/go.opentelemetry.io/otel/bridge/opentracing
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
- pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
- pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdouttrace
- pkg:golang/go.opentelemetry.io/otel/exporters/zipkin
- pkg:golang/go.opentelemetry.io/otel/metric
- pkg:golang/go.opentelemetry.io/otel/sdk
- pkg:golang/go.opentelemetry.io/otel/sdk/metric
- pkg:golang/go.opentelemetry.io/otel/trace
- pkg:golang/go.opentelemetry.io/otel/exporters/prometheus
- pkg:golang/go.opentelemetry.io/otel/log
- pkg:golang/go.opentelemetry.io/otel/log/logtest
- pkg:golang/go.opentelemetry.io/otel/sdk/log
- pkg:golang/go.opentelemetry.io/otel/sdk/log/logtest
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutlog
- pkg:golang/go.opentelemetry.io/otel/schema
security-artifacts:
threat-model:
threat-model-created: false
comment: |
No formal threat model created yet.
self-assessment:
self-assessment-created: false
comment: |
No formal self-assessment yet.
security-testing:
- tool-type: sca
tool-name: Dependabot
tool-version: latest
tool-url: https://github.com/dependabot
tool-rulesets:
- built-in
integration:
ad-hoc: false
ci: true
before-release: true
comment: |
Automated dependency updates.
- tool-type: sast
tool-name: golangci-lint
tool-version: latest
tool-url: https://github.com/golangci/golangci-lint
tool-rulesets:
- built-in
integration:
ad-hoc: false
ci: true
before-release: true
comment: |
Static analysis in CI.
- tool-type: fuzzing
tool-name: OSS-Fuzz
tool-version: latest
tool-url: https://github.com/google/oss-fuzz
tool-rulesets:
- default
integration:
ad-hoc: false
ci: false
before-release: false
comment: |
OpenTelemetry Go is integrated with OSS-Fuzz for continuous fuzz testing. See https://github.com/google/oss-fuzz/tree/f0f9b221190c6063a773bea606d192ebfc3d00cf/projects/opentelemetry-go for more details.
- tool-type: sast
tool-name: CodeQL
tool-version: latest
tool-url: https://github.com/github/codeql
tool-rulesets:
- default
integration:
ad-hoc: false
ci: true
before-release: true
comment: |
CodeQL static analysis is run in CI for all commits and pull requests to detect security vulnerabilities in the Go source code. See https://github.com/open-telemetry/opentelemetry-go/blob/d5b5b059849720144a03ca5c87561bfbdb940119/.github/workflows/codeql-analysis.yml for workflow details.
- tool-type: sca
tool-name: govulncheck
tool-version: latest
tool-url: https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck
tool-rulesets:
- default
integration:
ad-hoc: false
ci: true
before-release: true
comment: |
govulncheck is run in CI to detect known vulnerabilities in Go modules and code paths. See https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/.github/workflows/ci.yml for workflow configuration.
security-assessments:
- auditor-name: 7ASecurity
auditor-url: https://7asecurity.com
auditor-report: https://7asecurity.com/reports/pentest-report-opentelemetry.pdf
report-year: 2023
comment: |
This independent penetration test by 7ASecurity covered OpenTelemetry repositories including opentelemetry-go. The assessment focused on codebase review, threat modeling, and vulnerability identification. See the report for details of findings and recommendations applicable to opentelemetry-go. No critical vulnerabilities were found for this repository.
security-contacts:
- type: email
value: cncf-opentelemetry-security@lists.cncf.io
primary: true
- type: website
value: https://github.com/open-telemetry/opentelemetry-go/security/policy
primary: false
vulnerability-reporting:
accepts-vulnerability-reports: true
email-contact: cncf-opentelemetry-security@lists.cncf.io
security-policy: https://github.com/open-telemetry/opentelemetry-go/security/policy
comment: |
Security issues should be reported via email or GitHub security policy page.
dependencies:
third-party-packages: true
dependencies-lists:
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/test/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opentracing/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploggrpc/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploghttp/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracehttp/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/prometheus/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutlog/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutmetric/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdouttrace/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/zipkin/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/internal/tools/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/logtest/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/metric/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/schema/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/logtest/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/metric/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/internal/telemetry/test/go.mod
dependencies-lifecycle:
policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
comment: |
Dependency lifecycle managed via go.mod and renovatebot.
env-dependencies-policy:
policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
comment: |
See contributing policy for environment usage.

View file

@ -78,7 +78,7 @@ func DefaultEncoder() Encoder {
defaultEncoderOnce.Do(func() {
defaultEncoderInstance = &defaultAttrEncoder{
pool: sync.Pool{
New: func() interface{} { New: func() any {
return &bytes.Buffer{}
},
},
@ -96,11 +96,11 @@ func (d *defaultAttrEncoder) Encode(iter Iterator) string {
for iter.Next() {
i, keyValue := iter.IndexedAttribute()
if i > 0 {
_, _ = buf.WriteRune(',') _ = buf.WriteByte(',')
}
copyAndEscape(buf, string(keyValue.Key))
_, _ = buf.WriteRune('=') _ = buf.WriteByte('=')
if keyValue.Value.Type() == STRING {
copyAndEscape(buf, keyValue.Value.AsString())
@ -122,14 +122,14 @@ func copyAndEscape(buf *bytes.Buffer, val string) {
for _, ch := range val {
switch ch {
case '=', ',', escapeChar:
_, _ = buf.WriteRune(escapeChar) _ = buf.WriteByte(escapeChar)
}
_, _ = buf.WriteRune(ch)
}
}
// Valid returns true if this encoder ID was allocated by // Valid reports whether this encoder ID was allocated by
// `NewEncoderID`. Invalid encoder IDs will not be cached. // [NewEncoderID]. Invalid encoder IDs will not be cached.
func (id EncoderID) Valid() bool {
return id.value != 0
}
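For context on the `WriteRune` → `WriteByte` change above: `bytes.Buffer.WriteByte` writes a single byte and returns only an error, so it fits one-byte separators such as `','` and `'='`, which is why the size assignment is dropped. A small standalone sketch (not project code):

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	// WriteRune handles multi-byte runes and returns (size, error).
	_, _ = buf.WriteRune('界')

	// For a separator known to be a single ASCII byte, WriteByte suffices
	// and returns only an error.
	_ = buf.WriteByte(',')
	_ = buf.WriteByte('=')

	fmt.Println(buf.String()) // 界,=
}
```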

View file

@ -15,8 +15,8 @@ type Filter func(KeyValue) bool
//
// If keys is empty a deny-all filter is returned.
func NewAllowKeysFilter(keys ...Key) Filter {
if len(keys) <= 0 { if len(keys) == 0 {
return func(kv KeyValue) bool { return false } return func(KeyValue) bool { return false }
}
allowed := make(map[Key]struct{}, len(keys))
@ -34,8 +34,8 @@ func NewAllowKeysFilter(keys ...Key) Filter {
//
// If keys is empty an allow-all filter is returned.
func NewDenyKeysFilter(keys ...Key) Filter {
if len(keys) <= 0 { if len(keys) == 0 {
return func(kv KeyValue) bool { return true } return func(KeyValue) bool { return true }
} }
forbid := make(map[Key]struct{}, len(keys)) forbid := make(map[Key]struct{}, len(keys))
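A usage sketch of the filter semantics documented above (the key names are made up; the behavior matches the doc comments — no keys means deny-all for the allow filter and allow-all for the deny filter):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Allow only the "host.name" key; everything else is filtered out.
	allow := attribute.NewAllowKeysFilter(attribute.Key("host.name"))
	fmt.Println(allow(attribute.String("host.name", "a")))   // true
	fmt.Println(allow(attribute.String("process.pid", "1"))) // false

	// With no keys, the allow filter denies everything...
	denyAll := attribute.NewAllowKeysFilter()
	fmt.Println(denyAll(attribute.String("host.name", "a"))) // false

	// ...and the deny filter allows everything.
	allowAll := attribute.NewDenyKeysFilter()
	fmt.Println(allowAll(attribute.String("host.name", "a"))) // true
}
```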

View file

@ -12,7 +12,7 @@ import (
)
// BoolSliceValue converts a bool slice into an array with same elements as slice.
func BoolSliceValue(v []bool) interface{} { func BoolSliceValue(v []bool) any {
var zero bool
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@ -20,7 +20,7 @@ func BoolSliceValue(v []bool) interface{} {
}
// Int64SliceValue converts an int64 slice into an array with same elements as slice.
func Int64SliceValue(v []int64) interface{} { func Int64SliceValue(v []int64) any {
var zero int64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@ -28,7 +28,7 @@ func Int64SliceValue(v []int64) interface{} {
}
// Float64SliceValue converts a float64 slice into an array with same elements as slice.
func Float64SliceValue(v []float64) interface{} { func Float64SliceValue(v []float64) any {
var zero float64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@ -36,7 +36,7 @@ func Float64SliceValue(v []float64) interface{} {
}
// StringSliceValue converts a string slice into an array with same elements as slice.
func StringSliceValue(v []string) interface{} { func StringSliceValue(v []string) any {
var zero string
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@ -44,7 +44,7 @@ func StringSliceValue(v []string) interface{} {
}
// AsBoolSlice converts a bool array into a slice into with same elements as array.
func AsBoolSlice(v interface{}) []bool { func AsBoolSlice(v any) []bool {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@ -57,7 +57,7 @@ func AsBoolSlice(v interface{}) []bool {
}
// AsInt64Slice converts an int64 array into a slice into with same elements as array.
func AsInt64Slice(v interface{}) []int64 { func AsInt64Slice(v any) []int64 {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@ -70,7 +70,7 @@ func AsInt64Slice(v interface{}) []int64 {
}
// AsFloat64Slice converts a float64 array into a slice into with same elements as array.
func AsFloat64Slice(v interface{}) []float64 { func AsFloat64Slice(v any) []float64 {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@ -83,7 +83,7 @@ func AsFloat64Slice(v interface{}) []float64 {
}
// AsStringSlice converts a string array into a slice into with same elements as array.
func AsStringSlice(v interface{}) []string { func AsStringSlice(v any) []string {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil

View file

@ -25,8 +25,8 @@ type oneIterator struct {
attr KeyValue
}
// Next moves the iterator to the next position. Returns false if there are no // Next moves the iterator to the next position.
// more attributes. // Next reports whether there are more attributes.
func (i *Iterator) Next() bool {
i.idx++
return i.idx < i.Len()
@ -106,7 +106,8 @@ func (oi *oneIterator) advance() {
}
}
// Next returns true if there is another attribute available. // Next moves the iterator to the next position.
// Next reports whether there is another attribute available.
func (m *MergeIterator) Next() bool {
if m.one.done && m.two.done {
return false

View file

@ -117,7 +117,7 @@ func (k Key) StringSlice(v []string) KeyValue {
}
}
// Defined returns true for non-empty keys. // Defined reports whether the key is not empty.
func (k Key) Defined() bool {
return len(k) != 0
}

View file

@ -13,7 +13,7 @@ type KeyValue struct {
Value Value
}
// Valid returns if kv is a valid OpenTelemetry attribute. // Valid reports whether kv is a valid OpenTelemetry attribute.
func (kv KeyValue) Valid() bool {
return kv.Key.Defined() && kv.Value.Type() != INVALID
}

View file

@ -31,11 +31,11 @@ type (
// Distinct is a unique identifier of a Set.
//
// Distinct is designed to be ensures equivalence stability: comparisons // Distinct is designed to ensure equivalence stability: comparisons will
// will return the save value across versions. For this reason, Distinct // return the same value across versions. For this reason, Distinct should
// should always be used as a map key instead of a Set. // always be used as a map key instead of a Set.
Distinct struct {
iface interface{} iface any
}
// Sortable implements sort.Interface, used for sorting KeyValue.
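To illustrate the guidance above that a `Distinct`, not a `Set`, should be used as a map key, here is a small sketch (attribute keys and values are arbitrary):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Two sets with the same attributes in a different order are equivalent.
	s1 := attribute.NewSet(attribute.String("a", "1"), attribute.Int("b", 2))
	s2 := attribute.NewSet(attribute.Int("b", 2), attribute.String("a", "1"))

	// Distinct is comparable and stable across versions, so it works as a
	// map key where a Set should not be used directly.
	counts := map[attribute.Distinct]int{}
	counts[s1.Equivalent()]++
	counts[s2.Equivalent()]++

	fmt.Println(len(counts))             // 1
	fmt.Println(counts[s1.Equivalent()]) // 2
}
```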
@ -70,7 +70,7 @@ func (d Distinct) reflectValue() reflect.Value {
return reflect.ValueOf(d.iface)
}
// Valid returns true if this value refers to a valid Set. // Valid reports whether this value refers to a valid Set.
func (d Distinct) Valid() bool {
return d.iface != nil
}
@ -120,7 +120,7 @@ func (l *Set) Value(k Key) (Value, bool) {
return Value{}, false
}
// HasValue tests whether a key is defined in this set. // HasValue reports whether a key is defined in this set.
func (l *Set) HasValue(k Key) bool {
if l == nil {
return false
@ -155,7 +155,7 @@ func (l *Set) Equivalent() Distinct {
return l.equivalent
}
// Equals returns true if the argument set is equivalent to this set. // Equals reports whether the argument set is equivalent to this set.
func (l *Set) Equals(o *Set) bool {
return l.Equivalent() == o.Equivalent()
}
@ -344,7 +344,7 @@ func computeDistinct(kvs []KeyValue) Distinct {
// computeDistinctFixed computes a Distinct for small slices. It returns nil
// if the input is too large for this code path.
func computeDistinctFixed(kvs []KeyValue) interface{} { func computeDistinctFixed(kvs []KeyValue) any {
switch len(kvs) {
case 1:
return [1]KeyValue(kvs)
@ -373,7 +373,7 @@ func computeDistinctFixed(kvs []KeyValue) interface{} {
// computeDistinctReflect computes a Distinct using reflection, works for any
// size input.
func computeDistinctReflect(kvs []KeyValue) interface{} { func computeDistinctReflect(kvs []KeyValue) any {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs {
*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
@ -387,7 +387,7 @@ func (l *Set) MarshalJSON() ([]byte, error) {
}
// MarshalLog is the marshaling function used by the logging system to represent this Set.
func (l Set) MarshalLog() interface{} { func (l Set) MarshalLog() any {
kvs := make(map[string]string)
for _, kv := range l.ToSlice() {
kvs[string(kv.Key)] = kv.Value.Emit()

View file

@ -22,7 +22,7 @@ type Value struct {
vtype Type
numeric uint64
stringly string
slice interface{} slice any
}
const (
@ -199,8 +199,8 @@ func (v Value) asStringSlice() []string {
type unknownValueType struct{}
// AsInterface returns Value's data as interface{}. // AsInterface returns Value's data as any.
func (v Value) AsInterface() interface{} { func (v Value) AsInterface() any {
switch v.Type() {
case BOOL:
return v.AsBool()
@ -262,7 +262,7 @@ func (v Value) Emit() string {
func (v Value) MarshalJSON() ([]byte, error) {
var jsonVal struct {
Type string
Value interface{} Value any
}
jsonVal.Type = v.Type().String()
jsonVal.Value = v.AsInterface()

Some files were not shown because too many files have changed in this diff.