Mirror of https://github.com/superseriousbusiness/gotosocial.git (synced 2025-10-28 15:42:24 -05:00)
[chore] bump dependencies (#4339)
- github.com/KimMachineGun/automemlimit v0.7.4
- github.com/miekg/dns v1.1.67
- github.com/minio/minio-go/v7 v7.0.95
- github.com/spf13/pflag v1.0.7
- github.com/tdewolff/minify/v2 v2.23.9
- github.com/uptrace/bun v1.2.15
- github.com/uptrace/bun/dialect/pgdialect v1.2.15
- github.com/uptrace/bun/dialect/sqlitedialect v1.2.15
- github.com/uptrace/bun/extra/bunotel v1.2.15
- golang.org/x/image v0.29.0
- golang.org/x/net v0.42.0

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4339
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
parent: eb60081985
commit: c00cad2ceb
76 changed files with 5544 additions and 886 deletions
go.mod (34 lines changed)
@@ -34,7 +34,7 @@ require (
  codeberg.org/gruf/go-storage v0.3.1
  codeberg.org/gruf/go-structr v0.9.7
  github.com/DmitriyVTitov/size v1.5.0
- github.com/KimMachineGun/automemlimit v0.7.3
+ github.com/KimMachineGun/automemlimit v0.7.4
  github.com/SherClockHolmes/webpush-go v1.4.0
  github.com/buckket/go-blurhash v1.1.0
  github.com/coreos/go-oidc/v3 v3.14.1
@@ -51,8 +51,8 @@ require (
  github.com/jackc/pgx/v5 v5.7.5
  github.com/k3a/html2text v1.2.1
  github.com/microcosm-cc/bluemonday v1.0.27
- github.com/miekg/dns v1.1.66
- github.com/minio/minio-go/v7 v7.0.94
+ github.com/miekg/dns v1.1.67
+ github.com/minio/minio-go/v7 v7.0.95
  github.com/mitchellh/mapstructure v1.5.0
  github.com/ncruces/go-sqlite3 v0.27.1
  github.com/oklog/ulid v1.3.1
@@ -60,19 +60,19 @@ require (
  github.com/rivo/uniseg v0.4.7
  github.com/spf13/cast v1.9.2
  github.com/spf13/cobra v1.9.1
- github.com/spf13/pflag v1.0.6
+ github.com/spf13/pflag v1.0.7
  github.com/spf13/viper v1.20.1
  github.com/stretchr/testify v1.10.0
- github.com/tdewolff/minify/v2 v2.23.8
+ github.com/tdewolff/minify/v2 v2.23.9
  github.com/technologize/otel-go-contrib v1.1.1
  github.com/temoto/robotstxt v1.1.2
  github.com/tetratelabs/wazero v1.9.0
  github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80
  github.com/ulule/limiter/v3 v3.11.2
- github.com/uptrace/bun v1.2.14
- github.com/uptrace/bun/dialect/pgdialect v1.2.14
- github.com/uptrace/bun/dialect/sqlitedialect v1.2.14
- github.com/uptrace/bun/extra/bunotel v1.2.14
+ github.com/uptrace/bun v1.2.15
+ github.com/uptrace/bun/dialect/pgdialect v1.2.15
+ github.com/uptrace/bun/dialect/sqlitedialect v1.2.15
+ github.com/uptrace/bun/extra/bunotel v1.2.15
  github.com/wagslane/go-password-validator v0.3.0
  github.com/yuin/goldmark v1.7.12
  go.opentelemetry.io/contrib/exporters/autoexport v0.62.0
@@ -84,8 +84,8 @@ require (
  go.opentelemetry.io/otel/trace v1.37.0
  go.uber.org/automaxprocs v1.6.0
  golang.org/x/crypto v0.40.0
- golang.org/x/image v0.28.0
- golang.org/x/net v0.41.0
+ golang.org/x/image v0.29.0
+ golang.org/x/net v0.42.0
  golang.org/x/oauth2 v0.30.0
  golang.org/x/sys v0.34.0
  golang.org/x/text v0.27.0
@@ -153,6 +153,7 @@ require (
  github.com/gorilla/handlers v1.5.2 // indirect
  github.com/gorilla/securecookie v1.1.2 // indirect
  github.com/gorilla/sessions v1.4.0 // indirect
+ github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
  github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
  github.com/huandu/xstrings v1.4.0 // indirect
  github.com/imdario/mergo v0.3.16 // indirect
@@ -165,13 +166,13 @@ require (
  github.com/josharian/intern v1.0.0 // indirect
  github.com/json-iterator/go v1.1.12 // indirect
  github.com/klauspost/compress v1.18.0 // indirect
- github.com/klauspost/cpuid/v2 v2.2.10 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.11 // indirect
  github.com/kr/pretty v0.3.1 // indirect
  github.com/kr/text v0.2.0 // indirect
  github.com/leodido/go-urn v1.4.0 // indirect
  github.com/mailru/easyjson v0.7.7 // indirect
  github.com/mattn/go-isatty v0.0.20 // indirect
- github.com/minio/crc64nvme v1.0.1 // indirect
+ github.com/minio/crc64nvme v1.0.2 // indirect
  github.com/minio/md5-simd v1.1.2 // indirect
  github.com/mitchellh/copystructure v1.2.0 // indirect
  github.com/mitchellh/reflectwalk v1.0.2 // indirect
@@ -182,13 +183,14 @@ require (
  github.com/ncruces/julianday v1.0.0 // indirect
  github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
  github.com/pelletier/go-toml/v2 v2.2.4 // indirect
- github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
+ github.com/philhofer/fwd v1.2.0 // indirect
  github.com/pkg/errors v0.9.1 // indirect
  github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
  github.com/prometheus/client_golang v1.22.0 // indirect
  github.com/prometheus/client_model v0.6.2 // indirect
  github.com/prometheus/common v0.65.0 // indirect
- github.com/prometheus/procfs v0.16.1 // indirect
+ github.com/prometheus/otlptranslator v0.0.0-20250717125610-8549f4ab4f8f // indirect
+ github.com/prometheus/procfs v0.17.0 // indirect
  github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
  github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
  github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
@@ -218,7 +220,7 @@ require (
  go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect
  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect
  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 // indirect
- go.opentelemetry.io/otel/exporters/prometheus v0.59.0 // indirect
+ go.opentelemetry.io/otel/exporters/prometheus v0.59.1 // indirect
  go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0 // indirect
  go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0 // indirect
  go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 // indirect
go.sum (67 lines changed, generated)
@ -58,8 +58,8 @@ codeberg.org/superseriousbusiness/go-swagger v0.32.3-gts-go1.23-fix h1:k76/Th+br
|
|||
codeberg.org/superseriousbusiness/go-swagger v0.32.3-gts-go1.23-fix/go.mod h1:lAwO1nKff3qNRJYVQeTCl1am5pcNiiA2VyDf8TqzS24=
|
||||
github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW515g=
|
||||
github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0=
|
||||
github.com/KimMachineGun/automemlimit v0.7.3 h1:oPgMp0bsWez+4fvgSa11Rd9nUDrd8RLtDjBoT3ro+/A=
|
||||
github.com/KimMachineGun/automemlimit v0.7.3/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
|
||||
github.com/KimMachineGun/automemlimit v0.7.4 h1:UY7QYOIfrr3wjjOAqahFmC3IaQCLWvur9nmfIn6LnWk=
|
||||
github.com/KimMachineGun/automemlimit v0.7.4/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
|
||||
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
|
||||
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
|
||||
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
|
|
@ -249,6 +249,8 @@ github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzq
|
|||
github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
|
||||
github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
|
|
@ -286,8 +288,8 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt
|
|||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/klauspost/cpuid/v2 v2.2.11 h1:0OwqZRYI2rFrjS4kvkDnqJkKHdHaRnCm68/DY4OxRzU=
|
||||
github.com/klauspost/cpuid/v2 v2.2.11/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
|
|
@ -305,14 +307,14 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
|
|||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
|
||||
github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
|
||||
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
|
||||
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
|
||||
github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY=
|
||||
github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/miekg/dns v1.1.67 h1:kg0EHj0G4bfT5/oOys6HhZw4vmMlnoZ+gDu8tJ/AlI0=
|
||||
github.com/miekg/dns v1.1.67/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
|
||||
github.com/minio/crc64nvme v1.0.2 h1:6uO1UxGAD+kwqWWp7mBFsi5gAse66C4NXO8cmcVculg=
|
||||
github.com/minio/crc64nvme v1.0.2/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.94 h1:1ZoksIKPyaSt64AVOyaQvhDOgVC3MfZsWM6mZXRUGtM=
|
||||
github.com/minio/minio-go/v7 v7.0.94/go.mod h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc=
|
||||
github.com/minio/minio-go/v7 v7.0.95 h1:ywOUPg+PebTMTzn9VDsoFJy32ZuARN9zhB+K3IYEvYU=
|
||||
github.com/minio/minio-go/v7 v7.0.95/go.mod h1:wOOX3uxS334vImCNRVyIDdXX9OsXDm89ToynKgqUKlo=
|
||||
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
|
||||
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
|
||||
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
||||
|
|
@ -344,8 +346,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2D
|
|||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY=
|
||||
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
|
||||
github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
|
||||
github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
|
@ -362,8 +364,10 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
|
|||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
|
||||
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/prometheus/otlptranslator v0.0.0-20250717125610-8549f4ab4f8f h1:QQB6SuvGZjK8kdc2YaLJpYhV8fxauOsjE6jgcL6YJ8Q=
|
||||
github.com/prometheus/otlptranslator v0.0.0-20250717125610-8549f4ab4f8f/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI=
|
||||
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
|
||||
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b h1:aUNXCGgukb4gtY99imuIeoh8Vr0GSwAlYxPAhqZrpFc=
|
||||
|
|
@ -402,8 +406,9 @@ github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE=
|
|||
github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
|
||||
github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
|
||||
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
|
|
@ -420,8 +425,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
|
|||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/tdewolff/minify/v2 v2.23.8 h1:tvjHzRer46kwOfpdCBCWsDblCw3QtnLJRd61pTVkyZ8=
|
||||
github.com/tdewolff/minify/v2 v2.23.8/go.mod h1:VW3ISUd3gDOZuQ/jwZr4sCzsuX+Qvsx87FDMjk6Rvno=
|
||||
github.com/tdewolff/minify/v2 v2.23.9 h1:s8hX6wQzOqmanyLxmlynInRPVgZ/xASy6sUHfGsW6kU=
|
||||
github.com/tdewolff/minify/v2 v2.23.9/go.mod h1:VW3ISUd3gDOZuQ/jwZr4sCzsuX+Qvsx87FDMjk6Rvno=
|
||||
github.com/tdewolff/parse/v2 v2.8.1 h1:J5GSHru6o3jF1uLlEKVXkDxxcVx6yzOlIVIotK4w2po=
|
||||
github.com/tdewolff/parse/v2 v2.8.1/go.mod h1:Hwlni2tiVNKyzR1o6nUs4FOF07URA+JLBLd6dlIXYqo=
|
||||
github.com/tdewolff/test v1.0.11 h1:FdLbwQVHxqG16SlkGveC0JVyrJN62COWTRyUFzfbtBE=
|
||||
|
|
@ -462,14 +467,14 @@ github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA
|
|||
github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
|
||||
github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA=
|
||||
github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI=
|
||||
github.com/uptrace/bun v1.2.14 h1:5yFSfi/yVWEzQ2lAaHz+JfWN9AHmqYtNmlbaUbAp3rU=
|
||||
github.com/uptrace/bun v1.2.14/go.mod h1:ZS4nPaEv2Du3OFqAD/irk3WVP6xTB3/9TWqjJbgKYBU=
|
||||
github.com/uptrace/bun/dialect/pgdialect v1.2.14 h1:1jmCn7zcYIJDSk1pJO//b11k9NQP1rpWZoyxfoNdpzI=
|
||||
github.com/uptrace/bun/dialect/pgdialect v1.2.14/go.mod h1:MrRlsIpWIyOCNosWuG8bVtLb80JyIER5ci0VlTa38dU=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.14 h1:eLXmNpy2TSsWJNpyIIIeLBa5M+Xxc4n8jX5ASeuvWrg=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.14/go.mod h1:oORBd9Y7RiAOHAshjuebSFNPZNPLXYcvEWmibuJ8RRk=
|
||||
github.com/uptrace/bun/extra/bunotel v1.2.14 h1:LPg/1kEOcwex5w7+Boh6Rdc3xi1PuMVZV06isOPEPaU=
|
||||
github.com/uptrace/bun/extra/bunotel v1.2.14/go.mod h1:V509v+akUAx31NbN96WEhkY+rBPJxI0Ul+beKNN1Ato=
|
||||
github.com/uptrace/bun v1.2.15 h1:Ut68XRBLDgp9qG9QBMa9ELWaZOmzHNdczHQdrOZbEFE=
|
||||
github.com/uptrace/bun v1.2.15/go.mod h1:Eghz7NonZMiTX/Z6oKYytJ0oaMEJ/eq3kEV4vSqG038=
|
||||
github.com/uptrace/bun/dialect/pgdialect v1.2.15 h1:er+/3giAIqpfrXJw+KP9B7ujyQIi5XkPnFmgjAVL6bA=
|
||||
github.com/uptrace/bun/dialect/pgdialect v1.2.15/go.mod h1:QSiz6Qpy9wlGFsfpf7UMSL6mXAL1jDJhFwuOVacCnOQ=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.15 h1:7upGMVjFRB1oI78GQw6ruNLblYn5CR+kxqcbbeBBils=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.2.15/go.mod h1:c7YIDaPNS2CU2uI1p7umFuFWkuKbDcPDDvp+DLHZnkI=
|
||||
github.com/uptrace/bun/extra/bunotel v1.2.15 h1:6KAvKRpH9BC/7n3eMXVgDYLqghHf2H3FJOvxs/yjFJM=
|
||||
github.com/uptrace/bun/extra/bunotel v1.2.15/go.mod h1:qnASdcJVuoEE+13N3Gd8XHi5gwCydt2S1TccJnefH2k=
|
||||
github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 h1:ZjUj9BLYf9PEqBn8W/OapxhPjVRdC6CsXTdULHsyk5c=
|
||||
github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2/go.mod h1:O8bHQfyinKwTXKkiKNGmLQS7vRsqRxIQTFZpYpHK3IQ=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
|
|
@ -527,8 +532,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWS
|
|||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.59.0 h1:HHf+wKS6o5++XZhS98wvILrLVgHxjA/AMjqHKes+uzo=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.59.0/go.mod h1:R8GpRXTZrqvXHDEGVH5bF6+JqAZcK8PjJcZ5nGhEWiE=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.59.1 h1:HcpSkTkJbggT8bjYP+BjyqPWlD17BH9C5CYNKeDzmcA=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.59.1/go.mod h1:0FJL+gjuUoM07xzik3KPBaN+nz/CoB15kV6WLMiXZag=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0 h1:yEX3aC9KDgvYPhuKECHbOlr5GLwH6KTjLJ1sBSkkxkc=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0/go.mod h1:/GXR0tBmmkxDaCUGahvksvp66mx4yh5+cFXgSlhg0vQ=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0 h1:6VjV6Et+1Hd2iLZEPtdV7vie80Yyqf7oikJLjQ/myi0=
|
||||
|
|
@ -570,8 +575,8 @@ golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
|||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
|
||||
golang.org/x/image v0.28.0 h1:gdem5JW1OLS4FbkWgLO+7ZeFzYtL3xClb97GaUzYMFE=
|
||||
golang.org/x/image v0.28.0/go.mod h1:GUJYXtnGKEUgggyzh+Vxt+AviiCcyiwpsl8iQ8MvwGY=
|
||||
golang.org/x/image v0.29.0 h1:HcdsyR4Gsuys/Axh0rDEmlBmB68rW1U9BUdB3UVHsas=
|
||||
golang.org/x/image v0.29.0/go.mod h1:RVJROnf3SLK8d26OW91j4FrIHGbsJ8QnbEocVTOWQDA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
|
|
@ -594,8 +599,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
|||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
vendor/github.com/KimMachineGun/automemlimit/memlimit/cgroups.go (12 lines changed, generated, vendored)
@@ -157,7 +157,7 @@ func getMemoryLimitV1(chs []cgroupHierarchy, mis []mountInfo) (uint64, error) {
          return 0, err
      }

-     // retrieve the memory limit from the memory.stats and memory.limit_in_bytes files.
+     // retrieve the memory limit from the memory.stat and memory.limit_in_bytes files.
      return readMemoryLimitV1FromPath(cgroupPath)
  }

@@ -173,7 +173,7 @@ func getCgroupV1NoLimit() uint64 {
  func readMemoryLimitV1FromPath(cgroupPath string) (uint64, error) {
      // read hierarchical_memory_limit and memory.limit_in_bytes files.
      // but if hierarchical_memory_limit is not available, then use the max value as a fallback.
-     hml, err := readHierarchicalMemoryLimit(filepath.Join(cgroupPath, "memory.stats"))
+     hml, err := readHierarchicalMemoryLimit(filepath.Join(cgroupPath, "memory.stat"))
      if err != nil && !errors.Is(err, os.ErrNotExist) {
          return 0, fmt.Errorf("failed to read hierarchical_memory_limit: %w", err)
      } else if hml == 0 {
@@ -202,8 +202,8 @@ func readMemoryLimitV1FromPath(cgroupPath string) (uint64, error) {
      return limit, nil
  }

- // readHierarchicalMemoryLimit extracts hierarchical_memory_limit from memory.stats.
- // this function expects the path to be memory.stats file.
+ // readHierarchicalMemoryLimit extracts hierarchical_memory_limit from memory.stat.
+ // this function expects the path to be memory.stat file.
  func readHierarchicalMemoryLimit(path string) (uint64, error) {
      file, err := os.Open(path)
      if err != nil {
@@ -217,12 +217,12 @@ func readHierarchicalMemoryLimit(path string) (uint64, error) {

          fields := strings.Split(line, " ")
          if len(fields) < 2 {
-             return 0, fmt.Errorf("failed to parse memory.stats %q: not enough fields", line)
+             return 0, fmt.Errorf("failed to parse memory.stat %q: not enough fields", line)
          }

          if fields[0] == "hierarchical_memory_limit" {
              if len(fields) > 2 {
-                 return 0, fmt.Errorf("failed to parse memory.stats %q: too many fields for hierarchical_memory_limit", line)
+                 return 0, fmt.Errorf("failed to parse memory.stat %q: too many fields for hierarchical_memory_limit", line)
              }
              return strconv.ParseUint(fields[1], 10, 64)
          }
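The hunks above only correct a filename and the comments around it: cgroup v1 exposes memory.stat, not memory.stats, and that file carries the hierarchical_memory_limit line the vendored code reads. For orientation, here is a minimal, self-contained sketch of reading such a value; it is not the vendored implementation, just an illustration of the format it parses, and the mount path in main is an assumption about a typical cgroup v1 host.

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readHierarchicalLimit scans a cgroup v1 memory.stat file for the
// "hierarchical_memory_limit <bytes>" line and returns the value.
// Simplified illustration only; not the vendored automemlimit code.
func readHierarchicalLimit(path string) (uint64, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	s := bufio.NewScanner(f)
	for s.Scan() {
		// A memory.stat line looks like: "hierarchical_memory_limit 9223372036854771712"
		fields := strings.Fields(s.Text())
		if len(fields) == 2 && fields[0] == "hierarchical_memory_limit" {
			return strconv.ParseUint(fields[1], 10, 64)
		}
	}
	return 0, fmt.Errorf("hierarchical_memory_limit not found in %s", path)
}

func main() {
	// Assumed path; on many cgroup v1 hosts this is where the memory
	// controller is mounted, but it varies by distribution/container runtime.
	limit, err := readHierarchicalLimit("/sys/fs/cgroup/memory/memory.stat")
	if err != nil {
		fmt.Println("no cgroup v1 limit:", err)
		return
	}
	fmt.Println("hierarchical_memory_limit:", limit, "bytes")
}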
vendor/github.com/grafana/regexp/.gitignore (15 lines, new file, generated, vendored)
@@ -0,0 +1,15 @@
+ # Binaries for programs and plugins
+ *.exe
+ *.exe~
+ *.dll
+ *.so
+ *.dylib
+
+ # Test binary, built with `go test -c`
+ *.test
+
+ # Output of the go coverage tool, specifically when used with LiteIDE
+ *.out
+
+ # Dependency directories (remove the comment below to include it)
+ # vendor/
vendor/github.com/grafana/regexp/LICENSE (27 lines, new file, generated, vendored)
@@ -0,0 +1,27 @@
+ Copyright (c) 2009 The Go Authors. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+    * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+    * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/github.com/grafana/regexp/README.md (12 lines, new file, generated, vendored)
@@ -0,0 +1,12 @@
+ # Grafana Go regexp package
+ This repo is a fork of the upstream Go `regexp` package, with some code optimisations to make it run faster.
+
+ All the optimisations have been submitted upstream, but not yet merged.
+
+ All semantics are the same, and the optimised code passes all tests from upstream.
+
+ The `main` branch is non-optimised: switch over to [`speedup`](https://github.com/grafana/regexp/tree/speedup) branch for the improved code.
+
+ ## Benchmarks:
+
+ 
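The README above says the fork keeps the same semantics as the standard library, and the vendored sources that follow are indeed the full regexp package. Adopting it is therefore usually a one-line import swap; the sketch below assumes the fork exposes the stdlib regexp API (which these vendored files suggest) and is only an illustration, not part of this commit.

package main

import (
	"fmt"

	// Drop-in replacement for the standard "regexp" package:
	// only the import path changes, call sites stay the same.
	regexp "github.com/grafana/regexp"
)

// Same constructor as the standard library.
var semverRe = regexp.MustCompile(`^v(\d+)\.(\d+)\.(\d+)$`)

func main() {
	m := semverRe.FindStringSubmatch("v1.2.15")
	fmt.Println(m) // [v1.2.15 1 2 15]
}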
vendor/github.com/grafana/regexp/backtrack.go (365 lines, new file, generated, vendored)
@ -0,0 +1,365 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// backtrack is a regular expression search with submatch
|
||||
// tracking for small regular expressions and texts. It allocates
|
||||
// a bit vector with (length of input) * (length of prog) bits,
|
||||
// to make sure it never explores the same (character position, instruction)
|
||||
// state multiple times. This limits the search to run in time linear in
|
||||
// the length of the test.
|
||||
//
|
||||
// backtrack is a fast replacement for the NFA code on small
|
||||
// regexps when onepass cannot be used.
|
||||
|
||||
package regexp
|
||||
|
||||
import (
|
||||
"regexp/syntax"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A job is an entry on the backtracker's job stack. It holds
|
||||
// the instruction pc and the position in the input.
|
||||
type job struct {
|
||||
pc uint32
|
||||
arg bool
|
||||
pos int
|
||||
}
|
||||
|
||||
const (
|
||||
visitedBits = 32
|
||||
maxBacktrackProg = 500 // len(prog.Inst) <= max
|
||||
maxBacktrackVector = 256 * 1024 // bit vector size <= max (bits)
|
||||
)
|
||||
|
||||
// bitState holds state for the backtracker.
|
||||
type bitState struct {
|
||||
end int
|
||||
cap []int
|
||||
matchcap []int
|
||||
jobs []job
|
||||
visited []uint32
|
||||
|
||||
inputs inputs
|
||||
}
|
||||
|
||||
var bitStatePool sync.Pool
|
||||
|
||||
func newBitState() *bitState {
|
||||
b, ok := bitStatePool.Get().(*bitState)
|
||||
if !ok {
|
||||
b = new(bitState)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func freeBitState(b *bitState) {
|
||||
b.inputs.clear()
|
||||
bitStatePool.Put(b)
|
||||
}
|
||||
|
||||
// maxBitStateLen returns the maximum length of a string to search with
|
||||
// the backtracker using prog.
|
||||
func maxBitStateLen(prog *syntax.Prog) int {
|
||||
if !shouldBacktrack(prog) {
|
||||
return 0
|
||||
}
|
||||
return maxBacktrackVector / len(prog.Inst)
|
||||
}
|
||||
|
||||
// shouldBacktrack reports whether the program is too
|
||||
// long for the backtracker to run.
|
||||
func shouldBacktrack(prog *syntax.Prog) bool {
|
||||
return len(prog.Inst) <= maxBacktrackProg
|
||||
}
|
||||
|
||||
// reset resets the state of the backtracker.
|
||||
// end is the end position in the input.
|
||||
// ncap is the number of captures.
|
||||
func (b *bitState) reset(prog *syntax.Prog, end int, ncap int) {
|
||||
b.end = end
|
||||
|
||||
if cap(b.jobs) == 0 {
|
||||
b.jobs = make([]job, 0, 256)
|
||||
} else {
|
||||
b.jobs = b.jobs[:0]
|
||||
}
|
||||
|
||||
visitedSize := (len(prog.Inst)*(end+1) + visitedBits - 1) / visitedBits
|
||||
if cap(b.visited) < visitedSize {
|
||||
b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits)
|
||||
} else {
|
||||
b.visited = b.visited[:visitedSize]
|
||||
clear(b.visited) // set to 0
|
||||
}
|
||||
|
||||
if cap(b.cap) < ncap {
|
||||
b.cap = make([]int, ncap)
|
||||
} else {
|
||||
b.cap = b.cap[:ncap]
|
||||
}
|
||||
for i := range b.cap {
|
||||
b.cap[i] = -1
|
||||
}
|
||||
|
||||
if cap(b.matchcap) < ncap {
|
||||
b.matchcap = make([]int, ncap)
|
||||
} else {
|
||||
b.matchcap = b.matchcap[:ncap]
|
||||
}
|
||||
for i := range b.matchcap {
|
||||
b.matchcap[i] = -1
|
||||
}
|
||||
}
|
||||
|
||||
// shouldVisit reports whether the combination of (pc, pos) has not
|
||||
// been visited yet.
|
||||
func (b *bitState) shouldVisit(pc uint32, pos int) bool {
|
||||
n := uint(int(pc)*(b.end+1) + pos)
|
||||
if b.visited[n/visitedBits]&(1<<(n&(visitedBits-1))) != 0 {
|
||||
return false
|
||||
}
|
||||
b.visited[n/visitedBits] |= 1 << (n & (visitedBits - 1))
|
||||
return true
|
||||
}
|
||||
|
||||
// push pushes (pc, pos, arg) onto the job stack if it should be
|
||||
// visited.
|
||||
func (b *bitState) push(re *Regexp, pc uint32, pos int, arg bool) {
|
||||
// Only check shouldVisit when arg is false.
|
||||
// When arg is true, we are continuing a previous visit.
|
||||
if re.prog.Inst[pc].Op != syntax.InstFail && (arg || b.shouldVisit(pc, pos)) {
|
||||
b.jobs = append(b.jobs, job{pc: pc, arg: arg, pos: pos})
|
||||
}
|
||||
}
|
||||
|
||||
// tryBacktrack runs a backtracking search starting at pos.
|
||||
func (re *Regexp) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool {
|
||||
longest := re.longest
|
||||
|
||||
b.push(re, pc, pos, false)
|
||||
for len(b.jobs) > 0 {
|
||||
l := len(b.jobs) - 1
|
||||
// Pop job off the stack.
|
||||
pc := b.jobs[l].pc
|
||||
pos := b.jobs[l].pos
|
||||
arg := b.jobs[l].arg
|
||||
b.jobs = b.jobs[:l]
|
||||
|
||||
// Optimization: rather than push and pop,
|
||||
// code that is going to Push and continue
|
||||
// the loop simply updates ip, p, and arg
|
||||
// and jumps to CheckAndLoop. We have to
|
||||
// do the ShouldVisit check that Push
|
||||
// would have, but we avoid the stack
|
||||
// manipulation.
|
||||
goto Skip
|
||||
CheckAndLoop:
|
||||
if !b.shouldVisit(pc, pos) {
|
||||
continue
|
||||
}
|
||||
Skip:
|
||||
|
||||
inst := &re.prog.Inst[pc]
|
||||
|
||||
switch inst.Op {
|
||||
default:
|
||||
panic("bad inst")
|
||||
case syntax.InstFail:
|
||||
panic("unexpected InstFail")
|
||||
case syntax.InstAlt:
|
||||
// Cannot just
|
||||
// b.push(inst.Out, pos, false)
|
||||
// b.push(inst.Arg, pos, false)
|
||||
// If during the processing of inst.Out, we encounter
|
||||
// inst.Arg via another path, we want to process it then.
|
||||
// Pushing it here will inhibit that. Instead, re-push
|
||||
// inst with arg==true as a reminder to push inst.Arg out
|
||||
// later.
|
||||
if arg {
|
||||
// Finished inst.Out; try inst.Arg.
|
||||
arg = false
|
||||
pc = inst.Arg
|
||||
goto CheckAndLoop
|
||||
} else {
|
||||
b.push(re, pc, pos, true)
|
||||
pc = inst.Out
|
||||
goto CheckAndLoop
|
||||
}
|
||||
|
||||
case syntax.InstAltMatch:
|
||||
// One opcode consumes runes; the other leads to match.
|
||||
switch re.prog.Inst[inst.Out].Op {
|
||||
case syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
|
||||
// inst.Arg is the match.
|
||||
b.push(re, inst.Arg, pos, false)
|
||||
pc = inst.Arg
|
||||
pos = b.end
|
||||
goto CheckAndLoop
|
||||
}
|
||||
// inst.Out is the match - non-greedy
|
||||
b.push(re, inst.Out, b.end, false)
|
||||
pc = inst.Out
|
||||
goto CheckAndLoop
|
||||
|
||||
case syntax.InstRune:
|
||||
r, width := i.step(pos)
|
||||
if !inst.MatchRune(r) {
|
||||
continue
|
||||
}
|
||||
pos += width
|
||||
pc = inst.Out
|
||||
goto CheckAndLoop
|
||||
|
||||
case syntax.InstRune1:
|
||||
r, width := i.step(pos)
|
||||
if r != inst.Rune[0] {
|
||||
continue
|
||||
}
|
||||
pos += width
|
||||
pc = inst.Out
|
||||
goto CheckAndLoop
|
||||
|
||||
case syntax.InstRuneAnyNotNL:
|
||||
r, width := i.step(pos)
|
||||
if r == '\n' || r == endOfText {
|
||||
continue
|
||||
}
|
||||
pos += width
|
||||
pc = inst.Out
|
||||
goto CheckAndLoop
|
||||
|
||||
case syntax.InstRuneAny:
|
||||
r, width := i.step(pos)
|
||||
if r == endOfText {
|
||||
continue
|
||||
}
|
||||
pos += width
|
||||
pc = inst.Out
|
||||
goto CheckAndLoop
|
||||
|
||||
case syntax.InstCapture:
|
||||
if arg {
|
||||
// Finished inst.Out; restore the old value.
|
||||
b.cap[inst.Arg] = pos
|
||||
continue
|
||||
} else {
|
||||
if inst.Arg < uint32(len(b.cap)) {
|
||||
// Capture pos to register, but save old value.
|
||||
b.push(re, pc, b.cap[inst.Arg], true) // come back when we're done.
|
||||
b.cap[inst.Arg] = pos
|
||||
}
|
||||
pc = inst.Out
|
||||
goto CheckAndLoop
|
||||
}
|
||||
|
||||
case syntax.InstEmptyWidth:
|
||||
flag := i.context(pos)
|
||||
if !flag.match(syntax.EmptyOp(inst.Arg)) {
|
||||
continue
|
||||
}
|
||||
pc = inst.Out
|
||||
goto CheckAndLoop
|
||||
|
||||
case syntax.InstNop:
|
||||
pc = inst.Out
|
||||
goto CheckAndLoop
|
||||
|
||||
case syntax.InstMatch:
|
||||
// We found a match. If the caller doesn't care
|
||||
// where the match is, no point going further.
|
||||
if len(b.cap) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
// Record best match so far.
|
||||
// Only need to check end point, because this entire
|
||||
// call is only considering one start position.
|
||||
if len(b.cap) > 1 {
|
||||
b.cap[1] = pos
|
||||
}
|
||||
if old := b.matchcap[1]; old == -1 || (longest && pos > 0 && pos > old) {
|
||||
copy(b.matchcap, b.cap)
|
||||
}
|
||||
|
||||
// If going for first match, we're done.
|
||||
if !longest {
|
||||
return true
|
||||
}
|
||||
|
||||
// If we used the entire text, no longer match is possible.
|
||||
if pos == b.end {
|
||||
return true
|
||||
}
|
||||
|
||||
// Otherwise, continue on in hope of a longer match.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return longest && len(b.matchcap) > 1 && b.matchcap[1] >= 0
|
||||
}
|
||||
|
||||
// backtrack runs a backtracking search of prog on the input starting at pos.
|
||||
func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []int) []int {
|
||||
startCond := re.cond
|
||||
if startCond == ^syntax.EmptyOp(0) { // impossible
|
||||
return nil
|
||||
}
|
||||
if startCond&syntax.EmptyBeginText != 0 && pos != 0 {
|
||||
// Anchored match, past beginning of text.
|
||||
return nil
|
||||
}
|
||||
|
||||
b := newBitState()
|
||||
i, end := b.inputs.init(nil, ib, is)
|
||||
b.reset(re.prog, end, ncap)
|
||||
|
||||
// Anchored search must start at the beginning of the input
|
||||
if startCond&syntax.EmptyBeginText != 0 {
|
||||
if len(b.cap) > 0 {
|
||||
b.cap[0] = pos
|
||||
}
|
||||
if !re.tryBacktrack(b, i, uint32(re.prog.Start), pos) {
|
||||
freeBitState(b)
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
|
||||
// Unanchored search, starting from each possible text position.
|
||||
// Notice that we have to try the empty string at the end of
|
||||
// the text, so the loop condition is pos <= end, not pos < end.
|
||||
// This looks like it's quadratic in the size of the text,
|
||||
// but we are not clearing visited between calls to TrySearch,
|
||||
// so no work is duplicated and it ends up still being linear.
|
||||
width := -1
|
||||
for ; pos <= end && width != 0; pos += width {
|
||||
if len(re.prefix) > 0 {
|
||||
// Match requires literal prefix; fast search for it.
|
||||
advance := i.index(re, pos)
|
||||
if advance < 0 {
|
||||
freeBitState(b)
|
||||
return nil
|
||||
}
|
||||
pos += advance
|
||||
}
|
||||
|
||||
if len(b.cap) > 0 {
|
||||
b.cap[0] = pos
|
||||
}
|
||||
if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) {
|
||||
// Match must be leftmost; done.
|
||||
goto Match
|
||||
}
|
||||
_, width = i.step(pos)
|
||||
}
|
||||
freeBitState(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
Match:
|
||||
dstCap = append(dstCap, b.matchcap...)
|
||||
freeBitState(b)
|
||||
return dstCap
|
||||
}
|
||||
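As the header comment in backtrack.go above explains, the backtracker keeps one visited bit per (instruction, input position) pair, caps that bit vector at maxBacktrackVector bits, and only engages for programs of at most maxBacktrackProg instructions. A tiny worked example of that sizing arithmetic follows; the constants are copied from the file, while the 120-instruction program length is a made-up illustration.

package main

import "fmt"

// Constants as defined in vendor/github.com/grafana/regexp/backtrack.go.
const (
	maxBacktrackProg   = 500        // backtracker used only when len(prog.Inst) <= this
	maxBacktrackVector = 256 * 1024 // upper bound on the visited bit-vector size, in bits
)

func main() {
	progLen := 120 // hypothetical compiled program length, in instructions

	if progLen > maxBacktrackProg {
		fmt.Println("program too large: the NFA engine is used instead")
		return
	}
	// One bit per (instruction, input position) pair, so the longest input
	// the backtracker handles is the vector size divided by the program length.
	maxInput := maxBacktrackVector / progLen
	fmt.Printf("backtracker handles inputs up to %d bytes for a %d-instruction program\n",
		maxInput, progLen)
}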
vendor/github.com/grafana/regexp/exec.go (554 lines, new file, generated, vendored)
@ -0,0 +1,554 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package regexp
|
||||
|
||||
import (
|
||||
"io"
|
||||
"regexp/syntax"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A queue is a 'sparse array' holding pending threads of execution.
|
||||
// See https://research.swtch.com/2008/03/using-uninitialized-memory-for-fun-and.html
|
||||
type queue struct {
|
||||
sparse []uint32
|
||||
dense []entry
|
||||
}
|
||||
|
||||
// An entry is an entry on a queue.
|
||||
// It holds both the instruction pc and the actual thread.
|
||||
// Some queue entries are just place holders so that the machine
|
||||
// knows it has considered that pc. Such entries have t == nil.
|
||||
type entry struct {
|
||||
pc uint32
|
||||
t *thread
|
||||
}
|
||||
|
||||
// A thread is the state of a single path through the machine:
|
||||
// an instruction and a corresponding capture array.
|
||||
// See https://swtch.com/~rsc/regexp/regexp2.html
|
||||
type thread struct {
|
||||
inst *syntax.Inst
|
||||
cap []int
|
||||
}
|
||||
|
||||
// A machine holds all the state during an NFA simulation for p.
|
||||
type machine struct {
|
||||
re *Regexp // corresponding Regexp
|
||||
p *syntax.Prog // compiled program
|
||||
q0, q1 queue // two queues for runq, nextq
|
||||
pool []*thread // pool of available threads
|
||||
matched bool // whether a match was found
|
||||
matchcap []int // capture information for the match
|
||||
|
||||
inputs inputs
|
||||
}
|
||||
|
||||
type inputs struct {
|
||||
// cached inputs, to avoid allocation
|
||||
bytes inputBytes
|
||||
string inputString
|
||||
reader inputReader
|
||||
}
|
||||
|
||||
func (i *inputs) newBytes(b []byte) input {
|
||||
i.bytes.str = b
|
||||
return &i.bytes
|
||||
}
|
||||
|
||||
func (i *inputs) newString(s string) input {
|
||||
i.string.str = s
|
||||
return &i.string
|
||||
}
|
||||
|
||||
func (i *inputs) newReader(r io.RuneReader) input {
|
||||
i.reader.r = r
|
||||
i.reader.atEOT = false
|
||||
i.reader.pos = 0
|
||||
return &i.reader
|
||||
}
|
||||
|
||||
func (i *inputs) clear() {
|
||||
// We need to clear 1 of these.
|
||||
// Avoid the expense of clearing the others (pointer write barrier).
|
||||
if i.bytes.str != nil {
|
||||
i.bytes.str = nil
|
||||
} else if i.reader.r != nil {
|
||||
i.reader.r = nil
|
||||
} else {
|
||||
i.string.str = ""
|
||||
}
|
||||
}
|
||||
|
||||
func (i *inputs) init(r io.RuneReader, b []byte, s string) (input, int) {
|
||||
if r != nil {
|
||||
return i.newReader(r), 0
|
||||
}
|
||||
if b != nil {
|
||||
return i.newBytes(b), len(b)
|
||||
}
|
||||
return i.newString(s), len(s)
|
||||
}
|
||||
|
||||
func (m *machine) init(ncap int) {
|
||||
for _, t := range m.pool {
|
||||
t.cap = t.cap[:ncap]
|
||||
}
|
||||
m.matchcap = m.matchcap[:ncap]
|
||||
}
|
||||
|
||||
// alloc allocates a new thread with the given instruction.
|
||||
// It uses the free pool if possible.
|
||||
func (m *machine) alloc(i *syntax.Inst) *thread {
|
||||
var t *thread
|
||||
if n := len(m.pool); n > 0 {
|
||||
t = m.pool[n-1]
|
||||
m.pool = m.pool[:n-1]
|
||||
} else {
|
||||
t = new(thread)
|
||||
t.cap = make([]int, len(m.matchcap), cap(m.matchcap))
|
||||
}
|
||||
t.inst = i
|
||||
return t
|
||||
}
|
||||
|
||||
// A lazyFlag is a lazily-evaluated syntax.EmptyOp,
|
||||
// for checking zero-width flags like ^ $ \A \z \B \b.
|
||||
// It records the pair of relevant runes and does not
|
||||
// determine the implied flags until absolutely necessary
|
||||
// (most of the time, that means never).
|
||||
type lazyFlag uint64
|
||||
|
||||
func newLazyFlag(r1, r2 rune) lazyFlag {
|
||||
return lazyFlag(uint64(r1)<<32 | uint64(uint32(r2)))
|
||||
}
|
||||
|
||||
func (f lazyFlag) match(op syntax.EmptyOp) bool {
|
||||
if op == 0 {
|
||||
return true
|
||||
}
|
||||
r1 := rune(f >> 32)
|
||||
if op&syntax.EmptyBeginLine != 0 {
|
||||
if r1 != '\n' && r1 >= 0 {
|
||||
return false
|
||||
}
|
||||
op &^= syntax.EmptyBeginLine
|
||||
}
|
||||
if op&syntax.EmptyBeginText != 0 {
|
||||
if r1 >= 0 {
|
||||
return false
|
||||
}
|
||||
op &^= syntax.EmptyBeginText
|
||||
}
|
||||
if op == 0 {
|
||||
return true
|
||||
}
|
||||
r2 := rune(f)
|
||||
if op&syntax.EmptyEndLine != 0 {
|
||||
if r2 != '\n' && r2 >= 0 {
|
||||
return false
|
||||
}
|
||||
op &^= syntax.EmptyEndLine
|
||||
}
|
||||
if op&syntax.EmptyEndText != 0 {
|
||||
if r2 >= 0 {
|
||||
return false
|
||||
}
|
||||
op &^= syntax.EmptyEndText
|
||||
}
|
||||
if op == 0 {
|
||||
return true
|
||||
}
|
||||
if syntax.IsWordChar(r1) != syntax.IsWordChar(r2) {
|
||||
op &^= syntax.EmptyWordBoundary
|
||||
} else {
|
||||
op &^= syntax.EmptyNoWordBoundary
|
||||
}
|
||||
return op == 0
|
||||
}
|
||||
|
||||
// match runs the machine over the input starting at pos.
|
||||
// It reports whether a match was found.
|
||||
// If so, m.matchcap holds the submatch information.
|
||||
func (m *machine) match(i input, pos int) bool {
|
||||
startCond := m.re.cond
|
||||
if startCond == ^syntax.EmptyOp(0) { // impossible
|
||||
return false
|
||||
}
|
||||
m.matched = false
|
||||
for i := range m.matchcap {
|
||||
m.matchcap[i] = -1
|
||||
}
|
||||
runq, nextq := &m.q0, &m.q1
|
||||
r, r1 := endOfText, endOfText
|
||||
width, width1 := 0, 0
|
||||
r, width = i.step(pos)
|
||||
if r != endOfText {
|
||||
r1, width1 = i.step(pos + width)
|
||||
}
|
||||
var flag lazyFlag
|
||||
if pos == 0 {
|
||||
flag = newLazyFlag(-1, r)
|
||||
} else {
|
||||
flag = i.context(pos)
|
||||
}
|
||||
for {
|
||||
if len(runq.dense) == 0 {
|
||||
if startCond&syntax.EmptyBeginText != 0 && pos != 0 {
|
||||
// Anchored match, past beginning of text.
|
||||
break
|
||||
}
|
||||
if m.matched {
|
||||
// Have match; finished exploring alternatives.
|
||||
break
|
||||
}
|
||||
if len(m.re.prefix) > 0 && r1 != m.re.prefixRune && i.canCheckPrefix() {
|
||||
// Match requires literal prefix; fast search for it.
|
||||
advance := i.index(m.re, pos)
|
||||
if advance < 0 {
|
||||
break
|
||||
}
|
||||
pos += advance
|
||||
r, width = i.step(pos)
|
||||
r1, width1 = i.step(pos + width)
|
||||
}
|
||||
}
|
||||
if !m.matched {
|
||||
if len(m.matchcap) > 0 {
|
||||
m.matchcap[0] = pos
|
||||
}
|
||||
m.add(runq, uint32(m.p.Start), pos, m.matchcap, &flag, nil)
|
||||
}
|
||||
flag = newLazyFlag(r, r1)
|
||||
m.step(runq, nextq, pos, pos+width, r, &flag)
|
||||
if width == 0 {
|
||||
break
|
||||
}
|
||||
if len(m.matchcap) == 0 && m.matched {
|
||||
// Found a match and not paying attention
|
||||
// to where it is, so any match will do.
|
||||
break
|
||||
}
|
||||
pos += width
|
||||
r, width = r1, width1
|
||||
if r != endOfText {
|
||||
r1, width1 = i.step(pos + width)
|
||||
}
|
||||
runq, nextq = nextq, runq
|
||||
}
|
||||
m.clear(nextq)
|
||||
return m.matched
|
||||
}
|
||||
|
||||
// clear frees all threads on the thread queue.
|
||||
func (m *machine) clear(q *queue) {
|
||||
for _, d := range q.dense {
|
||||
if d.t != nil {
|
||||
m.pool = append(m.pool, d.t)
|
||||
}
|
||||
}
|
||||
q.dense = q.dense[:0]
|
||||
}
|
||||
|
||||
// step executes one step of the machine, running each of the threads
|
||||
// on runq and appending new threads to nextq.
|
||||
// The step processes the rune c (which may be endOfText),
|
||||
// which starts at position pos and ends at nextPos.
|
||||
// nextCond gives the setting for the empty-width flags after c.
|
||||
func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond *lazyFlag) {
|
||||
longest := m.re.longest
|
||||
for j := 0; j < len(runq.dense); j++ {
|
||||
d := &runq.dense[j]
|
||||
t := d.t
|
||||
if t == nil {
|
||||
continue
|
||||
}
|
||||
if longest && m.matched && len(t.cap) > 0 && m.matchcap[0] < t.cap[0] {
|
||||
m.pool = append(m.pool, t)
|
||||
continue
|
||||
}
|
||||
i := t.inst
|
||||
add := false
|
||||
switch i.Op {
|
||||
default:
|
||||
panic("bad inst")
|
||||
|
||||
case syntax.InstMatch:
|
||||
if len(t.cap) > 0 && (!longest || !m.matched || m.matchcap[1] < pos) {
|
||||
t.cap[1] = pos
|
||||
copy(m.matchcap, t.cap)
|
||||
}
|
||||
if !longest {
|
||||
// First-match mode: cut off all lower-priority threads.
|
||||
for _, d := range runq.dense[j+1:] {
|
||||
if d.t != nil {
|
||||
m.pool = append(m.pool, d.t)
|
||||
}
|
||||
}
|
||||
runq.dense = runq.dense[:0]
|
||||
}
|
||||
m.matched = true
|
||||
|
||||
case syntax.InstRune:
|
||||
add = i.MatchRune(c)
|
||||
case syntax.InstRune1:
|
||||
add = c == i.Rune[0]
|
||||
case syntax.InstRuneAny:
|
||||
add = true
|
||||
case syntax.InstRuneAnyNotNL:
|
||||
add = c != '\n'
|
||||
}
|
||||
if add {
|
||||
t = m.add(nextq, i.Out, nextPos, t.cap, nextCond, t)
|
||||
}
|
||||
if t != nil {
|
||||
m.pool = append(m.pool, t)
|
||||
}
|
||||
}
|
||||
runq.dense = runq.dense[:0]
|
||||
}
|
||||
|
||||
// add adds an entry to q for pc, unless the q already has such an entry.
|
||||
// It also recursively adds an entry for all instructions reachable from pc by following
|
||||
// empty-width conditions satisfied by cond. pos gives the current position
|
||||
// in the input.
|
||||
func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond *lazyFlag, t *thread) *thread {
|
||||
Again:
|
||||
if pc == 0 {
|
||||
return t
|
||||
}
|
||||
if j := q.sparse[pc]; j < uint32(len(q.dense)) && q.dense[j].pc == pc {
|
||||
return t
|
||||
}
|
||||
|
||||
j := len(q.dense)
|
||||
q.dense = q.dense[:j+1]
|
||||
d := &q.dense[j]
|
||||
d.t = nil
|
||||
d.pc = pc
|
||||
q.sparse[pc] = uint32(j)
|
||||
|
||||
i := &m.p.Inst[pc]
|
||||
switch i.Op {
|
||||
default:
|
||||
panic("unhandled")
|
||||
case syntax.InstFail:
|
||||
// nothing
|
||||
case syntax.InstAlt, syntax.InstAltMatch:
|
||||
t = m.add(q, i.Out, pos, cap, cond, t)
|
||||
pc = i.Arg
|
||||
goto Again
|
||||
case syntax.InstEmptyWidth:
|
||||
if cond.match(syntax.EmptyOp(i.Arg)) {
|
||||
pc = i.Out
|
||||
goto Again
|
||||
}
|
||||
case syntax.InstNop:
|
||||
pc = i.Out
|
||||
goto Again
|
||||
case syntax.InstCapture:
|
||||
if int(i.Arg) < len(cap) {
|
||||
opos := cap[i.Arg]
|
||||
cap[i.Arg] = pos
|
||||
m.add(q, i.Out, pos, cap, cond, nil)
|
||||
cap[i.Arg] = opos
|
||||
} else {
|
||||
pc = i.Out
|
||||
goto Again
|
||||
}
|
||||
case syntax.InstMatch, syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
|
||||
if t == nil {
|
||||
t = m.alloc(i)
|
||||
} else {
|
||||
t.inst = i
|
||||
}
|
||||
if len(cap) > 0 && &t.cap[0] != &cap[0] {
|
||||
copy(t.cap, cap)
|
||||
}
|
||||
d.t = t
|
||||
t = nil
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
type onePassMachine struct {
|
||||
inputs inputs
|
||||
matchcap []int
|
||||
}
|
||||
|
||||
var onePassPool sync.Pool
|
||||
|
||||
func newOnePassMachine() *onePassMachine {
|
||||
m, ok := onePassPool.Get().(*onePassMachine)
|
||||
if !ok {
|
||||
m = new(onePassMachine)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func freeOnePassMachine(m *onePassMachine) {
|
||||
m.inputs.clear()
|
||||
onePassPool.Put(m)
|
||||
}
|
||||
|
||||
// doOnePass implements r.doExecute using the one-pass execution engine.
|
||||
func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap int, dstCap []int) []int {
|
||||
startCond := re.cond
|
||||
if startCond == ^syntax.EmptyOp(0) { // impossible
|
||||
return nil
|
||||
}
|
||||
|
||||
m := newOnePassMachine()
|
||||
if cap(m.matchcap) < ncap {
|
||||
m.matchcap = make([]int, ncap)
|
||||
} else {
|
||||
m.matchcap = m.matchcap[:ncap]
|
||||
}
|
||||
|
||||
matched := false
|
||||
for i := range m.matchcap {
|
||||
m.matchcap[i] = -1
|
||||
}
|
||||
|
||||
i, _ := m.inputs.init(ir, ib, is)
|
||||
|
||||
r, r1 := endOfText, endOfText
|
||||
width, width1 := 0, 0
|
||||
r, width = i.step(pos)
|
||||
if r != endOfText {
|
||||
r1, width1 = i.step(pos + width)
|
||||
}
|
||||
var flag lazyFlag
|
||||
if pos == 0 {
|
||||
flag = newLazyFlag(-1, r)
|
||||
} else {
|
||||
flag = i.context(pos)
|
||||
}
|
||||
pc := re.onepass.Start
|
||||
inst := &re.onepass.Inst[pc]
|
||||
// If there is a simple literal prefix, skip over it.
|
||||
if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) &&
|
||||
len(re.prefix) > 0 && i.canCheckPrefix() {
|
||||
// Match requires literal prefix; fast search for it.
|
||||
if !i.hasPrefix(re) {
|
||||
goto Return
|
||||
}
|
||||
pos += len(re.prefix)
|
||||
r, width = i.step(pos)
|
||||
r1, width1 = i.step(pos + width)
|
||||
flag = i.context(pos)
|
||||
pc = int(re.prefixEnd)
|
||||
}
|
||||
for {
|
||||
inst = &re.onepass.Inst[pc]
|
||||
pc = int(inst.Out)
|
||||
switch inst.Op {
|
||||
default:
|
||||
panic("bad inst")
|
||||
case syntax.InstMatch:
|
||||
matched = true
|
||||
if len(m.matchcap) > 0 {
|
||||
m.matchcap[0] = 0
|
||||
m.matchcap[1] = pos
|
||||
}
|
||||
goto Return
|
||||
case syntax.InstRune:
|
||||
if !inst.MatchRune(r) {
|
||||
goto Return
|
||||
}
|
||||
case syntax.InstRune1:
|
||||
if r != inst.Rune[0] {
|
||||
goto Return
|
||||
}
|
||||
case syntax.InstRuneAny:
|
||||
// Nothing
|
||||
case syntax.InstRuneAnyNotNL:
|
||||
if r == '\n' {
|
||||
goto Return
|
||||
}
|
||||
// peek at the input rune to see which branch of the Alt to take
|
||||
case syntax.InstAlt, syntax.InstAltMatch:
|
||||
pc = int(onePassNext(inst, r))
|
||||
continue
|
||||
case syntax.InstFail:
|
||||
goto Return
|
||||
case syntax.InstNop:
|
||||
continue
|
||||
case syntax.InstEmptyWidth:
|
||||
if !flag.match(syntax.EmptyOp(inst.Arg)) {
|
||||
goto Return
|
||||
}
|
||||
continue
|
||||
case syntax.InstCapture:
|
||||
if int(inst.Arg) < len(m.matchcap) {
|
||||
m.matchcap[inst.Arg] = pos
|
||||
}
|
||||
continue
|
||||
}
|
||||
if width == 0 {
|
||||
break
|
||||
}
|
||||
flag = newLazyFlag(r, r1)
|
||||
pos += width
|
||||
r, width = r1, width1
|
||||
if r != endOfText {
|
||||
r1, width1 = i.step(pos + width)
|
||||
}
|
||||
}
|
||||
|
||||
Return:
|
||||
if !matched {
|
||||
freeOnePassMachine(m)
|
||||
return nil
|
||||
}
|
||||
|
||||
dstCap = append(dstCap, m.matchcap...)
|
||||
freeOnePassMachine(m)
|
||||
return dstCap
|
||||
}
|
||||
|
||||
// doMatch reports whether either r, b or s match the regexp.
|
||||
func (re *Regexp) doMatch(r io.RuneReader, b []byte, s string) bool {
|
||||
return re.doExecute(r, b, s, 0, 0, nil) != nil
|
||||
}
|
||||
|
||||
// doExecute finds the leftmost match in the input, appends the position
|
||||
// of its subexpressions to dstCap and returns dstCap.
|
||||
//
|
||||
// nil is returned if no matches are found and non-nil if matches are found.
|
||||
func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap int, dstCap []int) []int {
|
||||
if dstCap == nil {
|
||||
// Make sure 'return dstCap' is non-nil.
|
||||
dstCap = arrayNoInts[:0:0]
|
||||
}
|
||||
|
||||
if r == nil && len(b)+len(s) < re.minInputLen {
|
||||
return nil
|
||||
}
|
||||
|
||||
if re.onepass != nil {
|
||||
return re.doOnePass(r, b, s, pos, ncap, dstCap)
|
||||
}
|
||||
if r == nil && len(b)+len(s) < re.maxBitStateLen {
|
||||
return re.backtrack(b, s, pos, ncap, dstCap)
|
||||
}
|
||||
|
||||
m := re.get()
|
||||
i, _ := m.inputs.init(r, b, s)
|
||||
|
||||
m.init(ncap)
|
||||
if !m.match(i, pos) {
|
||||
re.put(m)
|
||||
return nil
|
||||
}
|
||||
|
||||
dstCap = append(dstCap, m.matchcap...)
|
||||
re.put(m)
|
||||
return dstCap
|
||||
}
|
||||
|
||||
// arrayNoInts is returned by doExecute match if nil dstCap is passed
|
||||
// to it with ncap=0.
|
||||
var arrayNoInts [0]int
|
||||
vendor/github.com/grafana/regexp/onepass.go (500 lines, new file, generated, vendored)
@ -0,0 +1,500 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package regexp
|
||||
|
||||
import (
|
||||
"regexp/syntax"
|
||||
"slices"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// "One-pass" regexp execution.
|
||||
// Some regexps can be analyzed to determine that they never need
|
||||
// backtracking: they are guaranteed to run in one pass over the string
|
||||
// without bothering to save all the usual NFA state.
|
||||
// Detect those and execute them more quickly.
|
||||
|
||||
// A onePassProg is a compiled one-pass regular expression program.
|
||||
// It is the same as syntax.Prog except for the use of onePassInst.
|
||||
type onePassProg struct {
|
||||
Inst []onePassInst
|
||||
Start int // index of start instruction
|
||||
NumCap int // number of InstCapture insts in re
|
||||
}
|
||||
|
||||
// A onePassInst is a single instruction in a one-pass regular expression program.
|
||||
// It is the same as syntax.Inst except for the new 'Next' field.
|
||||
type onePassInst struct {
|
||||
syntax.Inst
|
||||
Next []uint32
|
||||
}
|
||||
|
||||
// onePassPrefix returns a literal string that all matches for the
|
||||
// regexp must start with. Complete is true if the prefix
|
||||
// is the entire match. Pc is the index of the last rune instruction
|
||||
// in the string. The onePassPrefix skips over the mandatory
|
||||
// EmptyBeginText.
|
||||
func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) {
|
||||
i := &p.Inst[p.Start]
|
||||
if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 {
|
||||
return "", i.Op == syntax.InstMatch, uint32(p.Start)
|
||||
}
|
||||
pc = i.Out
|
||||
i = &p.Inst[pc]
|
||||
for i.Op == syntax.InstNop {
|
||||
pc = i.Out
|
||||
i = &p.Inst[pc]
|
||||
}
|
||||
// Avoid allocation of buffer if prefix is empty.
|
||||
if iop(i) != syntax.InstRune || len(i.Rune) != 1 {
|
||||
return "", i.Op == syntax.InstMatch, uint32(p.Start)
|
||||
}
|
||||
|
||||
// Have prefix; gather characters.
|
||||
var buf strings.Builder
|
||||
for iop(i) == syntax.InstRune && len(i.Rune) == 1 && syntax.Flags(i.Arg)&syntax.FoldCase == 0 && i.Rune[0] != utf8.RuneError {
|
||||
buf.WriteRune(i.Rune[0])
|
||||
pc, i = i.Out, &p.Inst[i.Out]
|
||||
}
|
||||
if i.Op == syntax.InstEmptyWidth &&
|
||||
syntax.EmptyOp(i.Arg)&syntax.EmptyEndText != 0 &&
|
||||
p.Inst[i.Out].Op == syntax.InstMatch {
|
||||
complete = true
|
||||
}
|
||||
return buf.String(), complete, pc
|
||||
}
|
||||
|
||||
// onePassNext selects the next actionable state of the prog, based on the input character.
|
||||
// It should only be called when i.Op == InstAlt or InstAltMatch, and from the one-pass machine.
|
||||
// One of the alternates may ultimately lead without input to end of line. If the instruction
|
||||
// is InstAltMatch the path to the InstMatch is in i.Out, the normal node in i.Next.
|
||||
func onePassNext(i *onePassInst, r rune) uint32 {
|
||||
next := i.MatchRunePos(r)
|
||||
if next >= 0 {
|
||||
return i.Next[next]
|
||||
}
|
||||
if i.Op == syntax.InstAltMatch {
|
||||
return i.Out
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func iop(i *syntax.Inst) syntax.InstOp {
|
||||
op := i.Op
|
||||
switch op {
|
||||
case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
|
||||
op = syntax.InstRune
|
||||
}
|
||||
return op
|
||||
}
|
||||
|
||||
// Sparse Array implementation is used as a queueOnePass.
|
||||
type queueOnePass struct {
|
||||
sparse []uint32
|
||||
dense []uint32
|
||||
size, nextIndex uint32
|
||||
}
|
||||
|
||||
func (q *queueOnePass) empty() bool {
|
||||
return q.nextIndex >= q.size
|
||||
}
|
||||
|
||||
func (q *queueOnePass) next() (n uint32) {
|
||||
n = q.dense[q.nextIndex]
|
||||
q.nextIndex++
|
||||
return
|
||||
}
|
||||
|
||||
func (q *queueOnePass) clear() {
|
||||
q.size = 0
|
||||
q.nextIndex = 0
|
||||
}
|
||||
|
||||
func (q *queueOnePass) contains(u uint32) bool {
|
||||
if u >= uint32(len(q.sparse)) {
|
||||
return false
|
||||
}
|
||||
return q.sparse[u] < q.size && q.dense[q.sparse[u]] == u
|
||||
}
|
||||
|
||||
func (q *queueOnePass) insert(u uint32) {
|
||||
if !q.contains(u) {
|
||||
q.insertNew(u)
|
||||
}
|
||||
}
|
||||
|
||||
func (q *queueOnePass) insertNew(u uint32) {
|
||||
if u >= uint32(len(q.sparse)) {
|
||||
return
|
||||
}
|
||||
q.sparse[u] = q.size
|
||||
q.dense[q.size] = u
|
||||
q.size++
|
||||
}
|
||||
|
||||
func newQueue(size int) (q *queueOnePass) {
|
||||
return &queueOnePass{
|
||||
sparse: make([]uint32, size),
|
||||
dense: make([]uint32, size),
|
||||
}
|
||||
}
|
||||
|
||||
// mergeRuneSets merges two non-intersecting runesets, and returns the merged result,
|
||||
// and a NextIp array. The idea is that if a rune matches the OnePassRunes at index
|
||||
// i, NextIp[i/2] is the target. If the input sets intersect, an empty runeset and a
|
||||
// NextIp array with the single element mergeFailed is returned.
|
||||
// The code assumes that both inputs contain ordered and non-intersecting rune pairs.
|
||||
const mergeFailed = uint32(0xffffffff)
|
||||
|
||||
var (
|
||||
noRune = []rune{}
|
||||
noNext = []uint32{mergeFailed}
|
||||
)
|
||||
|
||||
func mergeRuneSets(leftRunes, rightRunes *[]rune, leftPC, rightPC uint32) ([]rune, []uint32) {
|
||||
leftLen := len(*leftRunes)
|
||||
rightLen := len(*rightRunes)
|
||||
if leftLen&0x1 != 0 || rightLen&0x1 != 0 {
|
||||
panic("mergeRuneSets odd length []rune")
|
||||
}
|
||||
var (
|
||||
lx, rx int
|
||||
)
|
||||
merged := make([]rune, 0)
|
||||
next := make([]uint32, 0)
|
||||
ok := true
|
||||
defer func() {
|
||||
if !ok {
|
||||
merged = nil
|
||||
next = nil
|
||||
}
|
||||
}()
|
||||
|
||||
ix := -1
|
||||
extend := func(newLow *int, newArray *[]rune, pc uint32) bool {
|
||||
if ix > 0 && (*newArray)[*newLow] <= merged[ix] {
|
||||
return false
|
||||
}
|
||||
merged = append(merged, (*newArray)[*newLow], (*newArray)[*newLow+1])
|
||||
*newLow += 2
|
||||
ix += 2
|
||||
next = append(next, pc)
|
||||
return true
|
||||
}
|
||||
|
||||
for lx < leftLen || rx < rightLen {
|
||||
switch {
|
||||
case rx >= rightLen:
|
||||
ok = extend(&lx, leftRunes, leftPC)
|
||||
case lx >= leftLen:
|
||||
ok = extend(&rx, rightRunes, rightPC)
|
||||
case (*rightRunes)[rx] < (*leftRunes)[lx]:
|
||||
ok = extend(&rx, rightRunes, rightPC)
|
||||
default:
|
||||
ok = extend(&lx, leftRunes, leftPC)
|
||||
}
|
||||
if !ok {
|
||||
return noRune, noNext
|
||||
}
|
||||
}
|
||||
return merged, next
|
||||
}
|
||||
|
||||
// cleanupOnePass drops working memory, and restores certain shortcut instructions.
|
||||
func cleanupOnePass(prog *onePassProg, original *syntax.Prog) {
|
||||
for ix, instOriginal := range original.Inst {
|
||||
switch instOriginal.Op {
|
||||
case syntax.InstAlt, syntax.InstAltMatch, syntax.InstRune:
|
||||
case syntax.InstCapture, syntax.InstEmptyWidth, syntax.InstNop, syntax.InstMatch, syntax.InstFail:
|
||||
prog.Inst[ix].Next = nil
|
||||
case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
|
||||
prog.Inst[ix].Next = nil
|
||||
prog.Inst[ix] = onePassInst{Inst: instOriginal}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// onePassCopy creates a copy of the original Prog, as we'll be modifying it.
|
||||
func onePassCopy(prog *syntax.Prog) *onePassProg {
|
||||
p := &onePassProg{
|
||||
Start: prog.Start,
|
||||
NumCap: prog.NumCap,
|
||||
Inst: make([]onePassInst, len(prog.Inst)),
|
||||
}
|
||||
for i, inst := range prog.Inst {
|
||||
p.Inst[i] = onePassInst{Inst: inst}
|
||||
}
|
||||
|
||||
// rewrites one or more common Prog constructs that enable some otherwise
|
||||
// non-onepass Progs to be onepass. A:BD (for example) means an InstAlt at
|
||||
// ip A, that points to ips B & C.
|
||||
// A:BC + B:DA => A:BC + B:CD
|
||||
// A:BC + B:DC => A:DC + B:DC
|
||||
for pc := range p.Inst {
|
||||
switch p.Inst[pc].Op {
|
||||
default:
|
||||
continue
|
||||
case syntax.InstAlt, syntax.InstAltMatch:
|
||||
// A:Bx + B:Ay
|
||||
p_A_Other := &p.Inst[pc].Out
|
||||
p_A_Alt := &p.Inst[pc].Arg
|
||||
// make sure a target is another Alt
|
||||
instAlt := p.Inst[*p_A_Alt]
|
||||
if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) {
|
||||
p_A_Alt, p_A_Other = p_A_Other, p_A_Alt
|
||||
instAlt = p.Inst[*p_A_Alt]
|
||||
if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
instOther := p.Inst[*p_A_Other]
|
||||
// Analyzing both legs pointing to Alts is for another day
|
||||
if instOther.Op == syntax.InstAlt || instOther.Op == syntax.InstAltMatch {
|
||||
// too complicated
|
||||
continue
|
||||
}
|
||||
// simple empty transition loop
|
||||
// A:BC + B:DA => A:BC + B:DC
|
||||
p_B_Alt := &p.Inst[*p_A_Alt].Out
|
||||
p_B_Other := &p.Inst[*p_A_Alt].Arg
|
||||
patch := false
|
||||
if instAlt.Out == uint32(pc) {
|
||||
patch = true
|
||||
} else if instAlt.Arg == uint32(pc) {
|
||||
patch = true
|
||||
p_B_Alt, p_B_Other = p_B_Other, p_B_Alt
|
||||
}
|
||||
if patch {
|
||||
*p_B_Alt = *p_A_Other
|
||||
}
|
||||
|
||||
// empty transition to common target
|
||||
// A:BC + B:DC => A:DC + B:DC
|
||||
if *p_A_Other == *p_B_Alt {
|
||||
*p_A_Alt = *p_B_Other
|
||||
}
|
||||
}
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune}
|
||||
var anyRune = []rune{0, unicode.MaxRune}
|
||||
|
||||
// makeOnePass creates a onepass Prog, if possible. It is possible if at any alt,
|
||||
// the match engine can always tell which branch to take. The routine may modify
|
||||
// p if it is turned into a onepass Prog. If it isn't possible for this to be a
|
||||
// onepass Prog, the Prog nil is returned. makeOnePass is recursive
|
||||
// to the size of the Prog.
|
||||
func makeOnePass(p *onePassProg) *onePassProg {
|
||||
// If the machine is very long, it's not worth the time to check if we can use one pass.
|
||||
if len(p.Inst) >= 1000 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
instQueue = newQueue(len(p.Inst))
|
||||
visitQueue = newQueue(len(p.Inst))
|
||||
check func(uint32, []bool) bool
|
||||
onePassRunes = make([][]rune, len(p.Inst))
|
||||
)
|
||||
|
||||
// check that paths from Alt instructions are unambiguous, and rebuild the new
|
||||
// program as a onepass program
|
||||
check = func(pc uint32, m []bool) (ok bool) {
|
||||
ok = true
|
||||
inst := &p.Inst[pc]
|
||||
if visitQueue.contains(pc) {
|
||||
return
|
||||
}
|
||||
visitQueue.insert(pc)
|
||||
switch inst.Op {
|
||||
case syntax.InstAlt, syntax.InstAltMatch:
|
||||
ok = check(inst.Out, m) && check(inst.Arg, m)
|
||||
// check no-input paths to InstMatch
|
||||
matchOut := m[inst.Out]
|
||||
matchArg := m[inst.Arg]
|
||||
if matchOut && matchArg {
|
||||
ok = false
|
||||
break
|
||||
}
|
||||
// Match on empty goes in inst.Out
|
||||
if matchArg {
|
||||
inst.Out, inst.Arg = inst.Arg, inst.Out
|
||||
matchOut, matchArg = matchArg, matchOut
|
||||
}
|
||||
if matchOut {
|
||||
m[pc] = true
|
||||
inst.Op = syntax.InstAltMatch
|
||||
}
|
||||
|
||||
// build a dispatch operator from the two legs of the alt.
|
||||
onePassRunes[pc], inst.Next = mergeRuneSets(
|
||||
&onePassRunes[inst.Out], &onePassRunes[inst.Arg], inst.Out, inst.Arg)
|
||||
if len(inst.Next) > 0 && inst.Next[0] == mergeFailed {
|
||||
ok = false
|
||||
break
|
||||
}
|
||||
case syntax.InstCapture, syntax.InstNop:
|
||||
ok = check(inst.Out, m)
|
||||
m[pc] = m[inst.Out]
|
||||
// pass matching runes back through these no-ops.
|
||||
onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...)
|
||||
inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
|
||||
for i := range inst.Next {
|
||||
inst.Next[i] = inst.Out
|
||||
}
|
||||
case syntax.InstEmptyWidth:
|
||||
ok = check(inst.Out, m)
|
||||
m[pc] = m[inst.Out]
|
||||
onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...)
|
||||
inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
|
||||
for i := range inst.Next {
|
||||
inst.Next[i] = inst.Out
|
||||
}
|
||||
case syntax.InstMatch, syntax.InstFail:
|
||||
m[pc] = inst.Op == syntax.InstMatch
|
||||
case syntax.InstRune:
|
||||
m[pc] = false
|
||||
if len(inst.Next) > 0 {
|
||||
break
|
||||
}
|
||||
instQueue.insert(inst.Out)
|
||||
if len(inst.Rune) == 0 {
|
||||
onePassRunes[pc] = []rune{}
|
||||
inst.Next = []uint32{inst.Out}
|
||||
break
|
||||
}
|
||||
runes := make([]rune, 0)
|
||||
if len(inst.Rune) == 1 && syntax.Flags(inst.Arg)&syntax.FoldCase != 0 {
|
||||
r0 := inst.Rune[0]
|
||||
runes = append(runes, r0, r0)
|
||||
for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) {
|
||||
runes = append(runes, r1, r1)
|
||||
}
|
||||
slices.Sort(runes)
|
||||
} else {
|
||||
runes = append(runes, inst.Rune...)
|
||||
}
|
||||
onePassRunes[pc] = runes
|
||||
inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
|
||||
for i := range inst.Next {
|
||||
inst.Next[i] = inst.Out
|
||||
}
|
||||
inst.Op = syntax.InstRune
|
||||
case syntax.InstRune1:
|
||||
m[pc] = false
|
||||
if len(inst.Next) > 0 {
|
||||
break
|
||||
}
|
||||
instQueue.insert(inst.Out)
|
||||
runes := []rune{}
|
||||
// expand case-folded runes
|
||||
if syntax.Flags(inst.Arg)&syntax.FoldCase != 0 {
|
||||
r0 := inst.Rune[0]
|
||||
runes = append(runes, r0, r0)
|
||||
for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) {
|
||||
runes = append(runes, r1, r1)
|
||||
}
|
||||
slices.Sort(runes)
|
||||
} else {
|
||||
runes = append(runes, inst.Rune[0], inst.Rune[0])
|
||||
}
|
||||
onePassRunes[pc] = runes
|
||||
inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
|
||||
for i := range inst.Next {
|
||||
inst.Next[i] = inst.Out
|
||||
}
|
||||
inst.Op = syntax.InstRune
|
||||
case syntax.InstRuneAny:
|
||||
m[pc] = false
|
||||
if len(inst.Next) > 0 {
|
||||
break
|
||||
}
|
||||
instQueue.insert(inst.Out)
|
||||
onePassRunes[pc] = append([]rune{}, anyRune...)
|
||||
inst.Next = []uint32{inst.Out}
|
||||
case syntax.InstRuneAnyNotNL:
|
||||
m[pc] = false
|
||||
if len(inst.Next) > 0 {
|
||||
break
|
||||
}
|
||||
instQueue.insert(inst.Out)
|
||||
onePassRunes[pc] = append([]rune{}, anyRuneNotNL...)
|
||||
inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
|
||||
for i := range inst.Next {
|
||||
inst.Next[i] = inst.Out
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
instQueue.clear()
|
||||
instQueue.insert(uint32(p.Start))
|
||||
m := make([]bool, len(p.Inst))
|
||||
for !instQueue.empty() {
|
||||
visitQueue.clear()
|
||||
pc := instQueue.next()
|
||||
if !check(pc, m) {
|
||||
p = nil
|
||||
break
|
||||
}
|
||||
}
|
||||
if p != nil {
|
||||
for i := range p.Inst {
|
||||
p.Inst[i].Rune = onePassRunes[i]
|
||||
}
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// compileOnePass returns a new *syntax.Prog suitable for onePass execution if the original Prog
|
||||
// can be recharacterized as a one-pass regexp program, or syntax.nil if the
|
||||
// Prog cannot be converted. For a one pass prog, the fundamental condition that must
|
||||
// be true is: at any InstAlt, there must be no ambiguity about what branch to take.
|
||||
func compileOnePass(prog *syntax.Prog) (p *onePassProg) {
|
||||
if prog.Start == 0 {
|
||||
return nil
|
||||
}
|
||||
// onepass regexp is anchored
|
||||
if prog.Inst[prog.Start].Op != syntax.InstEmptyWidth ||
|
||||
syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText {
|
||||
return nil
|
||||
}
|
||||
// every instruction leading to InstMatch must be EmptyEndText
|
||||
for _, inst := range prog.Inst {
|
||||
opOut := prog.Inst[inst.Out].Op
|
||||
switch inst.Op {
|
||||
default:
|
||||
if opOut == syntax.InstMatch {
|
||||
return nil
|
||||
}
|
||||
case syntax.InstAlt, syntax.InstAltMatch:
|
||||
if opOut == syntax.InstMatch || prog.Inst[inst.Arg].Op == syntax.InstMatch {
|
||||
return nil
|
||||
}
|
||||
case syntax.InstEmptyWidth:
|
||||
if opOut == syntax.InstMatch {
|
||||
if syntax.EmptyOp(inst.Arg)&syntax.EmptyEndText == syntax.EmptyEndText {
|
||||
continue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// Creates a slightly optimized copy of the original Prog
|
||||
// that cleans up some Prog idioms that block valid onepass programs
|
||||
p = onePassCopy(prog)
|
||||
|
||||
// checkAmbiguity on InstAlts, build onepass Prog if possible
|
||||
p = makeOnePass(p)
|
||||
|
||||
if p != nil {
|
||||
cleanupOnePass(p, prog)
|
||||
}
|
||||
return p
|
||||
}
|
||||
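The queueOnePass type in the vendored onepass.go above is a classic sparse/dense set: insert and membership tests are O(1), and clear is O(1) because only the size counter resets. A minimal standalone sketch of the same technique (not part of the vendored code; all names are illustrative):

package main

import "fmt"

// sparseSet tracks a set of small uint32 IDs with O(1) insert,
// contains and clear, mirroring queueOnePass in onepass.go.
type sparseSet struct {
	sparse []uint32 // sparse[id] = position of id in dense
	dense  []uint32 // dense[0:size] = members in insertion order
	size   uint32
}

func newSparseSet(n int) *sparseSet {
	return &sparseSet{sparse: make([]uint32, n), dense: make([]uint32, n)}
}

func (s *sparseSet) contains(id uint32) bool {
	if id >= uint32(len(s.sparse)) {
		return false
	}
	// The cross-check makes stale sparse entries harmless after clear().
	return s.sparse[id] < s.size && s.dense[s.sparse[id]] == id
}

func (s *sparseSet) insert(id uint32) {
	if id >= uint32(len(s.sparse)) || s.contains(id) {
		return
	}
	s.sparse[id] = s.size
	s.dense[s.size] = id
	s.size++
}

func (s *sparseSet) clear() { s.size = 0 }

func main() {
	q := newSparseSet(16)
	q.insert(3)
	q.insert(7)
	fmt.Println(q.contains(3), q.contains(5)) // true false
	q.clear()
	fmt.Println(q.contains(3)) // false
}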
1304	vendor/github.com/grafana/regexp/regexp.go	generated vendored Normal file
(File diff suppressed because it is too large.)
3	vendor/github.com/klauspost/cpuid/v2/README.md	generated vendored
@@ -285,6 +285,7 @@ Exit Code 1
| AMXCOMPLEX | Tile computational operations on complex numbers |
| AMXTILE | Tile architecture |
| AMXTF32 | Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile |
| AMXTRANSPOSE | Tile multiply where the first operand is transposed |
| APX_F | Intel APX |
| AVX | AVX functions |
| AVX10 | If set the Intel AVX10 Converged Vector ISA is supported |
@@ -420,6 +421,8 @@ Exit Code 1
| SHA | Intel SHA Extensions |
| SME | AMD Secure Memory Encryption supported |
| SME_COHERENT | AMD Hardware cache coherency across encryption domains enforced |
| SM3_X86 | SM3 instructions |
| SM4_X86 | SM4 instructions |
| SPEC_CTRL_SSBD | Speculative Store Bypass Disable |
| SRBDS_CTRL | SRBDS mitigation MSR available |
| SSE | SSE functions |
15	vendor/github.com/klauspost/cpuid/v2/cpuid.go	generated vendored
@@ -85,6 +85,7 @@ const (
	AMXTILE // Tile architecture
	AMXTF32 // Tile architecture
	AMXCOMPLEX // Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile
	AMXTRANSPOSE // Tile multiply where the first operand is transposed
	APX_F // Intel APX
	AVX // AVX functions
	AVX10 // If set the Intel AVX10 Converged Vector ISA is supported
@@ -222,6 +223,8 @@ const (
	SHA // Intel SHA Extensions
	SME // AMD Secure Memory Encryption supported
	SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced
	SM3_X86 // SM3 instructions
	SM4_X86 // SM4 instructions
	SPEC_CTRL_SSBD // Speculative Store Bypass Disable
	SRBDS_CTRL // SRBDS mitigation MSR available
	SRSO_MSR_FIX // Indicates that software may use MSR BP_CFG[BpSpecReduce] to mitigate SRSO.
@@ -283,7 +286,7 @@ const (
	CRC32 // CRC32/CRC32C instructions
	DCPOP // Data cache clean to Point of Persistence (DC CVAP)
	EVTSTRM // Generic timer
	FCMA // Floatin point complex number addition and multiplication
	FCMA // Floating point complex number addition and multiplication
	FHM // FMLAL and FMLSL instructions
	FP // Single-precision and double-precision floating point
	FPHP // Half-precision floating point
@@ -878,7 +881,12 @@ func physicalCores() int {
	v, _ := vendorID()
	switch v {
	case Intel:
		return logicalCores() / threadsPerCore()
		lc := logicalCores()
		tpc := threadsPerCore()
		if lc > 0 && tpc > 0 {
			return lc / tpc
		}
		return 0
	case AMD, Hygon:
		lc := logicalCores()
		tpc := threadsPerCore()
@@ -1279,6 +1287,8 @@ func support() flagSet {
	// CPUID.(EAX=7, ECX=1).EAX
	eax1, _, _, edx1 := cpuidex(7, 1)
	fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI)
	fs.setIf(eax1&(1<<1) != 0, SM3_X86)
	fs.setIf(eax1&(1<<2) != 0, SM4_X86)
	fs.setIf(eax1&(1<<7) != 0, CMPCCXADD)
	fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL)
	fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT)
@@ -1290,6 +1300,7 @@ func support() flagSet {
	// CPUID.(EAX=7, ECX=1).EDX
	fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8)
	fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT)
	fs.setIf(edx1&(1<<6) != 0, AMXTRANSPOSE)
	fs.setIf(edx1&(1<<7) != 0, AMXTF32)
	fs.setIf(edx1&(1<<8) != 0, AMXCOMPLEX)
	fs.setIf(edx1&(1<<10) != 0, AVXVNNIINT16)
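As a rough usage sketch for the feature IDs added above (not part of the diff), the library's exported cpuid.CPU value carries the detected feature set. The Supports call and the PhysicalCores field below are the commonly documented v2 API and are assumed unchanged here.

package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// Query the newly added feature IDs alongside an established one.
	fmt.Println("AMXTRANSPOSE:", cpuid.CPU.Supports(cpuid.AMXTRANSPOSE))
	fmt.Println("SM3_X86:", cpuid.CPU.Supports(cpuid.SM3_X86))
	fmt.Println("AVX2:", cpuid.CPU.Supports(cpuid.AVX2))

	// PhysicalCores is derived from the physicalCores helper changed above,
	// which now guards against a zero thread count instead of dividing by zero.
	fmt.Println("physical cores:", cpuid.CPU.PhysicalCores)
}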
437	vendor/github.com/klauspost/cpuid/v2/featureid_string.go	generated vendored
@@ -19,227 +19,230 @@ func _() {
_ = x[AMXTILE-9]
|
||||
_ = x[AMXTF32-10]
|
||||
_ = x[AMXCOMPLEX-11]
|
||||
_ = x[APX_F-12]
|
||||
_ = x[AVX-13]
|
||||
_ = x[AVX10-14]
|
||||
_ = x[AVX10_128-15]
|
||||
_ = x[AVX10_256-16]
|
||||
_ = x[AVX10_512-17]
|
||||
_ = x[AVX2-18]
|
||||
_ = x[AVX512BF16-19]
|
||||
_ = x[AVX512BITALG-20]
|
||||
_ = x[AVX512BW-21]
|
||||
_ = x[AVX512CD-22]
|
||||
_ = x[AVX512DQ-23]
|
||||
_ = x[AVX512ER-24]
|
||||
_ = x[AVX512F-25]
|
||||
_ = x[AVX512FP16-26]
|
||||
_ = x[AVX512IFMA-27]
|
||||
_ = x[AVX512PF-28]
|
||||
_ = x[AVX512VBMI-29]
|
||||
_ = x[AVX512VBMI2-30]
|
||||
_ = x[AVX512VL-31]
|
||||
_ = x[AVX512VNNI-32]
|
||||
_ = x[AVX512VP2INTERSECT-33]
|
||||
_ = x[AVX512VPOPCNTDQ-34]
|
||||
_ = x[AVXIFMA-35]
|
||||
_ = x[AVXNECONVERT-36]
|
||||
_ = x[AVXSLOW-37]
|
||||
_ = x[AVXVNNI-38]
|
||||
_ = x[AVXVNNIINT8-39]
|
||||
_ = x[AVXVNNIINT16-40]
|
||||
_ = x[BHI_CTRL-41]
|
||||
_ = x[BMI1-42]
|
||||
_ = x[BMI2-43]
|
||||
_ = x[CETIBT-44]
|
||||
_ = x[CETSS-45]
|
||||
_ = x[CLDEMOTE-46]
|
||||
_ = x[CLMUL-47]
|
||||
_ = x[CLZERO-48]
|
||||
_ = x[CMOV-49]
|
||||
_ = x[CMPCCXADD-50]
|
||||
_ = x[CMPSB_SCADBS_SHORT-51]
|
||||
_ = x[CMPXCHG8-52]
|
||||
_ = x[CPBOOST-53]
|
||||
_ = x[CPPC-54]
|
||||
_ = x[CX16-55]
|
||||
_ = x[EFER_LMSLE_UNS-56]
|
||||
_ = x[ENQCMD-57]
|
||||
_ = x[ERMS-58]
|
||||
_ = x[F16C-59]
|
||||
_ = x[FLUSH_L1D-60]
|
||||
_ = x[FMA3-61]
|
||||
_ = x[FMA4-62]
|
||||
_ = x[FP128-63]
|
||||
_ = x[FP256-64]
|
||||
_ = x[FSRM-65]
|
||||
_ = x[FXSR-66]
|
||||
_ = x[FXSROPT-67]
|
||||
_ = x[GFNI-68]
|
||||
_ = x[HLE-69]
|
||||
_ = x[HRESET-70]
|
||||
_ = x[HTT-71]
|
||||
_ = x[HWA-72]
|
||||
_ = x[HYBRID_CPU-73]
|
||||
_ = x[HYPERVISOR-74]
|
||||
_ = x[IA32_ARCH_CAP-75]
|
||||
_ = x[IA32_CORE_CAP-76]
|
||||
_ = x[IBPB-77]
|
||||
_ = x[IBPB_BRTYPE-78]
|
||||
_ = x[IBRS-79]
|
||||
_ = x[IBRS_PREFERRED-80]
|
||||
_ = x[IBRS_PROVIDES_SMP-81]
|
||||
_ = x[IBS-82]
|
||||
_ = x[IBSBRNTRGT-83]
|
||||
_ = x[IBSFETCHSAM-84]
|
||||
_ = x[IBSFFV-85]
|
||||
_ = x[IBSOPCNT-86]
|
||||
_ = x[IBSOPCNTEXT-87]
|
||||
_ = x[IBSOPSAM-88]
|
||||
_ = x[IBSRDWROPCNT-89]
|
||||
_ = x[IBSRIPINVALIDCHK-90]
|
||||
_ = x[IBS_FETCH_CTLX-91]
|
||||
_ = x[IBS_OPDATA4-92]
|
||||
_ = x[IBS_OPFUSE-93]
|
||||
_ = x[IBS_PREVENTHOST-94]
|
||||
_ = x[IBS_ZEN4-95]
|
||||
_ = x[IDPRED_CTRL-96]
|
||||
_ = x[INT_WBINVD-97]
|
||||
_ = x[INVLPGB-98]
|
||||
_ = x[KEYLOCKER-99]
|
||||
_ = x[KEYLOCKERW-100]
|
||||
_ = x[LAHF-101]
|
||||
_ = x[LAM-102]
|
||||
_ = x[LBRVIRT-103]
|
||||
_ = x[LZCNT-104]
|
||||
_ = x[MCAOVERFLOW-105]
|
||||
_ = x[MCDT_NO-106]
|
||||
_ = x[MCOMMIT-107]
|
||||
_ = x[MD_CLEAR-108]
|
||||
_ = x[MMX-109]
|
||||
_ = x[MMXEXT-110]
|
||||
_ = x[MOVBE-111]
|
||||
_ = x[MOVDIR64B-112]
|
||||
_ = x[MOVDIRI-113]
|
||||
_ = x[MOVSB_ZL-114]
|
||||
_ = x[MOVU-115]
|
||||
_ = x[MPX-116]
|
||||
_ = x[MSRIRC-117]
|
||||
_ = x[MSRLIST-118]
|
||||
_ = x[MSR_PAGEFLUSH-119]
|
||||
_ = x[NRIPS-120]
|
||||
_ = x[NX-121]
|
||||
_ = x[OSXSAVE-122]
|
||||
_ = x[PCONFIG-123]
|
||||
_ = x[POPCNT-124]
|
||||
_ = x[PPIN-125]
|
||||
_ = x[PREFETCHI-126]
|
||||
_ = x[PSFD-127]
|
||||
_ = x[RDPRU-128]
|
||||
_ = x[RDRAND-129]
|
||||
_ = x[RDSEED-130]
|
||||
_ = x[RDTSCP-131]
|
||||
_ = x[RRSBA_CTRL-132]
|
||||
_ = x[RTM-133]
|
||||
_ = x[RTM_ALWAYS_ABORT-134]
|
||||
_ = x[SBPB-135]
|
||||
_ = x[SERIALIZE-136]
|
||||
_ = x[SEV-137]
|
||||
_ = x[SEV_64BIT-138]
|
||||
_ = x[SEV_ALTERNATIVE-139]
|
||||
_ = x[SEV_DEBUGSWAP-140]
|
||||
_ = x[SEV_ES-141]
|
||||
_ = x[SEV_RESTRICTED-142]
|
||||
_ = x[SEV_SNP-143]
|
||||
_ = x[SGX-144]
|
||||
_ = x[SGXLC-145]
|
||||
_ = x[SHA-146]
|
||||
_ = x[SME-147]
|
||||
_ = x[SME_COHERENT-148]
|
||||
_ = x[SPEC_CTRL_SSBD-149]
|
||||
_ = x[SRBDS_CTRL-150]
|
||||
_ = x[SRSO_MSR_FIX-151]
|
||||
_ = x[SRSO_NO-152]
|
||||
_ = x[SRSO_USER_KERNEL_NO-153]
|
||||
_ = x[SSE-154]
|
||||
_ = x[SSE2-155]
|
||||
_ = x[SSE3-156]
|
||||
_ = x[SSE4-157]
|
||||
_ = x[SSE42-158]
|
||||
_ = x[SSE4A-159]
|
||||
_ = x[SSSE3-160]
|
||||
_ = x[STIBP-161]
|
||||
_ = x[STIBP_ALWAYSON-162]
|
||||
_ = x[STOSB_SHORT-163]
|
||||
_ = x[SUCCOR-164]
|
||||
_ = x[SVM-165]
|
||||
_ = x[SVMDA-166]
|
||||
_ = x[SVMFBASID-167]
|
||||
_ = x[SVML-168]
|
||||
_ = x[SVMNP-169]
|
||||
_ = x[SVMPF-170]
|
||||
_ = x[SVMPFT-171]
|
||||
_ = x[SYSCALL-172]
|
||||
_ = x[SYSEE-173]
|
||||
_ = x[TBM-174]
|
||||
_ = x[TDX_GUEST-175]
|
||||
_ = x[TLB_FLUSH_NESTED-176]
|
||||
_ = x[TME-177]
|
||||
_ = x[TOPEXT-178]
|
||||
_ = x[TSCRATEMSR-179]
|
||||
_ = x[TSXLDTRK-180]
|
||||
_ = x[VAES-181]
|
||||
_ = x[VMCBCLEAN-182]
|
||||
_ = x[VMPL-183]
|
||||
_ = x[VMSA_REGPROT-184]
|
||||
_ = x[VMX-185]
|
||||
_ = x[VPCLMULQDQ-186]
|
||||
_ = x[VTE-187]
|
||||
_ = x[WAITPKG-188]
|
||||
_ = x[WBNOINVD-189]
|
||||
_ = x[WRMSRNS-190]
|
||||
_ = x[X87-191]
|
||||
_ = x[XGETBV1-192]
|
||||
_ = x[XOP-193]
|
||||
_ = x[XSAVE-194]
|
||||
_ = x[XSAVEC-195]
|
||||
_ = x[XSAVEOPT-196]
|
||||
_ = x[XSAVES-197]
|
||||
_ = x[AESARM-198]
|
||||
_ = x[ARMCPUID-199]
|
||||
_ = x[ASIMD-200]
|
||||
_ = x[ASIMDDP-201]
|
||||
_ = x[ASIMDHP-202]
|
||||
_ = x[ASIMDRDM-203]
|
||||
_ = x[ATOMICS-204]
|
||||
_ = x[CRC32-205]
|
||||
_ = x[DCPOP-206]
|
||||
_ = x[EVTSTRM-207]
|
||||
_ = x[FCMA-208]
|
||||
_ = x[FHM-209]
|
||||
_ = x[FP-210]
|
||||
_ = x[FPHP-211]
|
||||
_ = x[GPA-212]
|
||||
_ = x[JSCVT-213]
|
||||
_ = x[LRCPC-214]
|
||||
_ = x[PMULL-215]
|
||||
_ = x[RNDR-216]
|
||||
_ = x[TLB-217]
|
||||
_ = x[TS-218]
|
||||
_ = x[SHA1-219]
|
||||
_ = x[SHA2-220]
|
||||
_ = x[SHA3-221]
|
||||
_ = x[SHA512-222]
|
||||
_ = x[SM3-223]
|
||||
_ = x[SM4-224]
|
||||
_ = x[SVE-225]
|
||||
_ = x[lastID-226]
|
||||
_ = x[AMXTRANSPOSE-12]
|
||||
_ = x[APX_F-13]
|
||||
_ = x[AVX-14]
|
||||
_ = x[AVX10-15]
|
||||
_ = x[AVX10_128-16]
|
||||
_ = x[AVX10_256-17]
|
||||
_ = x[AVX10_512-18]
|
||||
_ = x[AVX2-19]
|
||||
_ = x[AVX512BF16-20]
|
||||
_ = x[AVX512BITALG-21]
|
||||
_ = x[AVX512BW-22]
|
||||
_ = x[AVX512CD-23]
|
||||
_ = x[AVX512DQ-24]
|
||||
_ = x[AVX512ER-25]
|
||||
_ = x[AVX512F-26]
|
||||
_ = x[AVX512FP16-27]
|
||||
_ = x[AVX512IFMA-28]
|
||||
_ = x[AVX512PF-29]
|
||||
_ = x[AVX512VBMI-30]
|
||||
_ = x[AVX512VBMI2-31]
|
||||
_ = x[AVX512VL-32]
|
||||
_ = x[AVX512VNNI-33]
|
||||
_ = x[AVX512VP2INTERSECT-34]
|
||||
_ = x[AVX512VPOPCNTDQ-35]
|
||||
_ = x[AVXIFMA-36]
|
||||
_ = x[AVXNECONVERT-37]
|
||||
_ = x[AVXSLOW-38]
|
||||
_ = x[AVXVNNI-39]
|
||||
_ = x[AVXVNNIINT8-40]
|
||||
_ = x[AVXVNNIINT16-41]
|
||||
_ = x[BHI_CTRL-42]
|
||||
_ = x[BMI1-43]
|
||||
_ = x[BMI2-44]
|
||||
_ = x[CETIBT-45]
|
||||
_ = x[CETSS-46]
|
||||
_ = x[CLDEMOTE-47]
|
||||
_ = x[CLMUL-48]
|
||||
_ = x[CLZERO-49]
|
||||
_ = x[CMOV-50]
|
||||
_ = x[CMPCCXADD-51]
|
||||
_ = x[CMPSB_SCADBS_SHORT-52]
|
||||
_ = x[CMPXCHG8-53]
|
||||
_ = x[CPBOOST-54]
|
||||
_ = x[CPPC-55]
|
||||
_ = x[CX16-56]
|
||||
_ = x[EFER_LMSLE_UNS-57]
|
||||
_ = x[ENQCMD-58]
|
||||
_ = x[ERMS-59]
|
||||
_ = x[F16C-60]
|
||||
_ = x[FLUSH_L1D-61]
|
||||
_ = x[FMA3-62]
|
||||
_ = x[FMA4-63]
|
||||
_ = x[FP128-64]
|
||||
_ = x[FP256-65]
|
||||
_ = x[FSRM-66]
|
||||
_ = x[FXSR-67]
|
||||
_ = x[FXSROPT-68]
|
||||
_ = x[GFNI-69]
|
||||
_ = x[HLE-70]
|
||||
_ = x[HRESET-71]
|
||||
_ = x[HTT-72]
|
||||
_ = x[HWA-73]
|
||||
_ = x[HYBRID_CPU-74]
|
||||
_ = x[HYPERVISOR-75]
|
||||
_ = x[IA32_ARCH_CAP-76]
|
||||
_ = x[IA32_CORE_CAP-77]
|
||||
_ = x[IBPB-78]
|
||||
_ = x[IBPB_BRTYPE-79]
|
||||
_ = x[IBRS-80]
|
||||
_ = x[IBRS_PREFERRED-81]
|
||||
_ = x[IBRS_PROVIDES_SMP-82]
|
||||
_ = x[IBS-83]
|
||||
_ = x[IBSBRNTRGT-84]
|
||||
_ = x[IBSFETCHSAM-85]
|
||||
_ = x[IBSFFV-86]
|
||||
_ = x[IBSOPCNT-87]
|
||||
_ = x[IBSOPCNTEXT-88]
|
||||
_ = x[IBSOPSAM-89]
|
||||
_ = x[IBSRDWROPCNT-90]
|
||||
_ = x[IBSRIPINVALIDCHK-91]
|
||||
_ = x[IBS_FETCH_CTLX-92]
|
||||
_ = x[IBS_OPDATA4-93]
|
||||
_ = x[IBS_OPFUSE-94]
|
||||
_ = x[IBS_PREVENTHOST-95]
|
||||
_ = x[IBS_ZEN4-96]
|
||||
_ = x[IDPRED_CTRL-97]
|
||||
_ = x[INT_WBINVD-98]
|
||||
_ = x[INVLPGB-99]
|
||||
_ = x[KEYLOCKER-100]
|
||||
_ = x[KEYLOCKERW-101]
|
||||
_ = x[LAHF-102]
|
||||
_ = x[LAM-103]
|
||||
_ = x[LBRVIRT-104]
|
||||
_ = x[LZCNT-105]
|
||||
_ = x[MCAOVERFLOW-106]
|
||||
_ = x[MCDT_NO-107]
|
||||
_ = x[MCOMMIT-108]
|
||||
_ = x[MD_CLEAR-109]
|
||||
_ = x[MMX-110]
|
||||
_ = x[MMXEXT-111]
|
||||
_ = x[MOVBE-112]
|
||||
_ = x[MOVDIR64B-113]
|
||||
_ = x[MOVDIRI-114]
|
||||
_ = x[MOVSB_ZL-115]
|
||||
_ = x[MOVU-116]
|
||||
_ = x[MPX-117]
|
||||
_ = x[MSRIRC-118]
|
||||
_ = x[MSRLIST-119]
|
||||
_ = x[MSR_PAGEFLUSH-120]
|
||||
_ = x[NRIPS-121]
|
||||
_ = x[NX-122]
|
||||
_ = x[OSXSAVE-123]
|
||||
_ = x[PCONFIG-124]
|
||||
_ = x[POPCNT-125]
|
||||
_ = x[PPIN-126]
|
||||
_ = x[PREFETCHI-127]
|
||||
_ = x[PSFD-128]
|
||||
_ = x[RDPRU-129]
|
||||
_ = x[RDRAND-130]
|
||||
_ = x[RDSEED-131]
|
||||
_ = x[RDTSCP-132]
|
||||
_ = x[RRSBA_CTRL-133]
|
||||
_ = x[RTM-134]
|
||||
_ = x[RTM_ALWAYS_ABORT-135]
|
||||
_ = x[SBPB-136]
|
||||
_ = x[SERIALIZE-137]
|
||||
_ = x[SEV-138]
|
||||
_ = x[SEV_64BIT-139]
|
||||
_ = x[SEV_ALTERNATIVE-140]
|
||||
_ = x[SEV_DEBUGSWAP-141]
|
||||
_ = x[SEV_ES-142]
|
||||
_ = x[SEV_RESTRICTED-143]
|
||||
_ = x[SEV_SNP-144]
|
||||
_ = x[SGX-145]
|
||||
_ = x[SGXLC-146]
|
||||
_ = x[SHA-147]
|
||||
_ = x[SME-148]
|
||||
_ = x[SME_COHERENT-149]
|
||||
_ = x[SM3_X86-150]
|
||||
_ = x[SM4_X86-151]
|
||||
_ = x[SPEC_CTRL_SSBD-152]
|
||||
_ = x[SRBDS_CTRL-153]
|
||||
_ = x[SRSO_MSR_FIX-154]
|
||||
_ = x[SRSO_NO-155]
|
||||
_ = x[SRSO_USER_KERNEL_NO-156]
|
||||
_ = x[SSE-157]
|
||||
_ = x[SSE2-158]
|
||||
_ = x[SSE3-159]
|
||||
_ = x[SSE4-160]
|
||||
_ = x[SSE42-161]
|
||||
_ = x[SSE4A-162]
|
||||
_ = x[SSSE3-163]
|
||||
_ = x[STIBP-164]
|
||||
_ = x[STIBP_ALWAYSON-165]
|
||||
_ = x[STOSB_SHORT-166]
|
||||
_ = x[SUCCOR-167]
|
||||
_ = x[SVM-168]
|
||||
_ = x[SVMDA-169]
|
||||
_ = x[SVMFBASID-170]
|
||||
_ = x[SVML-171]
|
||||
_ = x[SVMNP-172]
|
||||
_ = x[SVMPF-173]
|
||||
_ = x[SVMPFT-174]
|
||||
_ = x[SYSCALL-175]
|
||||
_ = x[SYSEE-176]
|
||||
_ = x[TBM-177]
|
||||
_ = x[TDX_GUEST-178]
|
||||
_ = x[TLB_FLUSH_NESTED-179]
|
||||
_ = x[TME-180]
|
||||
_ = x[TOPEXT-181]
|
||||
_ = x[TSCRATEMSR-182]
|
||||
_ = x[TSXLDTRK-183]
|
||||
_ = x[VAES-184]
|
||||
_ = x[VMCBCLEAN-185]
|
||||
_ = x[VMPL-186]
|
||||
_ = x[VMSA_REGPROT-187]
|
||||
_ = x[VMX-188]
|
||||
_ = x[VPCLMULQDQ-189]
|
||||
_ = x[VTE-190]
|
||||
_ = x[WAITPKG-191]
|
||||
_ = x[WBNOINVD-192]
|
||||
_ = x[WRMSRNS-193]
|
||||
_ = x[X87-194]
|
||||
_ = x[XGETBV1-195]
|
||||
_ = x[XOP-196]
|
||||
_ = x[XSAVE-197]
|
||||
_ = x[XSAVEC-198]
|
||||
_ = x[XSAVEOPT-199]
|
||||
_ = x[XSAVES-200]
|
||||
_ = x[AESARM-201]
|
||||
_ = x[ARMCPUID-202]
|
||||
_ = x[ASIMD-203]
|
||||
_ = x[ASIMDDP-204]
|
||||
_ = x[ASIMDHP-205]
|
||||
_ = x[ASIMDRDM-206]
|
||||
_ = x[ATOMICS-207]
|
||||
_ = x[CRC32-208]
|
||||
_ = x[DCPOP-209]
|
||||
_ = x[EVTSTRM-210]
|
||||
_ = x[FCMA-211]
|
||||
_ = x[FHM-212]
|
||||
_ = x[FP-213]
|
||||
_ = x[FPHP-214]
|
||||
_ = x[GPA-215]
|
||||
_ = x[JSCVT-216]
|
||||
_ = x[LRCPC-217]
|
||||
_ = x[PMULL-218]
|
||||
_ = x[RNDR-219]
|
||||
_ = x[TLB-220]
|
||||
_ = x[TS-221]
|
||||
_ = x[SHA1-222]
|
||||
_ = x[SHA2-223]
|
||||
_ = x[SHA3-224]
|
||||
_ = x[SHA512-225]
|
||||
_ = x[SM3-226]
|
||||
_ = x[SM4-227]
|
||||
_ = x[SVE-228]
|
||||
_ = x[lastID-229]
|
||||
_ = x[firstID-0]
|
||||
}
|
||||
|
||||
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAMXTF32AMXCOMPLEXAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFHMFPFPHPGPAJSCVTLRCPCPMULLRNDRTLBTSSHA1SHA2SHA3SHA512SM3SM4SVElastID"
|
||||
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAMXTF32AMXCOMPLEXAMXTRANSPOSEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSM3_X86SM4_X86SPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFHMFPFPHPGPAJSCVTLRCPCPMULLRNDRTLBTSSHA1SHA2SHA3SHA512SM3SM4SVElastID"
|
||||
|
||||
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 75, 85, 90, 93, 98, 107, 116, 125, 129, 139, 151, 159, 167, 175, 183, 190, 200, 210, 218, 228, 239, 247, 257, 275, 290, 297, 309, 316, 323, 334, 346, 354, 358, 362, 368, 373, 381, 386, 392, 396, 405, 423, 431, 438, 442, 446, 460, 466, 470, 474, 483, 487, 491, 496, 501, 505, 509, 516, 520, 523, 529, 532, 535, 545, 555, 568, 581, 585, 596, 600, 614, 631, 634, 644, 655, 661, 669, 680, 688, 700, 716, 730, 741, 751, 766, 774, 785, 795, 802, 811, 821, 825, 828, 835, 840, 851, 858, 865, 873, 876, 882, 887, 896, 903, 911, 915, 918, 924, 931, 944, 949, 951, 958, 965, 971, 975, 984, 988, 993, 999, 1005, 1011, 1021, 1024, 1040, 1044, 1053, 1056, 1065, 1080, 1093, 1099, 1113, 1120, 1123, 1128, 1131, 1134, 1146, 1160, 1170, 1182, 1189, 1208, 1211, 1215, 1219, 1223, 1228, 1233, 1238, 1243, 1257, 1268, 1274, 1277, 1282, 1291, 1295, 1300, 1305, 1311, 1318, 1323, 1326, 1335, 1351, 1354, 1360, 1370, 1378, 1382, 1391, 1395, 1407, 1410, 1420, 1423, 1430, 1438, 1445, 1448, 1455, 1458, 1463, 1469, 1477, 1483, 1489, 1497, 1502, 1509, 1516, 1524, 1531, 1536, 1541, 1548, 1552, 1555, 1557, 1561, 1564, 1569, 1574, 1579, 1583, 1586, 1588, 1592, 1596, 1600, 1606, 1609, 1612, 1615, 1621}
|
||||
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 75, 85, 97, 102, 105, 110, 119, 128, 137, 141, 151, 163, 171, 179, 187, 195, 202, 212, 222, 230, 240, 251, 259, 269, 287, 302, 309, 321, 328, 335, 346, 358, 366, 370, 374, 380, 385, 393, 398, 404, 408, 417, 435, 443, 450, 454, 458, 472, 478, 482, 486, 495, 499, 503, 508, 513, 517, 521, 528, 532, 535, 541, 544, 547, 557, 567, 580, 593, 597, 608, 612, 626, 643, 646, 656, 667, 673, 681, 692, 700, 712, 728, 742, 753, 763, 778, 786, 797, 807, 814, 823, 833, 837, 840, 847, 852, 863, 870, 877, 885, 888, 894, 899, 908, 915, 923, 927, 930, 936, 943, 956, 961, 963, 970, 977, 983, 987, 996, 1000, 1005, 1011, 1017, 1023, 1033, 1036, 1052, 1056, 1065, 1068, 1077, 1092, 1105, 1111, 1125, 1132, 1135, 1140, 1143, 1146, 1158, 1165, 1172, 1186, 1196, 1208, 1215, 1234, 1237, 1241, 1245, 1249, 1254, 1259, 1264, 1269, 1283, 1294, 1300, 1303, 1308, 1317, 1321, 1326, 1331, 1337, 1344, 1349, 1352, 1361, 1377, 1380, 1386, 1396, 1404, 1408, 1417, 1421, 1433, 1436, 1446, 1449, 1456, 1464, 1471, 1474, 1481, 1484, 1489, 1495, 1503, 1509, 1515, 1523, 1528, 1535, 1542, 1550, 1557, 1562, 1567, 1574, 1578, 1581, 1583, 1587, 1590, 1595, 1600, 1605, 1609, 1612, 1614, 1618, 1622, 1626, 1632, 1635, 1638, 1641, 1647}
|
||||
|
||||
func (i FeatureID) String() string {
|
||||
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
|
||||
|
|
|
|||
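featureid_string.go above is stringer-generated: one concatenated name string, an index table of offsets, and compile-time `_ = x[Name-N]` checks that break the build if the enum values drift. The self-contained sketch below (not part of the vendored file) shows the same lookup layout with a made-up three-value enum.

package main

import "fmt"

type Color int

const (
	Red Color = iota
	Green
	Blue
)

// One concatenated name blob plus offsets, the layout the stringer tool
// emits for FeatureID in featureid_string.go.
const colorName = "RedGreenBlue"

var colorIndex = [...]uint8{0, 3, 8, 12}

func (c Color) String() string {
	if c < 0 || int(c) >= len(colorIndex)-1 {
		return fmt.Sprintf("Color(%d)", int(c))
	}
	return colorName[colorIndex[c]:colorIndex[c+1]]
}

func main() {
	fmt.Println(Red, Green, Blue, Color(7)) // Red Green Blue Color(7)
}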
80	vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go	generated vendored
@@ -65,9 +65,16 @@ func sysctlGetInt64(unknown int, names ...string) int {
	return unknown
}

func setFeature(c *CPUInfo, name string, feature FeatureID) {
	c.featureSet.setIf(sysctlGetBool(name), feature)
func setFeature(c *CPUInfo, feature FeatureID, aliases ...string) {
	for _, alias := range aliases {
		set := sysctlGetBool(alias)
		c.featureSet.setIf(set, feature)
		if set {
			break
		}
	}
}

func tryToFillCPUInfoFomSysctl(c *CPUInfo) {
	c.BrandName = sysctlGetString("machdep.cpu.brand_string")
@@ -87,41 +94,36 @@ func tryToFillCPUInfoFomSysctl(c *CPUInfo) {
	c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize")
	c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize")

	// from https://developer.arm.com/downloads/-/exploration-tools/feature-names-for-a-profile
	setFeature(c, "hw.optional.arm.FEAT_AES", AESARM)
	setFeature(c, "hw.optional.AdvSIMD", ASIMD)
	setFeature(c, "hw.optional.arm.FEAT_DotProd", ASIMDDP)
	setFeature(c, "hw.optional.arm.FEAT_RDM", ASIMDRDM)
	setFeature(c, "hw.optional.FEAT_CRC32", CRC32)
	setFeature(c, "hw.optional.arm.FEAT_DPB", DCPOP)
	// setFeature(c, "", EVTSTRM)
	setFeature(c, "hw.optional.arm.FEAT_FCMA", FCMA)
	setFeature(c, "hw.optional.arm.FEAT_FHM", FHM)
	setFeature(c, "hw.optional.arm.FEAT_FP", FP)
	setFeature(c, "hw.optional.arm.FEAT_FP16", FPHP)
	setFeature(c, "hw.optional.arm.FEAT_PAuth", GPA)
	setFeature(c, "hw.optional.arm.FEAT_RNG", RNDR)
	setFeature(c, "hw.optional.arm.FEAT_JSCVT", JSCVT)
	setFeature(c, "hw.optional.arm.FEAT_LRCPC", LRCPC)
	setFeature(c, "hw.optional.arm.FEAT_PMULL", PMULL)
	setFeature(c, "hw.optional.arm.FEAT_SHA1", SHA1)
	setFeature(c, "hw.optional.arm.FEAT_SHA256", SHA2)
	setFeature(c, "hw.optional.arm.FEAT_SHA3", SHA3)
	setFeature(c, "hw.optional.arm.FEAT_SHA512", SHA512)
	setFeature(c, "hw.optional.arm.FEAT_TLBIOS", TLB)
	setFeature(c, "hw.optional.arm.FEAT_TLBIRANGE", TLB)
	setFeature(c, "hw.optional.arm.FEAT_FlagM", TS)
	setFeature(c, "hw.optional.arm.FEAT_FlagM2", TS)
	// setFeature(c, "", SM3)
	// setFeature(c, "", SM4)
	setFeature(c, "hw.optional.arm.FEAT_SVE", SVE)

	// from empirical observation
	setFeature(c, "hw.optional.AdvSIMD_HPFPCvt", ASIMDHP)
	setFeature(c, "hw.optional.armv8_1_atomics", ATOMICS)
	setFeature(c, "hw.optional.floatingpoint", FP)
	setFeature(c, "hw.optional.armv8_2_sha3", SHA3)
	setFeature(c, "hw.optional.armv8_2_sha512", SHA512)
	setFeature(c, "hw.optional.armv8_3_compnum", FCMA)
	setFeature(c, "hw.optional.armv8_crc32", CRC32)
	// ARM features:
	//
	// Note: On some Apple Silicon system, some feats have aliases. See:
	// https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics
	// When so, we look at all aliases and consider a feature available when at least one identifier matches.
	setFeature(c, AESARM, "hw.optional.arm.FEAT_AES") // AES instructions
	setFeature(c, ASIMD, "hw.optional.arm.AdvSIMD", "hw.optional.neon") // Advanced SIMD
	setFeature(c, ASIMDDP, "hw.optional.arm.FEAT_DotProd") // SIMD Dot Product
	setFeature(c, ASIMDHP, "hw.optional.arm.AdvSIMD_HPFPCvt", "hw.optional.neon_hpfp") // Advanced SIMD half-precision floating point
	setFeature(c, ASIMDRDM, "hw.optional.arm.FEAT_RDM") // Rounding Double Multiply Accumulate/Subtract
	setFeature(c, ATOMICS, "hw.optional.arm.FEAT_LSE", "hw.optional.armv8_1_atomics") // Large System Extensions (LSE)
	setFeature(c, CRC32, "hw.optional.arm.FEAT_CRC32", "hw.optional.armv8_crc32") // CRC32/CRC32C instructions
	setFeature(c, DCPOP, "hw.optional.arm.FEAT_DPB") // Data cache clean to Point of Persistence (DC CVAP)
	setFeature(c, EVTSTRM, "hw.optional.arm.FEAT_ECV") // Generic timer
	setFeature(c, FCMA, "hw.optional.arm.FEAT_FCMA", "hw.optional.armv8_3_compnum") // Floating point complex number addition and multiplication
	setFeature(c, FHM, "hw.optional.armv8_2_fhm", "hw.optional.arm.FEAT_FHM") // FMLAL and FMLSL instructions
	setFeature(c, FP, "hw.optional.floatingpoint") // Single-precision and double-precision floating point
	setFeature(c, FPHP, "hw.optional.arm.FEAT_FP16", "hw.optional.neon_fp16") // Half-precision floating point
	setFeature(c, GPA, "hw.optional.arm.FEAT_PAuth") // Generic Pointer Authentication
	setFeature(c, JSCVT, "hw.optional.arm.FEAT_JSCVT") // Javascript-style double->int convert (FJCVTZS)
	setFeature(c, LRCPC, "hw.optional.arm.FEAT_LRCPC") // Weaker release consistency (LDAPR, etc)
	setFeature(c, PMULL, "hw.optional.arm.FEAT_PMULL") // Polynomial Multiply instructions (PMULL/PMULL2)
	setFeature(c, RNDR, "hw.optional.arm.FEAT_RNG") // Random Number instructions
	setFeature(c, TLB, "hw.optional.arm.FEAT_TLBIOS", "hw.optional.arm.FEAT_TLBIRANGE") // Outer Shareable and TLB range maintenance instructions
	setFeature(c, TS, "hw.optional.arm.FEAT_FlagM", "hw.optional.arm.FEAT_FlagM2") // Flag manipulation instructions
	setFeature(c, SHA1, "hw.optional.arm.FEAT_SHA1") // SHA-1 instructions (SHA1C, etc)
	setFeature(c, SHA2, "hw.optional.arm.FEAT_SHA256") // SHA-2 instructions (SHA256H, etc)
	setFeature(c, SHA3, "hw.optional.arm.FEAT_SHA3") // SHA-3 instructions (EOR3, RAXI, XAR, BCAX)
	setFeature(c, SHA512, "hw.optional.arm.FEAT_SHA512") // SHA512 instructions
	setFeature(c, SM3, "hw.optional.arm.FEAT_SM3") // SM3 instructions
	setFeature(c, SM4, "hw.optional.arm.FEAT_SM4") // SM4 instructions
	setFeature(c, SVE, "hw.optional.arm.FEAT_SVE") // Scalable Vector Extension
}
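The setFeature rewrite above probes several sysctl names per feature and stops at the first one that reports true. The sketch below (not part of the vendored file) isolates that first-match-wins pattern behind a generic probe function; the sysctl names and the map-backed probe are illustrative stand-ins for sysctlGetBool.

package main

import "fmt"

// firstTrue reports whether any of the aliases is reported as present
// by probe, mirroring the alias loop in the new setFeature.
func firstTrue(probe func(string) bool, aliases ...string) bool {
	for _, alias := range aliases {
		if probe(alias) {
			return true // first hit wins; later aliases are not consulted
		}
	}
	return false
}

func main() {
	// Stand-in for sysctlGetBool: pretend only the legacy name exists.
	present := map[string]bool{"hw.optional.armv8_1_atomics": true}
	probe := func(name string) bool { return present[name] }

	ok := firstTrue(probe, "hw.optional.arm.FEAT_LSE", "hw.optional.armv8_1_atomics")
	fmt.Println("ATOMICS:", ok) // true, via the second (fallback) alias
}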
22	vendor/github.com/miekg/dns/edns.go	generated vendored
@@ -317,30 +317,30 @@ func (e *EDNS0_SUBNET) pack() ([]byte, error) {
		// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
		// We might don't need to complain either
		if e.SourceNetmask != 0 {
			return nil, errors.New("dns: bad address family")
			return nil, errors.New("bad address family")
		}
	case 1:
		if e.SourceNetmask > net.IPv4len*8 {
			return nil, errors.New("dns: bad netmask")
			return nil, errors.New("bad netmask")
		}
		if len(e.Address.To4()) != net.IPv4len {
			return nil, errors.New("dns: bad address")
			return nil, errors.New("bad address")
		}
		ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
		needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
		b = append(b, ip[:needLength]...)
	case 2:
		if e.SourceNetmask > net.IPv6len*8 {
			return nil, errors.New("dns: bad netmask")
			return nil, errors.New("bad netmask")
		}
		if len(e.Address) != net.IPv6len {
			return nil, errors.New("dns: bad address")
			return nil, errors.New("bad address")
		}
		ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
		needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
		b = append(b, ip[:needLength]...)
	default:
		return nil, errors.New("dns: bad address family")
		return nil, errors.New("bad address family")
	}
	return b, nil
}
@@ -357,25 +357,25 @@ func (e *EDNS0_SUBNET) unpack(b []byte) error {
		// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
		// It's okay to accept such a packet
		if e.SourceNetmask != 0 {
			return errors.New("dns: bad address family")
			return errors.New("bad address family")
		}
		e.Address = net.IPv4(0, 0, 0, 0)
	case 1:
		if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
			return errors.New("dns: bad netmask")
			return errors.New("bad netmask")
		}
		addr := make(net.IP, net.IPv4len)
		copy(addr, b[4:])
		e.Address = addr.To16()
	case 2:
		if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
			return errors.New("dns: bad netmask")
			return errors.New("bad netmask")
		}
		addr := make(net.IP, net.IPv6len)
		copy(addr, b[4:])
		e.Address = addr
	default:
		return errors.New("dns: bad address family")
		return errors.New("bad address family")
	}
	return nil
}
@@ -720,7 +720,7 @@ func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error {
	case 2:
		e.Timeout = binary.BigEndian.Uint16(b)
	default:
		return fmt.Errorf("dns: length mismatch, want 0/2 but got %d", len(b))
		return fmt.Errorf("length mismatch, want 0/2 but got %d", len(b))
	}
	return nil
}
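The EDNS0_SUBNET pack code above masks the address down to SourceNetmask bits and sends only ceil(netmask/8) bytes, computed as (netmask + 8 - 1) / 8. A standalone reproduction of that arithmetic for an IPv4 example (not part of the vendored file; the helper name is made up):

package main

import (
	"fmt"
	"net"
)

// clientSubnetPrefix returns the on-the-wire address bytes for an
// EDNS0 client-subnet option: the address masked to netmask bits,
// truncated to ceil(netmask/8) bytes.
func clientSubnetPrefix(ip net.IP, netmask uint8) []byte {
	v4 := ip.To4()
	masked := v4.Mask(net.CIDRMask(int(netmask), net.IPv4len*8))
	needLength := (int(netmask) + 8 - 1) / 8 // division rounding up
	return masked[:needLength]
}

func main() {
	b := clientSubnetPrefix(net.ParseIP("192.0.2.200"), 22)
	fmt.Printf("% x\n", b) // c0 00 00  (192.0.2.200/22 -> 192.0.0.0, 3 bytes)
}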
25	vendor/github.com/miekg/dns/msg.go	generated vendored
@@ -872,7 +872,7 @@ func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) {
	// TODO(miek) make this an error?
	// use PackOpt to let people tell how detailed the error reporting should be?
	// if off != len(msg) {
	// // println("dns: extra bytes in dns packet", off, "<", len(msg))
	// //	println("dns: extra bytes in dns packet", off, "<", len(msg))
	// }
	return err
}
@@ -1123,23 +1123,28 @@ func unpackQuestion(msg []byte, off int) (Question, int, error) {
	)
	q.Name, off, err = UnpackDomainName(msg, off)
	if err != nil {
		return q, off, err
		return q, off, fmt.Errorf("bad question name: %w", err)
	}
	if off == len(msg) {
		return q, off, nil
	}
	q.Qtype, off, err = unpackUint16(msg, off)
	if err != nil {
		return q, off, err
		return q, off, fmt.Errorf("bad question qtype: %w", err)
	}
	if off == len(msg) {
		return q, off, nil
	}
	q.Qclass, off, err = unpackUint16(msg, off)
	if err != nil {
		return q, off, fmt.Errorf("bad question qclass: %w", err)
	}

	if off == len(msg) {
		return q, off, nil
	}
	return q, off, err

	return q, off, nil
}

func (dh *Header) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
@@ -1177,27 +1182,27 @@ func unpackMsgHdr(msg []byte, off int) (Header, int, error) {
	)
	dh.Id, off, err = unpackUint16(msg, off)
	if err != nil {
		return dh, off, err
		return dh, off, fmt.Errorf("bad header id: %w", err)
	}
	dh.Bits, off, err = unpackUint16(msg, off)
	if err != nil {
		return dh, off, err
		return dh, off, fmt.Errorf("bad header bits: %w", err)
	}
	dh.Qdcount, off, err = unpackUint16(msg, off)
	if err != nil {
		return dh, off, err
		return dh, off, fmt.Errorf("bad header question count: %w", err)
	}
	dh.Ancount, off, err = unpackUint16(msg, off)
	if err != nil {
		return dh, off, err
		return dh, off, fmt.Errorf("bad header answer count: %w", err)
	}
	dh.Nscount, off, err = unpackUint16(msg, off)
	if err != nil {
		return dh, off, err
		return dh, off, fmt.Errorf("bad header ns count: %w", err)
	}
	dh.Arcount, off, err = unpackUint16(msg, off)
	if err != nil {
		return dh, off, err
		return dh, off, fmt.Errorf("bad header extra count: %w", err)
	}
	return dh, off, nil
}
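The msg.go changes above replace bare `return ..., err` with fmt.Errorf and the %w verb, so callers get a field-specific prefix while the underlying error stays matchable. A minimal standalone illustration of that pattern (not part of the vendored file):

package main

import (
	"errors"
	"fmt"
)

var errBuf = errors.New("overflowing header size")

func unpackCount(ok bool) (uint16, error) {
	if !ok {
		return 0, errBuf
	}
	return 1, nil
}

func main() {
	_, err := unpackCount(false)
	// Wrap with context, the way unpackMsgHdr now does for each field.
	err = fmt.Errorf("bad header question count: %w", err)

	fmt.Println(err)                    // bad header question count: overflowing header size
	fmt.Println(errors.Is(err, errBuf)) // true: the original error is still matchable
}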
12	vendor/github.com/miekg/dns/server.go	generated vendored
@@ -44,6 +44,8 @@ type ResponseWriter interface {
	LocalAddr() net.Addr
	// RemoteAddr returns the net.Addr of the client that sent the current request.
	RemoteAddr() net.Addr
	// Network returns the value of the Net field of the Server (e.g., "tcp", "tcp-tls").
	Network() string
	// WriteMsg writes a reply back to the client.
	WriteMsg(*Msg) error
	// Write writes a raw buffer back to the client.
@@ -77,6 +79,7 @@ type response struct {
	udpSession *SessionUDP // oob data to get egress interface right
	pcSession  net.Addr    // address to use when writing to a generic net.PacketConn
	writer     Writer      // writer to output the raw DNS bits
	network    string      // corresponding Server.Net value
}

// handleRefused returns a HandlerFunc that returns REFUSED for every request it gets.
@@ -332,7 +335,7 @@ func (srv *Server) ListenAndServe() error {
		return srv.serveTCP(l)
	case "tcp-tls", "tcp4-tls", "tcp6-tls":
		if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) {
			return errors.New("dns: neither Certificates nor GetCertificate set in Config")
			return errors.New("neither Certificates nor GetCertificate set in config")
		}
		network := strings.TrimSuffix(srv.Net, "-tls")
		l, err := listenTCP(network, addr, srv.ReusePort, srv.ReuseAddr)
@@ -557,7 +560,7 @@ func (srv *Server) serveUDP(l net.PacketConn) error {

// Serve a new TCP connection.
func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) {
	w := &response{tsigProvider: srv.tsigProvider(), tcp: rw}
	w := &response{tsigProvider: srv.tsigProvider(), tcp: rw, network: srv.Net}
	if srv.DecorateWriter != nil {
		w.writer = srv.DecorateWriter(w)
	} else {
@@ -612,7 +615,7 @@ func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) {

// Serve a new UDP request.
func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u net.PacketConn, udpSession *SessionUDP, pcSession net.Addr) {
	w := &response{tsigProvider: srv.tsigProvider(), udp: u, udpSession: udpSession, pcSession: pcSession}
	w := &response{tsigProvider: srv.tsigProvider(), udp: u, udpSession: udpSession, pcSession: pcSession, network: srv.Net}
	if srv.DecorateWriter != nil {
		w.writer = srv.DecorateWriter(w)
	} else {
@@ -818,6 +821,9 @@ func (w *response) RemoteAddr() net.Addr {
	}
}

// Network implements the ResponseWriter.Network method.
func (w *response) Network() string { return w.network }

// TsigStatus implements the ResponseWriter.TsigStatus method.
func (w *response) TsigStatus() error { return w.tsigStatus }
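The server.go hunks above add a Network method to ResponseWriter and thread Server.Net into every response. A hedged usage sketch (not part of the vendored file, assuming the rest of the miekg/dns handler API is unchanged): a handler that flags potentially oversized replies only when it was reached over plain UDP.

package main

import (
	"log"

	"github.com/miekg/dns"
)

func handle(w dns.ResponseWriter, r *dns.Msg) {
	m := new(dns.Msg)
	m.SetReply(r)

	// Network() reports the Server.Net value ("udp", "tcp", "tcp-tls", ...),
	// so transport-specific behaviour no longer needs type assertions on w.
	if w.Network() == "udp" {
		m.Truncated = m.Len() > dns.MinMsgSize // illustrative policy: flag replies that may not fit a plain UDP payload
	}

	if err := w.WriteMsg(m); err != nil {
		log.Printf("write reply over %s: %v", w.Network(), err)
	}
}

func main() {
	dns.HandleFunc(".", handle)
	log.Fatal(dns.ListenAndServe(":8053", "udp", nil))
}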
58	vendor/github.com/miekg/dns/svcb.go	generated vendored
@@ -298,7 +298,7 @@ func (s *SVCBMandatory) pack() ([]byte, error) {
|
||||
func (s *SVCBMandatory) unpack(b []byte) error {
|
||||
if len(b)%2 != 0 {
|
||||
return errors.New("dns: svcbmandatory: value length is not a multiple of 2")
|
||||
return errors.New("bad svcbmandatory: value length is not a multiple of 2")
|
||||
}
|
||||
codes := make([]SVCBKey, 0, len(b)/2)
|
||||
for i := 0; i < len(b); i += 2 {
|
||||
|
|
@ -395,10 +395,10 @@ func (s *SVCBAlpn) pack() ([]byte, error) {
|
|||
b := make([]byte, 0, 10*len(s.Alpn))
|
||||
for _, e := range s.Alpn {
|
||||
if e == "" {
|
||||
return nil, errors.New("dns: svcbalpn: empty alpn-id")
|
||||
return nil, errors.New("bad svcbalpn: empty alpn-id")
|
||||
}
|
||||
if len(e) > 255 {
|
||||
return nil, errors.New("dns: svcbalpn: alpn-id too long")
|
||||
return nil, errors.New("bad svcbalpn: alpn-id too long")
|
||||
}
|
||||
b = append(b, byte(len(e)))
|
||||
b = append(b, e...)
|
||||
|
|
@ -413,7 +413,7 @@ func (s *SVCBAlpn) unpack(b []byte) error {
|
|||
length := int(b[i])
|
||||
i++
|
||||
if i+length > len(b) {
|
||||
return errors.New("dns: svcbalpn: alpn array overflowing")
|
||||
return errors.New("bad svcbalpn: alpn array overflowing")
|
||||
}
|
||||
alpn = append(alpn, string(b[i:i+length]))
|
||||
i += length
|
||||
|
|
@ -433,13 +433,13 @@ func (s *SVCBAlpn) parse(b string) error {
|
|||
for p := 0; p < len(b); {
|
||||
c, q := nextByte(b, p)
|
||||
if q == 0 {
|
||||
return errors.New("dns: svcbalpn: unterminated escape")
|
||||
return errors.New("bad svcbalpn: unterminated escape")
|
||||
}
|
||||
p += q
|
||||
// If we find a comma, we have finished reading an alpn.
|
||||
if c == ',' {
|
||||
if len(a) == 0 {
|
||||
return errors.New("dns: svcbalpn: empty protocol identifier")
|
||||
return errors.New("bad svcbalpn: empty protocol identifier")
|
||||
}
|
||||
alpn = append(alpn, string(a))
|
||||
a = []byte{}
|
||||
|
|
@ -449,10 +449,10 @@ func (s *SVCBAlpn) parse(b string) error {
|
|||
if c == '\\' {
|
||||
dc, dq := nextByte(b, p)
|
||||
if dq == 0 {
|
||||
return errors.New("dns: svcbalpn: unterminated escape decoding comma-separated list")
|
||||
return errors.New("bad svcbalpn: unterminated escape decoding comma-separated list")
|
||||
}
|
||||
if dc != '\\' && dc != ',' {
|
||||
return errors.New("dns: svcbalpn: bad escaped character decoding comma-separated list")
|
||||
return errors.New("bad svcbalpn: bad escaped character decoding comma-separated list")
|
||||
}
|
||||
p += dq
|
||||
c = dc
|
||||
|
|
@@ -461,7 +461,7 @@ func (s *SVCBAlpn) parse(b string) error {
 }
 // Add the final alpn.
 if len(a) == 0 {
-return errors.New("dns: svcbalpn: last protocol identifier empty")
+return errors.New("bad svcbalpn: last protocol identifier empty")
 }
 s.Alpn = append(alpn, string(a))
 return nil
@@ -499,14 +499,14 @@ func (*SVCBNoDefaultAlpn) len() int { return 0 }

 func (*SVCBNoDefaultAlpn) unpack(b []byte) error {
 if len(b) != 0 {
-return errors.New("dns: svcbnodefaultalpn: no-default-alpn must have no value")
+return errors.New("bad svcbnodefaultalpn: no-default-alpn must have no value")
 }
 return nil
 }

 func (*SVCBNoDefaultAlpn) parse(b string) error {
 if b != "" {
-return errors.New("dns: svcbnodefaultalpn: no-default-alpn must have no value")
+return errors.New("bad svcbnodefaultalpn: no-default-alpn must have no value")
 }
 return nil
 }
@@ -529,7 +529,7 @@ func (s *SVCBPort) copy() SVCBKeyValue { return &SVCBPort{s.Port} }

 func (s *SVCBPort) unpack(b []byte) error {
 if len(b) != 2 {
-return errors.New("dns: svcbport: port length is not exactly 2 octets")
+return errors.New("bad svcbport: port length is not exactly 2 octets")
 }
 s.Port = binary.BigEndian.Uint16(b)
 return nil
@@ -544,7 +544,7 @@ func (s *SVCBPort) pack() ([]byte, error) {
 func (s *SVCBPort) parse(b string) error {
 port, err := strconv.ParseUint(b, 10, 16)
 if err != nil {
-return errors.New("dns: svcbport: port out of range")
+return errors.New("bad svcbport: port out of range")
 }
 s.Port = uint16(port)
 return nil
@@ -577,7 +577,7 @@ func (s *SVCBIPv4Hint) pack() ([]byte, error) {
 for _, e := range s.Hint {
 x := e.To4()
 if x == nil {
-return nil, errors.New("dns: svcbipv4hint: expected ipv4, hint is ipv6")
+return nil, errors.New("bad svcbipv4hint: expected ipv4, hint is ipv6")
 }
 b = append(b, x...)
 }
@@ -586,7 +586,7 @@ func (s *SVCBIPv4Hint) pack() ([]byte, error) {

 func (s *SVCBIPv4Hint) unpack(b []byte) error {
 if len(b) == 0 || len(b)%4 != 0 {
-return errors.New("dns: svcbipv4hint: ipv4 address byte array length is not a multiple of 4")
+return errors.New("bad svcbipv4hint: ipv4 address byte array length is not a multiple of 4")
 }
 b = cloneSlice(b)
 x := make([]net.IP, 0, len(b)/4)
@@ -611,10 +611,10 @@ func (s *SVCBIPv4Hint) String() string {

 func (s *SVCBIPv4Hint) parse(b string) error {
 if b == "" {
-return errors.New("dns: svcbipv4hint: empty hint")
+return errors.New("bad svcbipv4hint: empty hint")
 }
 if strings.Contains(b, ":") {
-return errors.New("dns: svcbipv4hint: expected ipv4, got ipv6")
+return errors.New("bad svcbipv4hint: expected ipv4, got ipv6")
 }

 hint := make([]net.IP, 0, strings.Count(b, ",")+1)
@@ -623,7 +623,7 @@ func (s *SVCBIPv4Hint) parse(b string) error {
 e, b, _ = strings.Cut(b, ",")
 ip := net.ParseIP(e).To4()
 if ip == nil {
-return errors.New("dns: svcbipv4hint: bad ip")
+return errors.New("bad svcbipv4hint: bad ip")
 }
 hint = append(hint, ip)
 }
@@ -671,7 +671,7 @@ func (s *SVCBECHConfig) unpack(b []byte) error {
 func (s *SVCBECHConfig) parse(b string) error {
 x, err := fromBase64([]byte(b))
 if err != nil {
-return errors.New("dns: svcbech: bad base64 ech")
+return errors.New("bad svcbech: bad base64 ech")
 }
 s.ECH = x
 return nil
@@ -699,7 +699,7 @@ func (s *SVCBIPv6Hint) pack() ([]byte, error) {
 b := make([]byte, 0, 16*len(s.Hint))
 for _, e := range s.Hint {
 if len(e) != net.IPv6len || e.To4() != nil {
-return nil, errors.New("dns: svcbipv6hint: expected ipv6, hint is ipv4")
+return nil, errors.New("bad svcbipv6hint: expected ipv6, hint is ipv4")
 }
 b = append(b, e...)
 }
@@ -708,14 +708,14 @@ func (s *SVCBIPv6Hint) pack() ([]byte, error) {

 func (s *SVCBIPv6Hint) unpack(b []byte) error {
 if len(b) == 0 || len(b)%16 != 0 {
-return errors.New("dns: svcbipv6hint: ipv6 address byte array length not a multiple of 16")
+return errors.New("bas svcbipv6hint: ipv6 address byte array length not a multiple of 16")
 }
 b = cloneSlice(b)
 x := make([]net.IP, 0, len(b)/16)
 for i := 0; i < len(b); i += 16 {
 ip := net.IP(b[i : i+16])
 if ip.To4() != nil {
-return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4")
+return errors.New("bad svcbipv6hint: expected ipv6, got ipv4")
 }
 x = append(x, ip)
 }
@@ -736,7 +736,7 @@ func (s *SVCBIPv6Hint) String() string {

 func (s *SVCBIPv6Hint) parse(b string) error {
 if b == "" {
-return errors.New("dns: svcbipv6hint: empty hint")
+return errors.New("bad svcbipv6hint: empty hint")
 }

 hint := make([]net.IP, 0, strings.Count(b, ",")+1)
@@ -745,10 +745,10 @@ func (s *SVCBIPv6Hint) parse(b string) error {
 e, b, _ = strings.Cut(b, ",")
 ip := net.ParseIP(e)
 if ip == nil {
-return errors.New("dns: svcbipv6hint: bad ip")
+return errors.New("bad svcbipv6hint: bad ip")
 }
 if ip.To4() != nil {
-return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4-mapped-ipv6")
+return errors.New("bad svcbipv6hint: expected ipv6, got ipv4-mapped-ipv6")
 }
 hint = append(hint, ip)
 }
@@ -800,7 +800,7 @@ func (s *SVCBDoHPath) unpack(b []byte) error {
 func (s *SVCBDoHPath) parse(b string) error {
 template, err := svcbParseParam(b)
 if err != nil {
-return fmt.Errorf("dns: svcbdohpath: %w", err)
+return fmt.Errorf("bad svcbdohpath: %w", err)
 }
 s.Template = string(template)
 return nil
@@ -838,14 +838,14 @@ func (*SVCBOhttp) len() int { return 0 }

 func (*SVCBOhttp) unpack(b []byte) error {
 if len(b) != 0 {
-return errors.New("dns: svcbotthp: svcbotthp must have no value")
+return errors.New("bad svcbotthp: svcbotthp must have no value")
 }
 return nil
 }

 func (*SVCBOhttp) parse(b string) error {
 if b != "" {
-return errors.New("dns: svcbotthp: svcbotthp must have no value")
+return errors.New("bad svcbotthp: svcbotthp must have no value")
 }
 return nil
 }
@@ -878,7 +878,7 @@ func (s *SVCBLocal) unpack(b []byte) error {
 func (s *SVCBLocal) parse(b string) error {
 data, err := svcbParseParam(b)
 if err != nil {
-return fmt.Errorf("dns: svcblocal: svcb private/experimental key %w", err)
+return fmt.Errorf("bad svcblocal: svcb private/experimental key %w", err)
 }
 s.Data = data
 return nil
2 vendor/github.com/miekg/dns/version.go generated vendored
@@ -3,7 +3,7 @@ package dns
 import "fmt"

 // Version is current version of this library.
-var Version = v{1, 1, 66}
+var Version = v{1, 1, 67}

 // v holds the version of this library.
 type v struct {
422 vendor/github.com/miekg/dns/zmsg.go generated vendored
File diff suppressed because it is too large
2 vendor/github.com/minio/crc64nvme/crc64_arm64.go generated vendored
@@ -10,6 +10,6 @@ import (
 "github.com/klauspost/cpuid/v2"
 )

-var hasAsm = cpuid.CPU.Supports(cpuid.ASIMD) && cpuid.CPU.Supports(cpuid.PMULL)
+var hasAsm = cpuid.CPU.Supports(cpuid.ASIMD, cpuid.PMULL, cpuid.SHA3)

 func updateAsm(crc uint64, p []byte) (checksum uint64)
8 vendor/github.com/minio/minio-go/v7/api-append-object.go generated vendored
@@ -127,6 +127,10 @@ func (c *Client) appendObjectDo(ctx context.Context, bucketName, objectName stri

 if opts.checksumType.IsSet() {
 reqMetadata.addCrc = &opts.checksumType
+reqMetadata.customHeader.Set(amzChecksumAlgo, opts.checksumType.String())
+if opts.checksumType.FullObjectRequested() {
+reqMetadata.customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String())
+}
 }

 // Execute PUT an objectName.
@@ -183,8 +187,8 @@ func (c *Client) AppendObject(ctx context.Context, bucketName, objectName string
 if err != nil {
 return UploadInfo{}, err
 }
-if oinfo.ChecksumMode != ChecksumFullObjectMode.String() {
-return UploadInfo{}, fmt.Errorf("append API is not allowed on objects that are not full_object checksum type: %s", oinfo.ChecksumMode)
+if oinfo.ChecksumMode != "" && oinfo.ChecksumMode != ChecksumFullObjectMode.String() {
+return UploadInfo{}, fmt.Errorf("Append() is not allowed on objects that are not of FULL_OBJECT checksum type: %s", oinfo.ChecksumMode)
 }
 opts.setChecksumParams(oinfo) // set the appropriate checksum params based on the existing object checksum metadata.
 opts.setWriteOffset(oinfo.Size) // First append must set the current object size as the offset.
10 vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go generated vendored
@@ -82,16 +82,12 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
 // avoid sha256 with non-v4 signature request or
 // HTTPS connection.
 hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256)
-if len(hashSums) == 0 {
-addAutoChecksumHeaders(&opts)
-}

 // Initiate a new multipart upload.
 uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
 if err != nil {
 return UploadInfo{}, err
 }
-delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")

 defer func() {
 if err != nil {
@@ -145,11 +141,15 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
 if hashSums["sha256"] != nil {
 sha256Hex = hex.EncodeToString(hashSums["sha256"])
 }
-if len(hashSums) == 0 {
+if opts.AutoChecksum.IsSet() {
 crc.Reset()
 crc.Write(buf[:length])
 cSum := crc.Sum(nil)
 customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
+customHeader.Set(amzChecksumAlgo, opts.AutoChecksum.String())
+if opts.AutoChecksum.FullObjectRequested() {
+customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String())
+}
 }

 p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
68
vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
generated
vendored
|
|
@ -108,19 +108,14 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
|||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
if opts.Checksum.IsSet() {
|
||||
opts.AutoChecksum = opts.Checksum
|
||||
}
|
||||
withChecksum := c.trailingHeaderSupport
|
||||
if withChecksum {
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// Initiate a new multipart upload.
|
||||
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
|
||||
|
||||
withChecksum := c.trailingHeaderSupport
|
||||
|
||||
// Aborts the multipart upload in progress, if the
|
||||
// function returns any error, since we do not resume
|
||||
|
|
@ -297,15 +292,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
|||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
if opts.Checksum.IsSet() {
|
||||
opts.AutoChecksum = opts.Checksum
|
||||
opts.SendContentMd5 = false
|
||||
}
|
||||
|
||||
if !opts.SendContentMd5 {
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// Calculate the optimal parts info for a given size.
|
||||
totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
|
||||
if err != nil {
|
||||
|
|
@ -316,7 +302,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
|||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
|
||||
|
||||
// Aborts the multipart upload if the function returns
|
||||
// any error, since we do not resume we should purge
|
||||
|
|
@ -369,12 +354,18 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
|||
md5Hash.Reset()
|
||||
md5Hash.Write(buf[:length])
|
||||
md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
|
||||
} else {
|
||||
}
|
||||
|
||||
if opts.AutoChecksum.IsSet() {
|
||||
// Add CRC32C instead.
|
||||
crc.Reset()
|
||||
crc.Write(buf[:length])
|
||||
cSum := crc.Sum(nil)
|
||||
customHeader.Set(opts.AutoChecksum.KeyCapitalized(), base64.StdEncoding.EncodeToString(cSum))
|
||||
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
|
||||
customHeader.Set(amzChecksumAlgo, opts.AutoChecksum.String())
|
||||
if opts.AutoChecksum.FullObjectRequested() {
|
||||
customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Update progress reader appropriately to the latest offset
|
||||
|
|
@ -453,13 +444,6 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
|||
if err = s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
if opts.Checksum.IsSet() {
|
||||
opts.SendContentMd5 = false
|
||||
opts.AutoChecksum = opts.Checksum
|
||||
}
|
||||
if !opts.SendContentMd5 {
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// Cancel all when an error occurs.
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
|
@ -476,7 +460,6 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
|||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
|
||||
|
||||
// Aborts the multipart upload if the function returns
|
||||
// any error, since we do not resume we should purge
|
||||
|
|
@ -541,18 +524,22 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
|||
return UploadInfo{}, rerr
|
||||
}
|
||||
|
||||
// Calculate md5sum.
|
||||
customHeader := make(http.Header)
|
||||
if !opts.SendContentMd5 {
|
||||
// Add Checksum instead.
|
||||
crc.Reset()
|
||||
crc.Write(buf[:length])
|
||||
cSum := crc.Sum(nil)
|
||||
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(partNumber int) {
|
||||
// Calculate md5sum.
|
||||
customHeader := make(http.Header)
|
||||
if opts.AutoChecksum.IsSet() {
|
||||
// Add Checksum instead.
|
||||
crc.Reset()
|
||||
crc.Write(buf[:length])
|
||||
cSum := crc.Sum(nil)
|
||||
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
|
||||
customHeader.Set(amzChecksumAlgo, opts.AutoChecksum.String())
|
||||
if opts.AutoChecksum.FullObjectRequested() {
|
||||
customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Avoid declaring variables in the for loop
|
||||
var md5Base64 string
|
||||
|
||||
|
|
@ -664,9 +651,6 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r
|
|||
if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 {
|
||||
return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'")
|
||||
}
|
||||
if opts.Checksum.IsSet() {
|
||||
opts.SendContentMd5 = false
|
||||
}
|
||||
|
||||
var readSeeker io.Seeker
|
||||
if size > 0 {
|
||||
|
|
@ -759,7 +743,7 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
|
|||
}
|
||||
}
|
||||
if addCrc {
|
||||
opts.AutoChecksum.SetDefault(ChecksumCRC32C)
|
||||
opts.AutoChecksum.SetDefault(ChecksumFullObjectCRC32C)
|
||||
reqMetadata.addCrc = &opts.AutoChecksum
|
||||
}
|
||||
}
|
||||
|
|
|
|||
48
vendor/github.com/minio/minio-go/v7/api-put-object.go
generated
vendored
|
|
@ -26,6 +26,7 @@ import (
|
|||
"io"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
|
|
@ -267,7 +268,16 @@ func (opts PutObjectOptions) validate(c *Client) (err error) {
|
|||
if opts.LegalHold != "" && !opts.LegalHold.IsValid() {
|
||||
return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status")
|
||||
}
|
||||
if opts.Checksum.IsSet() {
|
||||
|
||||
checkCrc := false
|
||||
for k := range opts.UserMetadata {
|
||||
if strings.HasPrefix(k, "x-amz-checksum-") {
|
||||
checkCrc = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if opts.Checksum.IsSet() || checkCrc {
|
||||
switch {
|
||||
case !c.trailingHeaderSupport:
|
||||
return errInvalidArgument("Checksum requires Client with TrailingHeaders enabled")
|
||||
|
|
@ -307,10 +317,10 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
|
|||
// be reused for best outcomes for PutObject(), pass the size always.
|
||||
//
|
||||
// NOTE: Upon errors during upload multipart operation is entirely aborted.
|
||||
func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
|
||||
func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
|
||||
opts PutObjectOptions,
|
||||
) (info UploadInfo, err error) {
|
||||
if objectSize < 0 && opts.DisableMultipart {
|
||||
if size < 0 && opts.DisableMultipart {
|
||||
return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
|
||||
}
|
||||
|
||||
|
|
@ -319,15 +329,20 @@ func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, r
|
|||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
|
||||
}
|
||||
|
||||
func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
|
||||
// Check for largest object size allowed.
|
||||
if size > int64(maxMultipartPutObjectSize) {
|
||||
return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
|
||||
}
|
||||
opts.AutoChecksum.SetDefault(ChecksumCRC32C)
|
||||
|
||||
if opts.Checksum.IsSet() {
|
||||
opts.AutoChecksum = opts.Checksum
|
||||
opts.SendContentMd5 = false
|
||||
}
|
||||
|
||||
if c.trailingHeaderSupport {
|
||||
opts.AutoChecksum.SetDefault(ChecksumCRC32C)
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// NOTE: Streaming signature is not supported by GCS.
|
||||
if s3utils.IsGoogleEndpoint(*c.endpointURL) {
|
||||
|
|
@ -385,20 +400,11 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
|
|||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
if opts.Checksum.IsSet() {
|
||||
opts.SendContentMd5 = false
|
||||
opts.AutoChecksum = opts.Checksum
|
||||
}
|
||||
if !opts.SendContentMd5 {
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// Initiate a new multipart upload.
|
||||
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
|
|
@ -437,11 +443,17 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
|
|||
hash.Write(buf[:length])
|
||||
md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
|
||||
hash.Close()
|
||||
} else {
|
||||
}
|
||||
|
||||
if opts.AutoChecksum.IsSet() {
|
||||
crc.Reset()
|
||||
crc.Write(buf[:length])
|
||||
cSum := crc.Sum(nil)
|
||||
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
|
||||
customHeader.Set(amzChecksumAlgo, opts.AutoChecksum.String())
|
||||
if opts.AutoChecksum.FullObjectRequested() {
|
||||
customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Update progress reader appropriately to the latest offset
|
||||
|
|
|
|||
7 vendor/github.com/minio/minio-go/v7/api.go generated vendored
@@ -43,12 +43,11 @@ import (
 md5simd "github.com/minio/md5-simd"
 "github.com/minio/minio-go/v7/pkg/credentials"
 "github.com/minio/minio-go/v7/pkg/kvcache"
+"github.com/minio/minio-go/v7/pkg/peeker"
 "github.com/minio/minio-go/v7/pkg/s3utils"
 "github.com/minio/minio-go/v7/pkg/signer"
 "github.com/minio/minio-go/v7/pkg/singleflight"
 "golang.org/x/net/publicsuffix"
-
-internalutils "github.com/minio/minio-go/v7/pkg/utils"
 )

 // Client implements Amazon S3 compatible methods.
@@ -163,7 +162,7 @@ type Options struct {
 // Global constants.
 const (
 libraryName = "minio-go"
-libraryVersion = "v7.0.94"
+libraryVersion = "v7.0.95"
 )

 // User Agent should always following the below style.
@@ -625,7 +624,7 @@ func (c *Client) do(req *http.Request) (resp *http.Response, err error) {
 // - Return the error XML bytes if an error is found
 // - Make sure to always restablish the whole http response stream before returning
 func tryParseErrRespFromBody(resp *http.Response) ([]byte, error) {
-peeker := internalutils.NewPeekReadCloser(resp.Body, 5*humanize.MiByte)
+peeker := peeker.NewPeekReadCloser(resp.Body, 5*humanize.MiByte)
 defer func() {
 peeker.ReplayFromStart()
 resp.Body = peeker
|||
26
vendor/github.com/minio/minio-go/v7/checksum.go
generated
vendored
|
|
@ -29,6 +29,7 @@ import (
|
|||
"math/bits"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/crc64nvme"
|
||||
)
|
||||
|
|
@ -432,9 +433,19 @@ func addAutoChecksumHeaders(opts *PutObjectOptions) {
|
|||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
if opts.AutoChecksum.FullObjectRequested() {
|
||||
opts.UserMetadata[amzChecksumMode] = ChecksumFullObjectMode.String()
|
||||
|
||||
addChecksum := true
|
||||
for k := range opts.UserMetadata {
|
||||
if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
|
||||
addChecksum = false
|
||||
}
|
||||
}
|
||||
|
||||
if addChecksum && opts.AutoChecksum.IsSet() {
|
||||
opts.UserMetadata[amzChecksumAlgo] = opts.AutoChecksum.String()
|
||||
if opts.AutoChecksum.FullObjectRequested() {
|
||||
opts.UserMetadata[amzChecksumMode] = ChecksumFullObjectMode.String()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -446,14 +457,17 @@ func applyAutoChecksum(opts *PutObjectOptions, allParts []ObjectPart) {
|
|||
// Add composite hash of hashes.
|
||||
crc, err := opts.AutoChecksum.CompositeChecksum(allParts)
|
||||
if err == nil {
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): crc.Encoded()}
|
||||
opts.UserMetadata = map[string]string{
|
||||
opts.AutoChecksum.Key(): crc.Encoded(),
|
||||
amzChecksumMode: ChecksumCompositeMode.String(),
|
||||
}
|
||||
}
|
||||
} else if opts.AutoChecksum.CanMergeCRC() {
|
||||
crc, err := opts.AutoChecksum.FullObjectChecksum(allParts)
|
||||
if err == nil {
|
||||
opts.UserMetadata = map[string]string{
|
||||
opts.AutoChecksum.KeyCapitalized(): crc.Encoded(),
|
||||
amzChecksumMode: ChecksumFullObjectMode.String(),
|
||||
opts.AutoChecksum.Key(): crc.Encoded(),
|
||||
amzChecksumMode: ChecksumFullObjectMode.String(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
215
vendor/github.com/minio/minio-go/v7/functional_tests.go
generated
vendored
|
|
@ -1970,7 +1970,7 @@ func testPutObjectWithChecksums() {
|
|||
// initialize logging params
|
||||
startTime := time.Now()
|
||||
testName := getFuncName()
|
||||
function := "PutObject(bucketName, objectName, reader,size, opts)"
|
||||
function := "PutObject(bucketName, objectName, reader, size, opts)"
|
||||
args := map[string]interface{}{
|
||||
"bucketName": "",
|
||||
"objectName": "",
|
||||
|
|
@ -1982,7 +1982,7 @@ func testPutObjectWithChecksums() {
|
|||
return
|
||||
}
|
||||
|
||||
c, err := NewClient(ClientConfig{})
|
||||
c, err := NewClient(ClientConfig{TrailingHeaders: true})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
|
||||
return
|
||||
|
|
@ -2037,6 +2037,10 @@ func testPutObjectWithChecksums() {
|
|||
h := test.cs.Hasher()
|
||||
h.Reset()
|
||||
|
||||
if test.cs.IsSet() {
|
||||
meta["x-amz-checksum-algorithm"] = test.cs.String()
|
||||
}
|
||||
|
||||
// Test with a bad CRC - we haven't called h.Write(b), so this is a checksum of empty data
|
||||
meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
args["metadata"] = meta
|
||||
|
|
@ -2323,7 +2327,7 @@ func testPutObjectWithTrailingChecksums() {
|
|||
}
|
||||
|
||||
// Test PutObject with custom checksums.
|
||||
func testPutMultipartObjectWithChecksums(trailing bool) {
|
||||
func testPutMultipartObjectWithChecksums() {
|
||||
// initialize logging params
|
||||
startTime := time.Now()
|
||||
testName := getFuncName()
|
||||
|
|
@ -2331,7 +2335,7 @@ func testPutMultipartObjectWithChecksums(trailing bool) {
|
|||
args := map[string]interface{}{
|
||||
"bucketName": "",
|
||||
"objectName": "",
|
||||
"opts": fmt.Sprintf("minio.PutObjectOptions{UserMetadata: metadata, Trailing: %v}", trailing),
|
||||
"opts": "minio.PutObjectOptions{UserMetadata: metadata, Trailing: true}",
|
||||
}
|
||||
|
||||
if !isFullMode() {
|
||||
|
|
@ -2339,7 +2343,7 @@ func testPutMultipartObjectWithChecksums(trailing bool) {
|
|||
return
|
||||
}
|
||||
|
||||
c, err := NewClient(ClientConfig{TrailingHeaders: trailing})
|
||||
c, err := NewClient(ClientConfig{TrailingHeaders: true})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
|
||||
return
|
||||
|
|
@ -2433,12 +2437,8 @@ func testPutMultipartObjectWithChecksums(trailing bool) {
|
|||
h.Reset()
|
||||
want := hashMultiPart(b, partSize, test.cs)
|
||||
|
||||
var cs minio.ChecksumType
|
||||
rd := io.Reader(io.NopCloser(bytes.NewReader(b)))
|
||||
if trailing {
|
||||
cs = test.cs
|
||||
rd = bytes.NewReader(b)
|
||||
}
|
||||
rd := bytes.NewReader(b)
|
||||
cs := test.cs
|
||||
|
||||
// Set correct CRC.
|
||||
args["section"] = "PutObject"
|
||||
|
|
@ -2447,7 +2447,6 @@ func testPutMultipartObjectWithChecksums(trailing bool) {
|
|||
DisableMultipart: false,
|
||||
UserMetadata: nil,
|
||||
PartSize: partSize,
|
||||
AutoChecksum: test.cs,
|
||||
Checksum: cs,
|
||||
})
|
||||
if err != nil {
|
||||
|
|
@ -2589,11 +2588,10 @@ func testTrailingChecksums() {
|
|||
return
|
||||
}
|
||||
|
||||
hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string {
|
||||
hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) (oparts []minio.ObjectPart) {
|
||||
r := bytes.NewReader(b)
|
||||
tmp := make([]byte, partSize)
|
||||
parts := 0
|
||||
var all []byte
|
||||
for {
|
||||
n, err := io.ReadFull(r, tmp)
|
||||
if err != nil && err != io.ErrUnexpectedEOF {
|
||||
|
|
@ -2605,14 +2603,16 @@ func testTrailingChecksums() {
|
|||
parts++
|
||||
hasher.Reset()
|
||||
hasher.Write(tmp[:n])
|
||||
all = append(all, hasher.Sum(nil)...)
|
||||
oparts = append(oparts, minio.ObjectPart{
|
||||
PartNumber: parts,
|
||||
Size: int64(n),
|
||||
ChecksumCRC32C: base64.StdEncoding.EncodeToString(hasher.Sum(nil)),
|
||||
})
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
hasher.Reset()
|
||||
hasher.Write(all)
|
||||
return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts)
|
||||
return oparts
|
||||
}
|
||||
defer cleanupBucket(bucketName, c)
|
||||
tests := []struct {
|
||||
|
|
@ -2636,6 +2636,7 @@ func testTrailingChecksums() {
|
|||
DisableMultipart: false,
|
||||
UserMetadata: nil,
|
||||
PartSize: 5 << 20,
|
||||
Checksum: minio.ChecksumFullObjectCRC32C,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -2647,6 +2648,7 @@ func testTrailingChecksums() {
|
|||
DisableMultipart: false,
|
||||
UserMetadata: nil,
|
||||
PartSize: 6_645_654, // Rather arbitrary size
|
||||
Checksum: minio.ChecksumFullObjectCRC32C,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -2658,6 +2660,7 @@ func testTrailingChecksums() {
|
|||
DisableMultipart: false,
|
||||
UserMetadata: nil,
|
||||
PartSize: 5 << 20,
|
||||
Checksum: minio.ChecksumFullObjectCRC32C,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -2669,6 +2672,7 @@ func testTrailingChecksums() {
|
|||
DisableMultipart: false,
|
||||
UserMetadata: nil,
|
||||
PartSize: 6_645_654, // Rather arbitrary size
|
||||
Checksum: minio.ChecksumFullObjectCRC32C,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
@ -2696,7 +2700,14 @@ func testTrailingChecksums() {
|
|||
reader.Close()
|
||||
h := test.hasher
|
||||
h.Reset()
|
||||
test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher)
|
||||
|
||||
parts := hashMultiPart(b, int(test.PO.PartSize), test.hasher)
|
||||
cksum, err := minio.ChecksumFullObjectCRC32C.FullObjectChecksum(parts)
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "checksum calculation failed", err)
|
||||
return
|
||||
}
|
||||
test.ChecksumCRC32C = cksum.Encoded()
|
||||
|
||||
// Set correct CRC.
|
||||
resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO)
|
||||
|
|
@ -4172,7 +4183,7 @@ func testFPutObjectMultipart() {
|
|||
"opts": "",
|
||||
}
|
||||
|
||||
c, err := NewClient(ClientConfig{})
|
||||
c, err := NewClient(ClientConfig{TrailingHeaders: true})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
|
||||
return
|
||||
|
|
@ -5586,6 +5597,161 @@ func testPresignedPostPolicyWrongFile() {
|
|||
logSuccess(testName, function, args, startTime)
|
||||
}
|
||||
|
||||
// testPresignedPostPolicyEmptyFileName tests that an empty file name in the presigned post policy
|
||||
func testPresignedPostPolicyEmptyFileName() {
|
||||
// initialize logging params
|
||||
startTime := time.Now()
|
||||
testName := getFuncName()
|
||||
function := "PresignedPostPolicy(policy)"
|
||||
args := map[string]interface{}{
|
||||
"policy": "",
|
||||
}
|
||||
|
||||
c, err := NewClient(ClientConfig{})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Generate a new random bucket name.
|
||||
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
|
||||
|
||||
// Make a new bucket in 'us-east-1' (source bucket).
|
||||
err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "MakeBucket failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
defer cleanupBucket(bucketName, c)
|
||||
|
||||
// Generate 33K of data.
|
||||
reader := getDataReader("datafile-33-kB")
|
||||
defer reader.Close()
|
||||
|
||||
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
// Azure requires the key to not start with a number
|
||||
metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
|
||||
metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
|
||||
|
||||
buf, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
policy := minio.NewPostPolicy()
|
||||
policy.SetBucket(bucketName)
|
||||
policy.SetKey(objectName)
|
||||
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
|
||||
policy.SetContentType("binary/octet-stream")
|
||||
policy.SetContentLengthRange(10, 1024*1024)
|
||||
policy.SetUserMetadata(metadataKey, metadataValue)
|
||||
policy.SetContentEncoding("gzip")
|
||||
|
||||
// Add CRC32C
|
||||
checksum := minio.ChecksumCRC32C.ChecksumBytes(buf)
|
||||
err = policy.SetChecksum(checksum)
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "SetChecksum failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
args["policy"] = policy.String()
|
||||
|
||||
presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy)
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
var formBuf bytes.Buffer
|
||||
writer := multipart.NewWriter(&formBuf)
|
||||
for k, v := range formData {
|
||||
writer.WriteField(k, v)
|
||||
}
|
||||
|
||||
// Get a 33KB file to upload and test if set post policy works
|
||||
filePath := getMintDataDirFilePath("datafile-33-kB")
|
||||
if filePath == "" {
|
||||
// Make a temp file with 33 KB data.
|
||||
file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest")
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "TempFile creation failed", err)
|
||||
return
|
||||
}
|
||||
if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil {
|
||||
logError(testName, function, args, startTime, "", "Copy failed", err)
|
||||
return
|
||||
}
|
||||
if err = file.Close(); err != nil {
|
||||
logError(testName, function, args, startTime, "", "File Close failed", err)
|
||||
return
|
||||
}
|
||||
filePath = file.Name()
|
||||
}
|
||||
|
||||
// add file to post request
|
||||
f, err := os.Open(filePath)
|
||||
defer f.Close()
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "File open failed", err)
|
||||
return
|
||||
}
|
||||
w, err := writer.CreateFormFile("", filePath)
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "CreateFormFile failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = io.Copy(w, f)
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "Copy failed", err)
|
||||
return
|
||||
}
|
||||
writer.Close()
|
||||
|
||||
httpClient := &http.Client{
|
||||
// Setting a sensible time out of 30secs to wait for response
|
||||
// headers. Request is pro-actively canceled after 30secs
|
||||
// with no response.
|
||||
Timeout: 30 * time.Second,
|
||||
Transport: createHTTPTransport(),
|
||||
}
|
||||
args["url"] = presignedPostPolicyURL.String()
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes()))
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "Http request failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
|
||||
// make post request with correct form data
|
||||
res, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "Http request failed", err)
|
||||
return
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusBadRequest {
|
||||
logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status))
|
||||
return
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
logError(testName, function, args, startTime, "", "ReadAll failed", err)
|
||||
return
|
||||
}
|
||||
if !strings.Contains(string(body), "MalformedPOSTRequest") {
|
||||
logError(testName, function, args, startTime, "", "Invalid error from server", errors.New(string(body)))
|
||||
}
|
||||
|
||||
logSuccess(testName, function, args, startTime)
|
||||
}
|
||||
|
||||
// Tests copy object
|
||||
func testCopyObject() {
|
||||
// initialize logging params
|
||||
|
|
@ -11560,8 +11726,11 @@ func testPutObjectMetadataNonUSASCIIV2() {
|
|||
}
|
||||
|
||||
for k, v := range metadata {
|
||||
if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
|
||||
continue
|
||||
}
|
||||
if st.Metadata.Get(http.CanonicalHeaderKey("X-Amz-Meta-"+k)) != v {
|
||||
logError(testName, function, args, startTime, "", "Expected upload object metadata "+k+": "+v+" but got "+st.Metadata.Get("X-Amz-Meta-"+k), err)
|
||||
logError(testName, function, args, startTime, "", "Expected upload object metadata "+k+": "+v+" but got "+st.Metadata.Get(http.CanonicalHeaderKey("X-Amz-Meta-"+k)), err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
|
@ -14069,8 +14238,7 @@ func main() {
|
|||
testUserMetadataCopyingV2()
|
||||
testPutObjectWithChecksums()
|
||||
testPutObjectWithTrailingChecksums()
|
||||
testPutMultipartObjectWithChecksums(false)
|
||||
testPutMultipartObjectWithChecksums(true)
|
||||
testPutMultipartObjectWithChecksums()
|
||||
testPutObject0ByteV2()
|
||||
testPutObjectMetadataNonUSASCIIV2()
|
||||
testPutObjectNoLengthV2()
|
||||
|
|
@ -14098,6 +14266,7 @@ func main() {
|
|||
testGetObjectReadAtWhenEOFWasReached()
|
||||
testPresignedPostPolicy()
|
||||
testPresignedPostPolicyWrongFile()
|
||||
testPresignedPostPolicyEmptyFileName()
|
||||
testCopyObject()
|
||||
testComposeObjectErrorCases()
|
||||
testCompose10KSources()
|
||||
|
|
|
|||
2 vendor/github.com/minio/minio-go/v7/hook-reader.go generated vendored
@@ -84,7 +84,7 @@ func (hr *hookReader) Read(b []byte) (n int, err error) {
 // reports the data read from the source to the hook.
 func newHook(source, hook io.Reader) io.Reader {
 if hook == nil {
-return source
+return &hookReader{source: source}
 }
 return &hookReader{
 source: source,
5 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go generated vendored
@@ -125,6 +125,7 @@ func (i *STSCertificateIdentity) RetrieveWithCredContext(cc *CredContext) (Value
 queryValues := url.Values{}
 queryValues.Set("Action", "AssumeRoleWithCertificate")
 queryValues.Set("Version", STSVersion)
+queryValues.Set("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))
 if i.TokenRevokeType != "" {
 queryValues.Set("TokenRevokeType", i.TokenRevokeType)
 }
@@ -134,10 +135,6 @@ func (i *STSCertificateIdentity) RetrieveWithCredContext(cc *CredContext) (Value
 if err != nil {
 return Value{}, err
 }
-if req.Form == nil {
-req.Form = url.Values{}
-}
-req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))

 client := i.Client
 if client == nil {
@@ -15,7 +15,7 @@
 * limitations under the License.
 */

-package utils
+package peeker

 import (
 "bytes"
2 vendor/github.com/minio/minio-go/v7/post-policy.go generated vendored
@@ -417,7 +417,7 @@ func (p PostPolicy) String() string {

 // marshalJSON - Provides Marshaled JSON in bytes.
 func (p PostPolicy) marshalJSON() []byte {
-expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
+expirationStr := `"expiration":"` + p.expiration.UTC().Format(expirationDateFormat) + `"`
 var conditionsStr string
 conditions := []string{}
 for _, po := range p.conditions {
25
vendor/github.com/prometheus/otlptranslator/.gitignore
generated
vendored
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
# If you prefer the allow list template instead of the deny list, see community template:
|
||||
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
|
||||
#
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
# vendor/
|
||||
|
||||
# Go workspace file
|
||||
go.work
|
||||
go.work.sum
|
||||
|
||||
# env file
|
||||
.env
|
||||
106
vendor/github.com/prometheus/otlptranslator/.golangci.yml
generated
vendored
Normal file
|
|
@ -0,0 +1,106 @@
|
|||
formatters:
|
||||
enable:
|
||||
- gci
|
||||
- gofumpt
|
||||
settings:
|
||||
gci:
|
||||
sections:
|
||||
- standard
|
||||
- default
|
||||
- prefix(github.com/prometheus/otlptranslator)
|
||||
gofumpt:
|
||||
extra-rules: true
|
||||
issues:
|
||||
max-issues-per-linter: 0
|
||||
max-same-issues: 0
|
||||
linters:
|
||||
# Keep this list sorted alphabetically
|
||||
enable:
|
||||
- depguard
|
||||
- errorlint
|
||||
- exptostd
|
||||
- gocritic
|
||||
- godot
|
||||
- loggercheck
|
||||
- misspell
|
||||
- nilnesserr
|
||||
# TODO: Enable once https://github.com/golangci/golangci-lint/issues/3228 is fixed.
|
||||
# - nolintlint
|
||||
- perfsprint
|
||||
- predeclared
|
||||
- revive
|
||||
- sloglint
|
||||
- testifylint
|
||||
- unconvert
|
||||
- unused
|
||||
- usestdlibvars
|
||||
- whitespace
|
||||
settings:
|
||||
depguard:
|
||||
rules:
|
||||
main:
|
||||
deny:
|
||||
- pkg: sync/atomic
|
||||
desc: Use go.uber.org/atomic instead of sync/atomic
|
||||
- pkg: github.com/stretchr/testify/assert
|
||||
desc: Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert
|
||||
- pkg: io/ioutil
|
||||
desc: Use corresponding 'os' or 'io' functions instead.
|
||||
- pkg: regexp
|
||||
desc: Use github.com/grafana/regexp instead of regexp
|
||||
- pkg: github.com/pkg/errors
|
||||
desc: Use 'errors' or 'fmt' instead of github.com/pkg/errors
|
||||
- pkg: golang.org/x/exp/slices
|
||||
desc: Use 'slices' instead.
|
||||
perfsprint:
|
||||
# Optimizes `fmt.Errorf`.
|
||||
errorf: true
|
||||
revive:
|
||||
# By default, revive will enable only the linting rules that are named in the configuration file.
|
||||
# So, it's needed to explicitly enable all required rules here.
|
||||
rules:
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
|
||||
- name: blank-imports
|
||||
- name: comment-spacings
|
||||
- name: context-as-argument
|
||||
arguments:
|
||||
# Allow functions with test or bench signatures.
|
||||
- allowTypesBefore: '*testing.T,testing.TB'
|
||||
- name: context-keys-type
|
||||
- name: dot-imports
|
||||
- name: early-return
|
||||
arguments:
|
||||
- preserveScope
|
||||
# A lot of false positives: incorrectly identifies channel draining as "empty code block".
|
||||
# See https://github.com/mgechev/revive/issues/386
|
||||
- name: empty-block
|
||||
disabled: true
|
||||
- name: error-naming
|
||||
- name: error-return
|
||||
- name: error-strings
|
||||
- name: errorf
|
||||
- name: exported
|
||||
- name: increment-decrement
|
||||
- name: indent-error-flow
|
||||
arguments:
|
||||
- preserveScope
|
||||
- name: range
|
||||
- name: receiver-naming
|
||||
- name: redefines-builtin-id
|
||||
- name: superfluous-else
|
||||
arguments:
|
||||
- preserveScope
|
||||
- name: time-naming
|
||||
- name: unexported-return
|
||||
- name: unreachable-code
|
||||
- name: unused-parameter
|
||||
- name: var-declaration
|
||||
- name: var-naming
|
||||
testifylint:
|
||||
disable:
|
||||
- float-compare
|
||||
- go-require
|
||||
enable-all: true
|
||||
run:
|
||||
timeout: 15m
|
||||
version: "2"
|
||||
3
vendor/github.com/prometheus/otlptranslator/CODE_OF_CONDUCT.md
generated
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
# Prometheus Community Code of Conduct
|
||||
|
||||
Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
|
||||
201
vendor/github.com/prometheus/otlptranslator/LICENSE
generated
vendored
Normal file
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
4
vendor/github.com/prometheus/otlptranslator/MAINTAINERS.md
generated
vendored
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
* Arthur Silva Sens (arthursens2005@gmail.com / @ArthurSens)
|
||||
* Arve Knudsen (arve.knudsen@gmail.com / @aknuds1)
|
||||
* Jesús Vázquez (jesus.vazquez@grafana.com / @jesusvazquez)
|
||||
* Owen Williams (owen.williams@grafana.com / @ywwg)
|
||||
2
vendor/github.com/prometheus/otlptranslator/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
# otlp-prometheus-translator
|
||||
Library providing API to convert OTLP metric and attribute names to respectively Prometheus metric and label names.
|
||||
6
vendor/github.com/prometheus/otlptranslator/SECURITY.md
generated
vendored
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
# Reporting a security issue
|
||||
|
||||
The Prometheus security policy, including how to report vulnerabilities, can be
|
||||
found here:
|
||||
|
||||
<https://prometheus.io/docs/operating/security/>
|
||||
38
vendor/github.com/prometheus/otlptranslator/constants.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptranslator

const (
	// ExemplarTraceIDKey is the key used to store the trace ID in Prometheus
	// exemplars:
	// https://github.com/open-telemetry/opentelemetry-specification/blob/e6eccba97ebaffbbfad6d4358408a2cead0ec2df/specification/compatibility/prometheus_and_openmetrics.md#exemplars
	ExemplarTraceIDKey = "trace_id"
	// ExemplarSpanIDKey is the key used to store the Span ID in Prometheus
	// exemplars:
	// https://github.com/open-telemetry/opentelemetry-specification/blob/e6eccba97ebaffbbfad6d4358408a2cead0ec2df/specification/compatibility/prometheus_and_openmetrics.md#exemplars
	ExemplarSpanIDKey = "span_id"
	// ScopeNameLabelKey is the name of the label key used to identify the name
	// of the OpenTelemetry scope which produced the metric:
	// https://github.com/open-telemetry/opentelemetry-specification/blob/e6eccba97ebaffbbfad6d4358408a2cead0ec2df/specification/compatibility/prometheus_and_openmetrics.md#instrumentation-scope
	ScopeNameLabelKey = "otel_scope_name"
	// ScopeVersionLabelKey is the name of the label key used to identify the
	// version of the OpenTelemetry scope which produced the metric:
	// https://github.com/open-telemetry/opentelemetry-specification/blob/e6eccba97ebaffbbfad6d4358408a2cead0ec2df/specification/compatibility/prometheus_and_openmetrics.md#instrumentation-scope
	ScopeVersionLabelKey = "otel_scope_version"
	// TargetInfoMetricName is the name of the metric used to preserve resource
	// attributes in Prometheus format:
	// https://github.com/open-telemetry/opentelemetry-specification/blob/e6eccba97ebaffbbfad6d4358408a2cead0ec2df/specification/compatibility/prometheus_and_openmetrics.md#resource-attributes-1
	// It originates from OpenMetrics:
	// https://github.com/OpenObservability/OpenMetrics/blob/1386544931307dff279688f332890c31b6c5de36/specification/OpenMetrics.md#supporting-target-metadata-in-both-push-based-and-pull-based-systems
	TargetInfoMetricName = "target_info"
)
275 vendor/github.com/prometheus/otlptranslator/metric_namer.go generated vendored Normal file
|
|
@ -0,0 +1,275 @@
|
|||
// Copyright 2025 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/metric_name_builder.go
|
||||
// Provenance-includes-license: Apache-2.0
|
||||
// Provenance-includes-copyright: Copyright The Prometheus Authors
|
||||
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name.go
|
||||
// Provenance-includes-license: Apache-2.0
|
||||
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
|
||||
|
||||
package otlptranslator
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/grafana/regexp"
|
||||
)
|
||||
|
||||
// The map to translate OTLP units to Prometheus units
|
||||
// OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html
|
||||
// (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units)
|
||||
// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units
|
||||
// OpenMetrics specification for units: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#units-and-base-units
|
||||
var unitMap = map[string]string{
|
||||
// Time
|
||||
"d": "days",
|
||||
"h": "hours",
|
||||
"min": "minutes",
|
||||
"s": "seconds",
|
||||
"ms": "milliseconds",
|
||||
"us": "microseconds",
|
||||
"ns": "nanoseconds",
|
||||
|
||||
// Bytes
|
||||
"By": "bytes",
|
||||
"KiBy": "kibibytes",
|
||||
"MiBy": "mebibytes",
|
||||
"GiBy": "gibibytes",
|
||||
"TiBy": "tibibytes",
|
||||
"KBy": "kilobytes",
|
||||
"MBy": "megabytes",
|
||||
"GBy": "gigabytes",
|
||||
"TBy": "terabytes",
|
||||
|
||||
// SI
|
||||
"m": "meters",
|
||||
"V": "volts",
|
||||
"A": "amperes",
|
||||
"J": "joules",
|
||||
"W": "watts",
|
||||
"g": "grams",
|
||||
|
||||
// Misc
|
||||
"Cel": "celsius",
|
||||
"Hz": "hertz",
|
||||
"1": "",
|
||||
"%": "percent",
|
||||
}
|
||||
|
||||
// The map that translates the "per" unit.
|
||||
// Example: s => per second (singular).
|
||||
var perUnitMap = map[string]string{
|
||||
"s": "second",
|
||||
"m": "minute",
|
||||
"h": "hour",
|
||||
"d": "day",
|
||||
"w": "week",
|
||||
"mo": "month",
|
||||
"y": "year",
|
||||
}
|
||||
|
||||
// MetricNamer is a helper struct to build metric names.
|
||||
type MetricNamer struct {
|
||||
Namespace string
|
||||
WithMetricSuffixes bool
|
||||
UTF8Allowed bool
|
||||
}
|
||||
|
||||
// Metric is a helper struct that holds information about a metric.
|
||||
type Metric struct {
|
||||
Name string
|
||||
Unit string
|
||||
Type MetricType
|
||||
}
|
||||
|
||||
// Build builds a metric name for the specified metric.
|
||||
//
|
||||
// If UTF8Allowed is true, the metric name is returned as is, only with the addition of type/unit suffixes and namespace preffix if required.
|
||||
// Otherwise the metric name is normalized to be Prometheus-compliant.
|
||||
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels,
|
||||
// https://prometheus.io/docs/practices/naming/#metric-and-label-naming
|
||||
func (mn *MetricNamer) Build(metric Metric) string {
|
||||
if mn.UTF8Allowed {
|
||||
return mn.buildMetricName(metric.Name, metric.Unit, metric.Type)
|
||||
}
|
||||
return mn.buildCompliantMetricName(metric.Name, metric.Unit, metric.Type)
|
||||
}
|
||||
|
||||
func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType MetricType) string {
|
||||
// Full normalization following standard Prometheus naming conventions
|
||||
if mn.WithMetricSuffixes {
|
||||
return normalizeName(name, unit, metricType, mn.Namespace)
|
||||
}
|
||||
|
||||
// Simple case (no full normalization, no units, etc.).
|
||||
metricName := strings.Join(strings.FieldsFunc(name, func(r rune) bool {
|
||||
return invalidMetricCharRE.MatchString(string(r))
|
||||
}), "_")
|
||||
|
||||
// Namespace?
|
||||
if mn.Namespace != "" {
|
||||
namespace := strings.Join(strings.FieldsFunc(mn.Namespace, func(r rune) bool {
|
||||
return invalidMetricCharRE.MatchString(string(r))
|
||||
}), "_")
|
||||
return namespace + "_" + metricName
|
||||
}
|
||||
|
||||
// Metric name starts with a digit? Prefix it with an underscore.
|
||||
if metricName != "" && unicode.IsDigit(rune(metricName[0])) {
|
||||
metricName = "_" + metricName
|
||||
}
|
||||
|
||||
return metricName
|
||||
}
|
||||
|
||||
var (
|
||||
// Regexp for metric name characters that should be replaced with _.
|
||||
invalidMetricCharRE = regexp.MustCompile(`[^a-zA-Z0-9:_]`)
|
||||
multipleUnderscoresRE = regexp.MustCompile(`__+`)
|
||||
)
|
||||
|
||||
// isValidCompliantMetricChar checks if a rune is a valid metric name character (a-z, A-Z, 0-9, :).
|
||||
func isValidCompliantMetricChar(r rune) bool {
|
||||
return (r >= 'a' && r <= 'z') ||
|
||||
(r >= 'A' && r <= 'Z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == ':'
|
||||
}
|
||||
|
||||
// replaceInvalidMetricChar replaces invalid metric name characters with underscore.
|
||||
func replaceInvalidMetricChar(r rune) rune {
|
||||
if isValidCompliantMetricChar(r) {
|
||||
return r
|
||||
}
|
||||
return '_'
|
||||
}
|
||||
|
||||
// Build a normalized name for the specified metric.
|
||||
func normalizeName(name, unit string, metricType MetricType, namespace string) string {
|
||||
// Split metric name into "tokens" (of supported metric name runes).
|
||||
// Note that this has the side effect of replacing multiple consecutive underscores with a single underscore.
|
||||
// This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus.
|
||||
nameTokens := strings.FieldsFunc(
|
||||
name,
|
||||
func(r rune) bool { return !isValidCompliantMetricChar(r) },
|
||||
)
|
||||
|
||||
mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(unit)
|
||||
nameTokens = addUnitTokens(nameTokens, cleanUpUnit(mainUnitSuffix), cleanUpUnit(perUnitSuffix))
|
||||
|
||||
// Append _total for Counters
|
||||
if metricType == MetricTypeMonotonicCounter {
|
||||
nameTokens = append(removeItem(nameTokens, "total"), "total")
|
||||
}
|
||||
|
||||
// Append _ratio for metrics with unit "1"
|
||||
// Some OTel receivers improperly use unit "1" for counters of objects
|
||||
// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions
|
||||
// Until these issues have been fixed, we're appending `_ratio` for gauges ONLY
|
||||
// Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons)
|
||||
if unit == "1" && metricType == MetricTypeGauge {
|
||||
nameTokens = append(removeItem(nameTokens, "ratio"), "ratio")
|
||||
}
|
||||
|
||||
// Namespace?
|
||||
if namespace != "" {
|
||||
nameTokens = append([]string{namespace}, nameTokens...)
|
||||
}
|
||||
|
||||
// Build the string from the tokens, separated with underscores
|
||||
normalizedName := strings.Join(nameTokens, "_")
|
||||
|
||||
// Metric name cannot start with a digit, so prefix it with "_" in this case
|
||||
if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) {
|
||||
normalizedName = "_" + normalizedName
|
||||
}
|
||||
|
||||
return normalizedName
|
||||
}
|
||||
|
||||
// addUnitTokens will add the suffixes to the nameTokens if they are not already present.
|
||||
// It will also remove trailing underscores from the main suffix to avoid double underscores
|
||||
// when joining the tokens.
|
||||
//
|
||||
// If the 'per' unit ends with underscore, the underscore will be removed. If the per unit is just
|
||||
// 'per_', it will be entirely removed.
|
||||
func addUnitTokens(nameTokens []string, mainUnitSuffix, perUnitSuffix string) []string {
|
||||
if slices.Contains(nameTokens, mainUnitSuffix) {
|
||||
mainUnitSuffix = ""
|
||||
}
|
||||
|
||||
if perUnitSuffix == "per_" {
|
||||
perUnitSuffix = ""
|
||||
} else {
|
||||
perUnitSuffix = strings.TrimSuffix(perUnitSuffix, "_")
|
||||
if slices.Contains(nameTokens, perUnitSuffix) {
|
||||
perUnitSuffix = ""
|
||||
}
|
||||
}
|
||||
|
||||
if perUnitSuffix != "" {
|
||||
mainUnitSuffix = strings.TrimSuffix(mainUnitSuffix, "_")
|
||||
}
|
||||
|
||||
if mainUnitSuffix != "" {
|
||||
nameTokens = append(nameTokens, mainUnitSuffix)
|
||||
}
|
||||
if perUnitSuffix != "" {
|
||||
nameTokens = append(nameTokens, perUnitSuffix)
|
||||
}
|
||||
return nameTokens
|
||||
}
|
||||
|
||||
// Remove the specified value from the slice.
|
||||
func removeItem(slice []string, value string) []string {
|
||||
newSlice := make([]string, 0, len(slice))
|
||||
for _, sliceEntry := range slice {
|
||||
if sliceEntry != value {
|
||||
newSlice = append(newSlice, sliceEntry)
|
||||
}
|
||||
}
|
||||
return newSlice
|
||||
}
|
||||
|
||||
func (mn *MetricNamer) buildMetricName(name, unit string, metricType MetricType) string {
|
||||
if mn.Namespace != "" {
|
||||
name = mn.Namespace + "_" + name
|
||||
}
|
||||
|
||||
if mn.WithMetricSuffixes {
|
||||
mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(unit)
|
||||
if mainUnitSuffix != "" {
|
||||
name = name + "_" + mainUnitSuffix
|
||||
}
|
||||
if perUnitSuffix != "" {
|
||||
name = name + "_" + perUnitSuffix
|
||||
}
|
||||
|
||||
// Append _total for Counters
|
||||
if metricType == MetricTypeMonotonicCounter {
|
||||
name += "_total"
|
||||
}
|
||||
|
||||
// Append _ratio for metrics with unit "1"
|
||||
// Some OTel receivers improperly use unit "1" for counters of objects
|
||||
// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions
|
||||
// Until these issues have been fixed, we're appending `_ratio` for gauges ONLY
|
||||
// Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons)
|
||||
if unit == "1" && metricType == MetricTypeGauge {
|
||||
name += "_ratio"
|
||||
}
|
||||
}
|
||||
return name
|
||||
}
|
||||
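Not part of the vendored diff: a minimal sketch of how the new `MetricNamer` could be exercised, assuming the import path `github.com/prometheus/otlptranslator` and the types shown above (plus `MetricTypeHistogram` from `metric_type.go` below). The commented output is inferred from the normalization rules in `normalizeName` and is illustrative only.

```go
package main

import (
	"fmt"

	"github.com/prometheus/otlptranslator"
)

func main() {
	namer := otlptranslator.MetricNamer{
		Namespace:          "gts", // hypothetical namespace prefix
		WithMetricSuffixes: true,  // append unit/_total/_ratio suffixes
		UTF8Allowed:        false, // force classic Prometheus-compliant names
	}
	name := namer.Build(otlptranslator.Metric{
		Name: "http.server.request.duration",
		Unit: "s",
		Type: otlptranslator.MetricTypeHistogram,
	})
	// Dots are replaced, the namespace is prepended and the "s" unit becomes
	// a "_seconds" suffix, e.g. "gts_http_server_request_duration_seconds".
	fmt.Println(name)
}
```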
36 vendor/github.com/prometheus/otlptranslator/metric_type.go generated vendored Normal file
|
|
@ -0,0 +1,36 @@
|
|||
// Copyright 2025 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
// limitations under the License.
|
||||
|
||||
package otlptranslator
|
||||
|
||||
// MetricType is a representation of metric types from OpenTelemetry.
|
||||
// Different types of Sums were introduced based on their metric temporalities.
|
||||
// For more details, see:
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums
|
||||
type MetricType int
|
||||
|
||||
const (
|
||||
// MetricTypeUnknown represents an unknown metric type.
|
||||
MetricTypeUnknown = iota
|
||||
// MetricTypeNonMonotonicCounter represents a counter that is not monotonically increasing, also known as delta counter.
|
||||
MetricTypeNonMonotonicCounter
|
||||
// MetricTypeMonotonicCounter represents a counter that is monotonically increasing, also known as cumulative counter.
|
||||
MetricTypeMonotonicCounter
|
||||
// MetricTypeGauge represents a gauge metric.
|
||||
MetricTypeGauge
|
||||
// MetricTypeHistogram represents a histogram metric.
|
||||
MetricTypeHistogram
|
||||
// MetricTypeExponentialHistogram represents an exponential histogram metric.
|
||||
MetricTypeExponentialHistogram
|
||||
// MetricTypeSummary represents a summary metric.
|
||||
MetricTypeSummary
|
||||
)
|
||||
57 vendor/github.com/prometheus/otlptranslator/normalize_label.go generated vendored Normal file
|
|
@ -0,0 +1,57 @@
|
|||
// Copyright 2025 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/normalize_label.go
|
||||
// Provenance-includes-license: Apache-2.0
|
||||
// Provenance-includes-copyright: Copyright The Prometheus Authors
|
||||
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go
|
||||
// Provenance-includes-license: Apache-2.0
|
||||
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
|
||||
|
||||
package otlptranslator
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// LabelNamer is a helper struct to build label names.
|
||||
type LabelNamer struct {
|
||||
UTF8Allowed bool
|
||||
}
|
||||
|
||||
// Build normalizes the specified label to follow Prometheus label names standard.
|
||||
//
|
||||
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels.
|
||||
//
|
||||
// Labels that start with non-letter rune will be prefixed with "key_".
|
||||
// An exception is made for double-underscores which are allowed.
|
||||
//
|
||||
// If UTF8Allowed is true, the label is returned as is. This option is provided just to
|
||||
// keep a consistent interface with the MetricNamer.
|
||||
func (ln *LabelNamer) Build(label string) string {
|
||||
// Trivial case.
|
||||
if len(label) == 0 || ln.UTF8Allowed {
|
||||
return label
|
||||
}
|
||||
|
||||
label = sanitizeLabelName(label)
|
||||
|
||||
// If label starts with a number, prepend with "key_".
|
||||
if unicode.IsDigit(rune(label[0])) {
|
||||
label = "key_" + label
|
||||
} else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") {
|
||||
label = "key" + label
|
||||
}
|
||||
|
||||
return label
|
||||
}
|
||||
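A hedged sketch of the `LabelNamer` behaviour described above (sanitize invalid runes, then prefix `key_`/`key` unless the label starts with a double underscore); the expected values are derived from the code in this file, not from upstream documentation.

```go
package main

import (
	"fmt"

	"github.com/prometheus/otlptranslator"
)

func main() {
	namer := otlptranslator.LabelNamer{UTF8Allowed: false}

	fmt.Println(namer.Build("http.response.status_code")) // http_response_status_code
	fmt.Println(namer.Build("0status"))                   // key_0status (digit prefix)
	fmt.Println(namer.Build("__name__"))                  // __name__ (double underscore kept)
}
```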
42 vendor/github.com/prometheus/otlptranslator/strconv.go generated vendored Normal file
|
|
@ -0,0 +1,42 @@
|
|||
// Copyright 2025 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/strconv.go.go
|
||||
// Provenance-includes-license: Apache-2.0
|
||||
// Provenance-includes-copyright: Copyright The Prometheus Authors
|
||||
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name_test.go
|
||||
// Provenance-includes-license: Apache-2.0
|
||||
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
|
||||
|
||||
package otlptranslator
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// sanitizeLabelName replaces any characters not valid according to the
|
||||
// classical Prometheus label naming scheme with an underscore.
|
||||
// Note: this does not handle all Prometheus label name restrictions (such as
|
||||
// not starting with a digit 0-9), and hence should only be used if the label
|
||||
// name is prefixed with a known valid string.
|
||||
func sanitizeLabelName(name string) string {
|
||||
var b strings.Builder
|
||||
b.Grow(len(name))
|
||||
for _, r := range name {
|
||||
if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') {
|
||||
b.WriteRune(r)
|
||||
} else {
|
||||
b.WriteRune('_')
|
||||
}
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
110 vendor/github.com/prometheus/otlptranslator/unit_namer.go generated vendored Normal file
|
|
@ -0,0 +1,110 @@
|
|||
// Copyright 2025 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
// limitations under the License.
|
||||
|
||||
package otlptranslator
|
||||
|
||||
import "strings"
|
||||
|
||||
// UnitNamer is a helper for building compliant unit names.
|
||||
type UnitNamer struct {
|
||||
UTF8Allowed bool
|
||||
}
|
||||
|
||||
// Build builds a unit name for the specified unit string.
|
||||
// It processes the unit by splitting it into main and per components,
|
||||
// applying appropriate unit mappings, and cleaning up invalid characters
|
||||
// when the whole UTF-8 character set is not allowed.
|
||||
func (un *UnitNamer) Build(unit string) string {
|
||||
mainUnit, perUnit := buildUnitSuffixes(unit)
|
||||
if !un.UTF8Allowed {
|
||||
mainUnit, perUnit = cleanUpUnit(mainUnit), cleanUpUnit(perUnit)
|
||||
}
|
||||
|
||||
var u string
|
||||
switch {
|
||||
case mainUnit != "" && perUnit != "":
|
||||
u = mainUnit + "_" + perUnit
|
||||
case mainUnit != "":
|
||||
u = mainUnit
|
||||
default:
|
||||
u = perUnit
|
||||
}
|
||||
|
||||
// Clean up leading and trailing underscores
|
||||
if len(u) > 0 && u[0:1] == "_" {
|
||||
u = u[1:]
|
||||
}
|
||||
if len(u) > 0 && u[len(u)-1:] == "_" {
|
||||
u = u[:len(u)-1]
|
||||
}
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit.
|
||||
// Returns the specified unit if not found in unitMap.
|
||||
func unitMapGetOrDefault(unit string) string {
|
||||
if promUnit, ok := unitMap[unit]; ok {
|
||||
return promUnit
|
||||
}
|
||||
return unit
|
||||
}
|
||||
|
||||
// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit.
|
||||
// Returns the specified unit if not found in perUnitMap.
|
||||
func perUnitMapGetOrDefault(perUnit string) string {
|
||||
if promPerUnit, ok := perUnitMap[perUnit]; ok {
|
||||
return promPerUnit
|
||||
}
|
||||
return perUnit
|
||||
}
|
||||
|
||||
// buildUnitSuffixes builds the main and per unit suffixes for the specified unit
|
||||
// but doesn't do any special character transformation to accommodate Prometheus naming conventions.
|
||||
// Removing trailing underscores or appending suffixes is done in the caller.
|
||||
func buildUnitSuffixes(unit string) (mainUnitSuffix, perUnitSuffix string) {
|
||||
// Split unit at the '/' if any
|
||||
unitTokens := strings.SplitN(unit, "/", 2)
|
||||
|
||||
if len(unitTokens) > 0 {
|
||||
// Main unit
|
||||
// Update if not blank and doesn't contain '{}'
|
||||
mainUnitOTel := strings.TrimSpace(unitTokens[0])
|
||||
if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") {
|
||||
mainUnitSuffix = unitMapGetOrDefault(mainUnitOTel)
|
||||
}
|
||||
|
||||
// Per unit
|
||||
// Update if not blank and doesn't contain '{}'
|
||||
if len(unitTokens) > 1 && unitTokens[1] != "" {
|
||||
perUnitOTel := strings.TrimSpace(unitTokens[1])
|
||||
if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") {
|
||||
perUnitSuffix = perUnitMapGetOrDefault(perUnitOTel)
|
||||
}
|
||||
if perUnitSuffix != "" {
|
||||
perUnitSuffix = "per_" + perUnitSuffix
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return mainUnitSuffix, perUnitSuffix
|
||||
}
|
||||
|
||||
// cleanUpUnit cleans up unit so it matches model.LabelNameRE.
|
||||
func cleanUpUnit(unit string) string {
|
||||
// Multiple consecutive underscores are replaced with a single underscore.
|
||||
// This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus.
|
||||
return strings.TrimPrefix(multipleUnderscoresRE.ReplaceAllString(
|
||||
strings.Map(replaceInvalidMetricChar, unit),
|
||||
"_",
|
||||
), "_")
|
||||
}
|
||||
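For reference, a small sketch of `UnitNamer.Build` using the unit tables from `metric_namer.go`; the expected strings follow from `buildUnitSuffixes` and `cleanUpUnit` above and are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/prometheus/otlptranslator"
)

func main() {
	namer := otlptranslator.UnitNamer{UTF8Allowed: false}

	fmt.Println(namer.Build("By"))    // bytes
	fmt.Println(namer.Build("m/s"))   // meters_per_second
	fmt.Println(namer.Build("{req}")) // empty: curly-brace annotations are dropped
}
```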
15 vendor/github.com/prometheus/procfs/Makefile.common generated vendored
|
|
@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
|
|||
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
|
||||
|
||||
GO_VERSION ?= $(shell $(GO) version)
|
||||
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
|
||||
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
|
||||
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
|
||||
|
||||
PROMU := $(FIRST_GOPATH)/bin/promu
|
||||
|
|
@ -61,7 +61,8 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
|
|||
SKIP_GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT_OPTS ?=
|
||||
GOLANGCI_LINT_VERSION ?= v2.0.2
|
||||
GOLANGCI_LINT_VERSION ?= v2.1.5
|
||||
GOLANGCI_FMT_OPTS ?=
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
||||
# windows isn't included here because of the path separator being different.
|
||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||
|
|
@ -156,9 +157,13 @@ $(GOTEST_DIR):
|
|||
@mkdir -p $@
|
||||
|
||||
.PHONY: common-format
|
||||
common-format:
|
||||
common-format: $(GOLANGCI_LINT)
|
||||
@echo ">> formatting code"
|
||||
$(GO) fmt $(pkgs)
|
||||
ifdef GOLANGCI_LINT
|
||||
@echo ">> formatting code with golangci-lint"
|
||||
$(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS)
|
||||
endif
|
||||
|
||||
.PHONY: common-vet
|
||||
common-vet:
|
||||
|
|
@ -248,8 +253,8 @@ $(PROMU):
|
|||
cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
|
||||
rm -r $(PROMU_TMP)
|
||||
|
||||
.PHONY: proto
|
||||
proto:
|
||||
.PHONY: common-proto
|
||||
common-proto:
|
||||
@echo ">> generating code from proto files"
|
||||
@./scripts/genproto.sh
|
||||
|
||||
|
|
|
|||
5 vendor/github.com/prometheus/procfs/mdstat.go generated vendored
|
|
@ -123,13 +123,16 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||
finish := float64(0)
|
||||
pct := float64(0)
|
||||
recovering := strings.Contains(lines[syncLineIdx], "recovery")
|
||||
reshaping := strings.Contains(lines[syncLineIdx], "reshape")
|
||||
resyncing := strings.Contains(lines[syncLineIdx], "resync")
|
||||
checking := strings.Contains(lines[syncLineIdx], "check")
|
||||
|
||||
// Append recovery and resyncing state info.
|
||||
if recovering || resyncing || checking {
|
||||
if recovering || resyncing || checking || reshaping {
|
||||
if recovering {
|
||||
state = "recovering"
|
||||
} else if reshaping {
|
||||
state = "reshaping"
|
||||
} else if checking {
|
||||
state = "checking"
|
||||
} else {
|
||||
|
|
|
|||
33 vendor/github.com/prometheus/procfs/meminfo.go generated vendored
|
|
@ -66,6 +66,10 @@ type Meminfo struct {
|
|||
// Memory which has been evicted from RAM, and is temporarily
|
||||
// on the disk
|
||||
SwapFree *uint64
|
||||
// Memory consumed by the zswap backend (compressed size)
|
||||
Zswap *uint64
|
||||
// Amount of anonymous memory stored in zswap (original size)
|
||||
Zswapped *uint64
|
||||
// Memory which is waiting to get written back to the disk
|
||||
Dirty *uint64
|
||||
// Memory which is actively being written back to the disk
|
||||
|
|
@ -85,6 +89,8 @@ type Meminfo struct {
|
|||
// amount of memory dedicated to the lowest level of page
|
||||
// tables.
|
||||
PageTables *uint64
|
||||
// secondary page tables.
|
||||
SecPageTables *uint64
|
||||
// NFS pages sent to the server, but not yet committed to
|
||||
// stable storage
|
||||
NFSUnstable *uint64
|
||||
|
|
@ -129,15 +135,18 @@ type Meminfo struct {
|
|||
Percpu *uint64
|
||||
HardwareCorrupted *uint64
|
||||
AnonHugePages *uint64
|
||||
FileHugePages *uint64
|
||||
ShmemHugePages *uint64
|
||||
ShmemPmdMapped *uint64
|
||||
CmaTotal *uint64
|
||||
CmaFree *uint64
|
||||
Unaccepted *uint64
|
||||
HugePagesTotal *uint64
|
||||
HugePagesFree *uint64
|
||||
HugePagesRsvd *uint64
|
||||
HugePagesSurp *uint64
|
||||
Hugepagesize *uint64
|
||||
Hugetlb *uint64
|
||||
DirectMap4k *uint64
|
||||
DirectMap2M *uint64
|
||||
DirectMap1G *uint64
|
||||
|
|
@ -161,6 +170,8 @@ type Meminfo struct {
|
|||
MlockedBytes *uint64
|
||||
SwapTotalBytes *uint64
|
||||
SwapFreeBytes *uint64
|
||||
ZswapBytes *uint64
|
||||
ZswappedBytes *uint64
|
||||
DirtyBytes *uint64
|
||||
WritebackBytes *uint64
|
||||
AnonPagesBytes *uint64
|
||||
|
|
@ -171,6 +182,7 @@ type Meminfo struct {
|
|||
SUnreclaimBytes *uint64
|
||||
KernelStackBytes *uint64
|
||||
PageTablesBytes *uint64
|
||||
SecPageTablesBytes *uint64
|
||||
NFSUnstableBytes *uint64
|
||||
BounceBytes *uint64
|
||||
WritebackTmpBytes *uint64
|
||||
|
|
@ -182,11 +194,14 @@ type Meminfo struct {
|
|||
PercpuBytes *uint64
|
||||
HardwareCorruptedBytes *uint64
|
||||
AnonHugePagesBytes *uint64
|
||||
FileHugePagesBytes *uint64
|
||||
ShmemHugePagesBytes *uint64
|
||||
ShmemPmdMappedBytes *uint64
|
||||
CmaTotalBytes *uint64
|
||||
CmaFreeBytes *uint64
|
||||
UnacceptedBytes *uint64
|
||||
HugepagesizeBytes *uint64
|
||||
HugetlbBytes *uint64
|
||||
DirectMap4kBytes *uint64
|
||||
DirectMap2MBytes *uint64
|
||||
DirectMap1GBytes *uint64
|
||||
|
|
@ -287,6 +302,12 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
|
|||
case "SwapFree:":
|
||||
m.SwapFree = &val
|
||||
m.SwapFreeBytes = &valBytes
|
||||
case "Zswap:":
|
||||
m.Zswap = &val
|
||||
m.ZswapBytes = &valBytes
|
||||
case "Zswapped:":
|
||||
m.Zswapped = &val
|
||||
m.ZswappedBytes = &valBytes
|
||||
case "Dirty:":
|
||||
m.Dirty = &val
|
||||
m.DirtyBytes = &valBytes
|
||||
|
|
@ -317,6 +338,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
|
|||
case "PageTables:":
|
||||
m.PageTables = &val
|
||||
m.PageTablesBytes = &valBytes
|
||||
case "SecPageTables:":
|
||||
m.SecPageTables = &val
|
||||
m.SecPageTablesBytes = &valBytes
|
||||
case "NFS_Unstable:":
|
||||
m.NFSUnstable = &val
|
||||
m.NFSUnstableBytes = &valBytes
|
||||
|
|
@ -350,6 +374,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
|
|||
case "AnonHugePages:":
|
||||
m.AnonHugePages = &val
|
||||
m.AnonHugePagesBytes = &valBytes
|
||||
case "FileHugePages:":
|
||||
m.FileHugePages = &val
|
||||
m.FileHugePagesBytes = &valBytes
|
||||
case "ShmemHugePages:":
|
||||
m.ShmemHugePages = &val
|
||||
m.ShmemHugePagesBytes = &valBytes
|
||||
|
|
@ -362,6 +389,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
|
|||
case "CmaFree:":
|
||||
m.CmaFree = &val
|
||||
m.CmaFreeBytes = &valBytes
|
||||
case "Unaccepted:":
|
||||
m.Unaccepted = &val
|
||||
m.UnacceptedBytes = &valBytes
|
||||
case "HugePages_Total:":
|
||||
m.HugePagesTotal = &val
|
||||
case "HugePages_Free:":
|
||||
|
|
@ -373,6 +403,9 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
|
|||
case "Hugepagesize:":
|
||||
m.Hugepagesize = &val
|
||||
m.HugepagesizeBytes = &valBytes
|
||||
case "Hugetlb:":
|
||||
m.Hugetlb = &val
|
||||
m.HugetlbBytes = &valBytes
|
||||
case "DirectMap4k:":
|
||||
m.DirectMap4k = &val
|
||||
m.DirectMap4kBytes = &valBytes
|
||||
|
|
|
|||
12 vendor/github.com/prometheus/procfs/proc_stat.go generated vendored
|
|
@ -101,6 +101,12 @@ type ProcStat struct {
|
|||
RSS int
|
||||
// Soft limit in bytes on the rss of the process.
|
||||
RSSLimit uint64
|
||||
// The address above which program text can run.
|
||||
StartCode uint64
|
||||
// The address below which program text can run.
|
||||
EndCode uint64
|
||||
// The address of the start (i.e., bottom) of the stack.
|
||||
StartStack uint64
|
||||
// CPU number last executed on.
|
||||
Processor uint
|
||||
// Real-time scheduling priority, a number in the range 1 to 99 for processes
|
||||
|
|
@ -177,9 +183,9 @@ func (p Proc) Stat() (ProcStat, error) {
|
|||
&s.VSize,
|
||||
&s.RSS,
|
||||
&s.RSSLimit,
|
||||
&ignoreUint64,
|
||||
&ignoreUint64,
|
||||
&ignoreUint64,
|
||||
&s.StartCode,
|
||||
&s.EndCode,
|
||||
&s.StartStack,
|
||||
&ignoreUint64,
|
||||
&ignoreUint64,
|
||||
&ignoreUint64,
|
||||
|
|
|
|||
116 vendor/github.com/prometheus/procfs/proc_statm.go generated vendored Normal file
|
|
@ -0,0 +1,116 @@
|
|||
// Copyright 2025 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// - https://man7.org/linux/man-pages/man5/proc_pid_statm.5.html
|
||||
|
||||
// ProcStatm Provides memory usage information for a process, measured in memory pages.
|
||||
// Read from /proc/[pid]/statm.
|
||||
type ProcStatm struct {
|
||||
// The process ID.
|
||||
PID int
|
||||
// total program size (same as VmSize in status)
|
||||
Size uint64
|
||||
// resident set size (same as VmRSS in status)
|
||||
Resident uint64
|
||||
// number of resident shared pages (i.e., backed by a file)
|
||||
Shared uint64
|
||||
// text (code)
|
||||
Text uint64
|
||||
// library (unused since Linux 2.6; always 0)
|
||||
Lib uint64
|
||||
// data + stack
|
||||
Data uint64
|
||||
// dirty pages (unused since Linux 2.6; always 0)
|
||||
Dt uint64
|
||||
}
|
||||
|
||||
// NewStatm returns the current status information of the process.
|
||||
// Deprecated: Use p.Statm() instead.
|
||||
func (p Proc) NewStatm() (ProcStatm, error) {
|
||||
return p.Statm()
|
||||
}
|
||||
|
||||
// Statm returns the current memory usage information of the process.
|
||||
func (p Proc) Statm() (ProcStatm, error) {
|
||||
data, err := util.ReadFileNoStat(p.path("statm"))
|
||||
if err != nil {
|
||||
return ProcStatm{}, err
|
||||
}
|
||||
|
||||
statmSlice, err := parseStatm(data)
|
||||
if err != nil {
|
||||
return ProcStatm{}, err
|
||||
}
|
||||
|
||||
procStatm := ProcStatm{
|
||||
PID: p.PID,
|
||||
Size: statmSlice[0],
|
||||
Resident: statmSlice[1],
|
||||
Shared: statmSlice[2],
|
||||
Text: statmSlice[3],
|
||||
Lib: statmSlice[4],
|
||||
Data: statmSlice[5],
|
||||
Dt: statmSlice[6],
|
||||
}
|
||||
|
||||
return procStatm, nil
|
||||
}
|
||||
|
||||
// parseStatm return /proc/[pid]/statm data to uint64 slice.
|
||||
func parseStatm(data []byte) ([]uint64, error) {
|
||||
var statmSlice []uint64
|
||||
statmItems := strings.Fields(string(data))
|
||||
for i := 0; i < len(statmItems); i++ {
|
||||
statmItem, err := strconv.ParseUint(statmItems[i], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
statmSlice = append(statmSlice, statmItem)
|
||||
}
|
||||
return statmSlice, nil
|
||||
}
|
||||
|
||||
// SizeBytes returns the process of total program size in bytes.
|
||||
func (s ProcStatm) SizeBytes() uint64 {
|
||||
return s.Size * uint64(os.Getpagesize())
|
||||
}
|
||||
|
||||
// ResidentBytes returns the process of resident set size in bytes.
|
||||
func (s ProcStatm) ResidentBytes() uint64 {
|
||||
return s.Resident * uint64(os.Getpagesize())
|
||||
}
|
||||
|
||||
// SHRBytes returns the process of share memory size in bytes.
|
||||
func (s ProcStatm) SHRBytes() uint64 {
|
||||
return s.Shared * uint64(os.Getpagesize())
|
||||
}
|
||||
|
||||
// TextBytes returns the process of text (code) size in bytes.
|
||||
func (s ProcStatm) TextBytes() uint64 {
|
||||
return s.Text * uint64(os.Getpagesize())
|
||||
}
|
||||
|
||||
// DataBytes returns the process of data + stack size in bytes.
|
||||
func (s ProcStatm) DataBytes() uint64 {
|
||||
return s.Data * uint64(os.Getpagesize())
|
||||
}
|
||||
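A minimal sketch of reading the new `/proc/[pid]/statm` support, assuming the usual `procfs.Self()` entry point and a mounted `/proc`; page-to-byte conversion uses the helper methods added in this file.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // current process
	if err != nil {
		log.Fatal(err)
	}

	statm, err := p.Statm() // added in this procfs version
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("pages: size=%d resident=%d shared=%d\n", statm.Size, statm.Resident, statm.Shared)
	fmt.Printf("resident bytes: %d\n", statm.ResidentBytes())
}
```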
27 vendor/github.com/spf13/pflag/README.md generated vendored
|
|
@ -284,6 +284,33 @@ func main() {
|
|||
}
|
||||
```
|
||||
|
||||
### Using pflag with go test
|
||||
`pflag` does not parse the shorthand versions of go test's built-in flags (i.e., those starting with `-test.`).
|
||||
For more context, see issues [#63](https://github.com/spf13/pflag/issues/63) and [#238](https://github.com/spf13/pflag/issues/238).
|
||||
|
||||
For example, if you use pflag in your `TestMain` function and call `pflag.Parse()` after defining your custom flags, running a test like this:
|
||||
```bash
|
||||
go test /your/tests -run ^YourTest -v --your-test-pflags
|
||||
```
|
||||
will result in the `-v` flag being ignored. This happens because of the way pflag handles flag parsing, skipping over go test's built-in shorthand flags.
|
||||
To work around this, you can use the `ParseSkippedFlags` function, which ensures that go test's flags are parsed separately using the standard flag package.
|
||||
|
||||
**Example**: You want to parse go test flags that are otherwise ignored by `pflag.Parse()`
|
||||
```go
|
||||
import (
|
||||
goflag "flag"
|
||||
flag "github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
var ip *int = flag.Int("flagname", 1234, "help message for flagname")
|
||||
|
||||
func main() {
|
||||
flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
|
||||
flag.ParseSkippedFlags(os.Args[1:], goflag.CommandLine)
|
||||
flag.Parse()
|
||||
}
|
||||
```
|
||||
|
||||
## More info
|
||||
|
||||
You can see the full reference documentation of the pflag package
|
||||
|
|
|
|||
40 vendor/github.com/spf13/pflag/bool_func.go generated vendored Normal file
|
|
@ -0,0 +1,40 @@
|
|||
package pflag
|
||||
|
||||
// -- func Value
|
||||
type boolfuncValue func(string) error
|
||||
|
||||
func (f boolfuncValue) Set(s string) error { return f(s) }
|
||||
|
||||
func (f boolfuncValue) Type() string { return "boolfunc" }
|
||||
|
||||
func (f boolfuncValue) String() string { return "" } // same behavior as stdlib 'flag' package
|
||||
|
||||
func (f boolfuncValue) IsBoolFlag() bool { return true }
|
||||
|
||||
// BoolFunc defines a func flag with specified name, callback function and usage string.
|
||||
//
|
||||
// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed
|
||||
// on the command line.
|
||||
func (f *FlagSet) BoolFunc(name string, usage string, fn func(string) error) {
|
||||
f.BoolFuncP(name, "", usage, fn)
|
||||
}
|
||||
|
||||
// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash.
|
||||
func (f *FlagSet) BoolFuncP(name, shorthand string, usage string, fn func(string) error) {
|
||||
var val Value = boolfuncValue(fn)
|
||||
flag := f.VarPF(val, name, shorthand, usage)
|
||||
flag.NoOptDefVal = "true"
|
||||
}
|
||||
|
||||
// BoolFunc defines a func flag with specified name, callback function and usage string.
|
||||
//
|
||||
// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed
|
||||
// on the command line.
|
||||
func BoolFunc(name string, usage string, fn func(string) error) {
|
||||
CommandLine.BoolFuncP(name, "", usage, fn)
|
||||
}
|
||||
|
||||
// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash.
|
||||
func BoolFuncP(name, shorthand string, usage string, fn func(string) error) {
|
||||
CommandLine.BoolFuncP(name, shorthand, usage, fn)
|
||||
}
|
||||
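Illustrative only: how the new `BoolFunc` flag type might be used. Because `NoOptDefVal` is set to "true", a bare `--trace` on the command line invokes the callback with "true"; the flag name here is made up for the example.

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// Hypothetical --trace flag: behaves like a bool flag, but runs a callback
	// instead of storing a value.
	flag.BoolFunc("trace", "enable tracing", func(s string) error {
		fmt.Println("trace set to", s)
		return nil
	})
	flag.Parse()
}
```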
2 vendor/github.com/spf13/pflag/count.go generated vendored
|
|
@ -85,7 +85,7 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
|
|||
|
||||
// Count defines a count flag with specified name, default value, and usage string.
|
||||
// The return value is the address of an int variable that stores the value of the flag.
|
||||
// A count flag will add 1 to its value evey time it is found on the command line
|
||||
// A count flag will add 1 to its value every time it is found on the command line
|
||||
func Count(name string, usage string) *int {
|
||||
return CommandLine.CountP(name, "", usage)
|
||||
}
|
||||
|
|
|
|||
149 vendor/github.com/spf13/pflag/errors.go generated vendored Normal file
|
|
@ -0,0 +1,149 @@
|
|||
package pflag
|
||||
|
||||
import "fmt"
|
||||
|
||||
// notExistErrorMessageType specifies which flavor of "flag does not exist"
|
||||
// is printed by NotExistError. This allows the related errors to be grouped
|
||||
// under a single NotExistError struct without making a breaking change to
|
||||
// the error message text.
|
||||
type notExistErrorMessageType int
|
||||
|
||||
const (
|
||||
flagNotExistMessage notExistErrorMessageType = iota
|
||||
flagNotDefinedMessage
|
||||
flagNoSuchFlagMessage
|
||||
flagUnknownFlagMessage
|
||||
flagUnknownShorthandFlagMessage
|
||||
)
|
||||
|
||||
// NotExistError is the error returned when trying to access a flag that
|
||||
// does not exist in the FlagSet.
|
||||
type NotExistError struct {
|
||||
name string
|
||||
specifiedShorthands string
|
||||
messageType notExistErrorMessageType
|
||||
}
|
||||
|
||||
// Error implements error.
|
||||
func (e *NotExistError) Error() string {
|
||||
switch e.messageType {
|
||||
case flagNotExistMessage:
|
||||
return fmt.Sprintf("flag %q does not exist", e.name)
|
||||
|
||||
case flagNotDefinedMessage:
|
||||
return fmt.Sprintf("flag accessed but not defined: %s", e.name)
|
||||
|
||||
case flagNoSuchFlagMessage:
|
||||
return fmt.Sprintf("no such flag -%v", e.name)
|
||||
|
||||
case flagUnknownFlagMessage:
|
||||
return fmt.Sprintf("unknown flag: --%s", e.name)
|
||||
|
||||
case flagUnknownShorthandFlagMessage:
|
||||
c := rune(e.name[0])
|
||||
return fmt.Sprintf("unknown shorthand flag: %q in -%s", c, e.specifiedShorthands)
|
||||
}
|
||||
|
||||
panic(fmt.Errorf("unknown flagNotExistErrorMessageType: %v", e.messageType))
|
||||
}
|
||||
|
||||
// GetSpecifiedName returns the name of the flag (without dashes) as it
|
||||
// appeared in the parsed arguments.
|
||||
func (e *NotExistError) GetSpecifiedName() string {
|
||||
return e.name
|
||||
}
|
||||
|
||||
// GetSpecifiedShortnames returns the group of shorthand arguments
|
||||
// (without dashes) that the flag appeared within. If the flag was not in a
|
||||
// shorthand group, this will return an empty string.
|
||||
func (e *NotExistError) GetSpecifiedShortnames() string {
|
||||
return e.specifiedShorthands
|
||||
}
|
||||
|
||||
// ValueRequiredError is the error returned when a flag needs an argument but
|
||||
// no argument was provided.
|
||||
type ValueRequiredError struct {
|
||||
flag *Flag
|
||||
specifiedName string
|
||||
specifiedShorthands string
|
||||
}
|
||||
|
||||
// Error implements error.
|
||||
func (e *ValueRequiredError) Error() string {
|
||||
if len(e.specifiedShorthands) > 0 {
|
||||
c := rune(e.specifiedName[0])
|
||||
return fmt.Sprintf("flag needs an argument: %q in -%s", c, e.specifiedShorthands)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("flag needs an argument: --%s", e.specifiedName)
|
||||
}
|
||||
|
||||
// GetFlag returns the flag for which the error occurred.
|
||||
func (e *ValueRequiredError) GetFlag() *Flag {
|
||||
return e.flag
|
||||
}
|
||||
|
||||
// GetSpecifiedName returns the name of the flag (without dashes) as it
|
||||
// appeared in the parsed arguments.
|
||||
func (e *ValueRequiredError) GetSpecifiedName() string {
|
||||
return e.specifiedName
|
||||
}
|
||||
|
||||
// GetSpecifiedShortnames returns the group of shorthand arguments
|
||||
// (without dashes) that the flag appeared within. If the flag was not in a
|
||||
// shorthand group, this will return an empty string.
|
||||
func (e *ValueRequiredError) GetSpecifiedShortnames() string {
|
||||
return e.specifiedShorthands
|
||||
}
|
||||
|
||||
// InvalidValueError is the error returned when an invalid value is used
|
||||
// for a flag.
|
||||
type InvalidValueError struct {
|
||||
flag *Flag
|
||||
value string
|
||||
cause error
|
||||
}
|
||||
|
||||
// Error implements error.
|
||||
func (e *InvalidValueError) Error() string {
|
||||
flag := e.flag
|
||||
var flagName string
|
||||
if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
|
||||
flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
|
||||
} else {
|
||||
flagName = fmt.Sprintf("--%s", flag.Name)
|
||||
}
|
||||
return fmt.Sprintf("invalid argument %q for %q flag: %v", e.value, flagName, e.cause)
|
||||
}
|
||||
|
||||
// Unwrap implements errors.Unwrap.
|
||||
func (e *InvalidValueError) Unwrap() error {
|
||||
return e.cause
|
||||
}
|
||||
|
||||
// GetFlag returns the flag for which the error occurred.
|
||||
func (e *InvalidValueError) GetFlag() *Flag {
|
||||
return e.flag
|
||||
}
|
||||
|
||||
// GetValue returns the invalid value that was provided.
|
||||
func (e *InvalidValueError) GetValue() string {
|
||||
return e.value
|
||||
}
|
||||
|
||||
// InvalidSyntaxError is the error returned when a bad flag name is passed on
|
||||
// the command line.
|
||||
type InvalidSyntaxError struct {
|
||||
specifiedFlag string
|
||||
}
|
||||
|
||||
// Error implements error.
|
||||
func (e *InvalidSyntaxError) Error() string {
|
||||
return fmt.Sprintf("bad flag syntax: %s", e.specifiedFlag)
|
||||
}
|
||||
|
||||
// GetSpecifiedFlag returns the exact flag (with dashes) as it
|
||||
// appeared in the parsed arguments.
|
||||
func (e *InvalidSyntaxError) GetSpecifiedFlag() string {
|
||||
return e.specifiedFlag
|
||||
}
|
||||
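A sketch of how callers could take advantage of the new typed errors with `errors.As`, assuming `Parse` returns them unwrapped as in the v1.0.7 code vendored here; the flag set and flag names are invented for the example.

```go
package main

import (
	"errors"
	"fmt"
	"os"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Int("port", 8080, "listen port")

	if err := fs.Parse(os.Args[1:]); err != nil {
		var notExist *flag.NotExistError
		var invalid *flag.InvalidValueError
		switch {
		case errors.As(err, &notExist):
			fmt.Println("unknown flag:", notExist.GetSpecifiedName())
		case errors.As(err, &invalid):
			fmt.Println("bad value", invalid.GetValue(), "for --"+invalid.GetFlag().Name)
		default:
			fmt.Println("parse error:", err)
		}
		os.Exit(2)
	}
}
```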
85 vendor/github.com/spf13/pflag/flag.go generated vendored
|
|
@ -27,23 +27,32 @@ unaffected.
|
|||
Define flags using flag.String(), Bool(), Int(), etc.
|
||||
|
||||
This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
|
||||
|
||||
var ip = flag.Int("flagname", 1234, "help message for flagname")
|
||||
|
||||
If you like, you can bind the flag to a variable using the Var() functions.
|
||||
|
||||
var flagvar int
|
||||
func init() {
|
||||
flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
|
||||
}
|
||||
|
||||
Or you can create custom flags that satisfy the Value interface (with
|
||||
pointer receivers) and couple them to flag parsing by
|
||||
|
||||
flag.Var(&flagVal, "name", "help message for flagname")
|
||||
|
||||
For such flags, the default value is just the initial value of the variable.
|
||||
|
||||
After all flags are defined, call
|
||||
|
||||
flag.Parse()
|
||||
|
||||
to parse the command line into the defined flags.
|
||||
|
||||
Flags may then be used directly. If you're using the flags themselves,
|
||||
they are all pointers; if you bind to variables, they're values.
|
||||
|
||||
fmt.Println("ip has value ", *ip)
|
||||
fmt.Println("flagvar has value ", flagvar)
|
||||
|
||||
|
|
@ -54,22 +63,26 @@ The arguments are indexed from 0 through flag.NArg()-1.
|
|||
The pflag package also defines some new functions that are not in flag,
|
||||
that give one-letter shorthands for flags. You can use these by appending
|
||||
'P' to the name of any function that defines a flag.
|
||||
|
||||
var ip = flag.IntP("flagname", "f", 1234, "help message")
|
||||
var flagvar bool
|
||||
func init() {
|
||||
flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
|
||||
}
|
||||
flag.VarP(&flagval, "varname", "v", "help message")
|
||||
|
||||
Shorthand letters can be used with single dashes on the command line.
|
||||
Boolean shorthand flags can be combined with other shorthand flags.
|
||||
|
||||
Command line flag syntax:
|
||||
|
||||
--flag // boolean flags only
|
||||
--flag=x
|
||||
|
||||
Unlike the flag package, a single dash before an option means something
|
||||
different than a double dash. Single dashes signify a series of shorthand
|
||||
letters for flags. All but the last shorthand letter must be boolean flags.
|
||||
|
||||
// boolean flags
|
||||
-f
|
||||
-abc
|
||||
|
|
@ -381,7 +394,7 @@ func (f *FlagSet) lookup(name NormalizedName) *Flag {
|
|||
func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
|
||||
flag := f.Lookup(name)
|
||||
if flag == nil {
|
||||
err := fmt.Errorf("flag accessed but not defined: %s", name)
|
||||
err := &NotExistError{name: name, messageType: flagNotDefinedMessage}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
@ -411,7 +424,7 @@ func (f *FlagSet) ArgsLenAtDash() int {
|
|||
func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
|
||||
flag := f.Lookup(name)
|
||||
if flag == nil {
|
||||
return fmt.Errorf("flag %q does not exist", name)
|
||||
return &NotExistError{name: name, messageType: flagNotExistMessage}
|
||||
}
|
||||
if usageMessage == "" {
|
||||
return fmt.Errorf("deprecated message for flag %q must be set", name)
|
||||
|
|
@ -427,7 +440,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
|
|||
func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
|
||||
flag := f.Lookup(name)
|
||||
if flag == nil {
|
||||
return fmt.Errorf("flag %q does not exist", name)
|
||||
return &NotExistError{name: name, messageType: flagNotExistMessage}
|
||||
}
|
||||
if usageMessage == "" {
|
||||
return fmt.Errorf("deprecated message for flag %q must be set", name)
|
||||
|
|
@ -441,7 +454,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro
|
|||
func (f *FlagSet) MarkHidden(name string) error {
|
||||
flag := f.Lookup(name)
|
||||
if flag == nil {
|
||||
return fmt.Errorf("flag %q does not exist", name)
|
||||
return &NotExistError{name: name, messageType: flagNotExistMessage}
|
||||
}
|
||||
flag.Hidden = true
|
||||
return nil
|
||||
|
|
@ -464,18 +477,16 @@ func (f *FlagSet) Set(name, value string) error {
|
|||
normalName := f.normalizeFlagName(name)
|
||||
flag, ok := f.formal[normalName]
|
||||
if !ok {
|
||||
return fmt.Errorf("no such flag -%v", name)
|
||||
return &NotExistError{name: name, messageType: flagNoSuchFlagMessage}
|
||||
}
|
||||
|
||||
err := flag.Value.Set(value)
|
||||
if err != nil {
|
||||
var flagName string
|
||||
if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
|
||||
flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
|
||||
} else {
|
||||
flagName = fmt.Sprintf("--%s", flag.Name)
|
||||
return &InvalidValueError{
|
||||
flag: flag,
|
||||
value: value,
|
||||
cause: err,
|
||||
}
|
||||
return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
|
||||
}
|
||||
|
||||
if !flag.Changed {
|
||||
|
|
@ -501,7 +512,7 @@ func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
|
|||
normalName := f.normalizeFlagName(name)
|
||||
flag, ok := f.formal[normalName]
|
||||
if !ok {
|
||||
return fmt.Errorf("no such flag -%v", name)
|
||||
return &NotExistError{name: name, messageType: flagNoSuchFlagMessage}
|
||||
}
|
||||
if flag.Annotations == nil {
|
||||
flag.Annotations = map[string][]string{}
|
||||
|
|
@ -538,7 +549,7 @@ func (f *FlagSet) PrintDefaults() {
|
|||
func (f *Flag) defaultIsZeroValue() bool {
|
||||
switch f.Value.(type) {
|
||||
case boolFlag:
|
||||
return f.DefValue == "false"
|
||||
return f.DefValue == "false" || f.DefValue == ""
|
||||
case *durationValue:
|
||||
// Beginning in Go 1.7, duration zero values are "0s"
|
||||
return f.DefValue == "0" || f.DefValue == "0s"
|
||||
|
|
@ -551,7 +562,7 @@ func (f *Flag) defaultIsZeroValue() bool {
|
|||
case *intSliceValue, *stringSliceValue, *stringArrayValue:
|
||||
return f.DefValue == "[]"
|
||||
default:
|
||||
switch f.Value.String() {
|
||||
switch f.DefValue {
|
||||
case "false":
|
||||
return true
|
||||
case "<nil>":
|
||||
|
|
@ -588,8 +599,10 @@ func UnquoteUsage(flag *Flag) (name string, usage string) {
|
|||
|
||||
name = flag.Value.Type()
|
||||
switch name {
|
||||
case "bool":
|
||||
case "bool", "boolfunc":
|
||||
name = ""
|
||||
case "func":
|
||||
name = "value"
|
||||
case "float64":
|
||||
name = "float"
|
||||
case "int64":
|
||||
|
|
@ -707,7 +720,7 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string {
|
|||
switch flag.Value.Type() {
|
||||
case "string":
|
||||
line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
|
||||
case "bool":
|
||||
case "bool", "boolfunc":
|
||||
if flag.NoOptDefVal != "true" {
|
||||
line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
|
||||
}
|
||||
|
|
@ -911,10 +924,9 @@ func VarP(value Value, name, shorthand, usage string) {
|
|||
CommandLine.VarP(value, name, shorthand, usage)
|
||||
}
|
||||
|
// failf prints to standard error a formatted error and usage message and
// fail prints an error message and usage message to standard error and
// returns the error.
func (f *FlagSet) failf(format string, a ...interface{}) error {
err := fmt.Errorf(format, a...)
func (f *FlagSet) fail(err error) error {
if f.errorHandling != ContinueOnError {
fmt.Fprintln(f.Output(), err)
f.usage()
@@ -934,9 +946,9 @@ func (f *FlagSet) usage() {
}
}

//--unknown (args will be empty)
//--unknown --next-flag ... (args will be --next-flag ...)
//--unknown arg ... (args will be arg ...)
// --unknown (args will be empty)
// --unknown --next-flag ... (args will be --next-flag ...)
// --unknown arg ... (args will be arg ...)
func stripUnknownFlagValue(args []string) []string {
if len(args) == 0 {
//--unknown
@@ -960,7 +972,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
a = args
name := s[2:]
if len(name) == 0 || name[0] == '-' || name[0] == '=' {
err = f.failf("bad flag syntax: %s", s)
err = f.fail(&InvalidSyntaxError{specifiedFlag: s})
return
}
@@ -982,7 +994,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
return stripUnknownFlagValue(a), nil
default:
err = f.failf("unknown flag: --%s", name)
err = f.fail(&NotExistError{name: name, messageType: flagUnknownFlagMessage})
return
}
}
@@ -1000,13 +1012,16 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
a = a[1:]
} else {
// '--flag' (arg was required)
err = f.failf("flag needs an argument: %s", s)
err = f.fail(&ValueRequiredError{
flag: flag,
specifiedName: name,
})
return
}

err = fn(flag, value)
if err != nil {
f.failf(err.Error())
f.fail(err)
}
return
}
@@ -1014,7 +1029,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
outArgs = args

if strings.HasPrefix(shorthands, "test.") {
if isGotestShorthandFlag(shorthands) {
return
}
@@ -1039,7 +1054,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse
outArgs = stripUnknownFlagValue(outArgs)
return
default:
err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
err = f.fail(&NotExistError{
name: string(c),
specifiedShorthands: shorthands,
messageType: flagUnknownShorthandFlagMessage,
})
return
}
}
@@ -1062,7 +1081,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse
outArgs = args[1:]
} else {
// '-f' (arg was required)
err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
err = f.fail(&ValueRequiredError{
flag: flag,
specifiedName: string(c),
specifiedShorthands: shorthands,
})
return
}
@@ -1072,7 +1095,7 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse
err = fn(flag, value)
if err != nil {
f.failf(err.Error())
f.fail(err)
}
return
}
@@ -1135,7 +1158,7 @@ func (f *FlagSet) Parse(arguments []string) error {
}
f.parsed = true

if len(arguments) < 0 {
if len(arguments) == 0 {
return nil
}
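As a side note on the pflag changes above: the replaced `failf` calls mean parse failures are now typed error values rather than plain formatted strings. A minimal, hypothetical sketch of how a caller using `ContinueOnError` could take advantage of that (assuming the error types shown in the diff, such as `NotExistError`, are exported by pflag v1.0.7):

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.String("config-path", "", "path to config file")

	if err := fs.Parse(os.Args[1:]); err != nil {
		// Hypothetical: detect "unknown flag" errors specifically.
		var notExist *pflag.NotExistError
		if errors.As(err, &notExist) {
			fmt.Fprintln(os.Stderr, err)
			fmt.Fprint(os.Stderr, fs.FlagUsages())
			os.Exit(2)
		}
		os.Exit(1)
	}
}
```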
37 vendor/github.com/spf13/pflag/func.go generated vendored Normal file
@@ -0,0 +1,37 @@
package pflag

// -- func Value
type funcValue func(string) error

func (f funcValue) Set(s string) error { return f(s) }

func (f funcValue) Type() string { return "func" }

func (f funcValue) String() string { return "" } // same behavior as stdlib 'flag' package

// Func defines a func flag with specified name, callback function and usage string.
//
// The callback function will be called every time "--{name}={value}" (or equivalent) is
// parsed on the command line, with "{value}" as an argument.
func (f *FlagSet) Func(name string, usage string, fn func(string) error) {
f.FuncP(name, "", usage, fn)
}

// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) FuncP(name string, shorthand string, usage string, fn func(string) error) {
var val Value = funcValue(fn)
f.VarP(val, name, shorthand, usage)
}

// Func defines a func flag with specified name, callback function and usage string.
//
// The callback function will be called every time "--{name}={value}" (or equivalent) is
// parsed on the command line, with "{value}" as an argument.
func Func(name string, usage string, fn func(string) error) {
CommandLine.FuncP(name, "", usage, fn)
}

// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash.
func FuncP(name, shorthand string, usage string, fn func(string) error) {
CommandLine.FuncP(name, shorthand, usage, fn)
}
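A small, hedged sketch of what the new `Func` flag enables: the callback runs once per occurrence of the flag on the command line, so repeated values can be validated and collected without writing a custom `pflag.Value` type. The flag name and validation below are illustrative only.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/pflag"
)

func main() {
	var labels []string
	pflag.Func("label", "repeatable key=value label", func(s string) error {
		// Reject values that are not key=value pairs.
		if !strings.Contains(s, "=") {
			return fmt.Errorf("label %q is not in key=value form", s)
		}
		labels = append(labels, s)
		return nil
	})
	pflag.Parse()
	fmt.Println("labels:", labels)
}
```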
22 vendor/github.com/spf13/pflag/golangflag.go generated vendored
@@ -10,6 +10,15 @@ import (
"strings"
)

// go test flags prefixes
func isGotestFlag(flag string) bool {
return strings.HasPrefix(flag, "-test.")
}

func isGotestShorthandFlag(flag string) bool {
return strings.HasPrefix(flag, "test.")
}

// flagValueWrapper implements pflag.Value around a flag.Value. The main
// difference here is the addition of the Type method that returns a string
// name of the type. As this is generally unknown, we approximate that with
@@ -103,3 +112,16 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
}
f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
}

// ParseSkippedFlags explicitly Parses go test flags (i.e. the one starting with '-test.') with goflag.Parse(),
// since by default those are skipped by pflag.Parse().
// Typical usage example: `ParseGoTestFlags(os.Args[1:], goflag.CommandLine)`
func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error {
var skippedFlags []string
for _, f := range osArgs {
if isGotestFlag(f) {
skippedFlags = append(skippedFlags, f)
}
}
return goFlagSet.Parse(skippedFlags)
}
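A hypothetical usage sketch for the new `ParseSkippedFlags` helper: in a test binary that parses its own flags with pflag, the skipped `-test.*` arguments can be forwarded to the standard library flag set before the tests run. The package layout below is illustrative, not taken from this repository.

```go
package main

import (
	goflag "flag"
	"os"
	"testing"

	"github.com/spf13/pflag"
)

func TestMain(m *testing.M) {
	pflag.Parse()
	// Hand the -test.* arguments (skipped by pflag) to the stdlib flag package.
	if err := pflag.ParseSkippedFlags(os.Args[1:], goflag.CommandLine); err != nil {
		panic(err)
	}
	os.Exit(m.Run())
}
```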
2 vendor/github.com/spf13/pflag/ipnet_slice.go generated vendored
@@ -73,7 +73,7 @@ func (s *ipNetSliceValue) String() string {

func ipNetSliceConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// Emtpy string would cause a slice with one (empty) entry
// Empty string would cause a slice with one (empty) entry
if len(val) == 0 {
return []net.IPNet{}, nil
}

81 vendor/github.com/spf13/pflag/text.go generated vendored Normal file
@@ -0,0 +1,81 @@
package pflag

import (
"encoding"
"fmt"
"reflect"
)

// following is copied from go 1.23.4 flag.go
type textValue struct{ p encoding.TextUnmarshaler }

func newTextValue(val encoding.TextMarshaler, p encoding.TextUnmarshaler) textValue {
ptrVal := reflect.ValueOf(p)
if ptrVal.Kind() != reflect.Ptr {
panic("variable value type must be a pointer")
}
defVal := reflect.ValueOf(val)
if defVal.Kind() == reflect.Ptr {
defVal = defVal.Elem()
}
if defVal.Type() != ptrVal.Type().Elem() {
panic(fmt.Sprintf("default type does not match variable type: %v != %v", defVal.Type(), ptrVal.Type().Elem()))
}
ptrVal.Elem().Set(defVal)
return textValue{p}
}

func (v textValue) Set(s string) error {
return v.p.UnmarshalText([]byte(s))
}

func (v textValue) Get() interface{} {
return v.p
}

func (v textValue) String() string {
if m, ok := v.p.(encoding.TextMarshaler); ok {
if b, err := m.MarshalText(); err == nil {
return string(b)
}
}
return ""
}

//end of copy

func (v textValue) Type() string {
return reflect.ValueOf(v.p).Type().Name()
}

// GetText set out, which implements encoding.UnmarshalText, to the value of a flag with given name
func (f *FlagSet) GetText(name string, out encoding.TextUnmarshaler) error {
flag := f.Lookup(name)
if flag == nil {
return fmt.Errorf("flag accessed but not defined: %s", name)
}
if flag.Value.Type() != reflect.TypeOf(out).Name() {
return fmt.Errorf("trying to get %s value of flag of type %s", reflect.TypeOf(out).Name(), flag.Value.Type())
}
return out.UnmarshalText([]byte(flag.Value.String()))
}

// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p.
func (f *FlagSet) TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) {
f.VarP(newTextValue(value, p), name, "", usage)
}

// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) {
f.VarP(newTextValue(value, p), name, shorthand, usage)
}

// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p.
func TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) {
CommandLine.VarP(newTextValue(value, p), name, "", usage)
}

// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash.
func TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) {
CommandLine.VarP(newTextValue(value, p), name, shorthand, usage)
}
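A brief, hedged sketch of the new `TextVar` support: any type that implements `encoding.TextMarshaler`/`encoding.TextUnmarshaler` can back a flag directly. Here `netip.Addr` from the standard library is used purely as an illustration; the flag name is made up.

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/spf13/pflag"
)

func main() {
	// Default value doubles as the TextMarshaler; &bindAddr is the TextUnmarshaler target.
	bindAddr := netip.MustParseAddr("127.0.0.1")
	pflag.TextVar(&bindAddr, "bind-address", bindAddr, "address to listen on")
	pflag.Parse()
	fmt.Println("listening on", bindAddr)
}
```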
118 vendor/github.com/spf13/pflag/time.go generated vendored Normal file
@@ -0,0 +1,118 @@
package pflag

import (
"fmt"
"strings"
"time"
)

// TimeValue adapts time.Time for use as a flag.
type timeValue struct {
*time.Time
formats []string
}

func newTimeValue(val time.Time, p *time.Time, formats []string) *timeValue {
*p = val
return &timeValue{
Time: p,
formats: formats,
}
}

// Set time.Time value from string based on accepted formats.
func (d *timeValue) Set(s string) error {
s = strings.TrimSpace(s)
for _, f := range d.formats {
v, err := time.Parse(f, s)
if err != nil {
continue
}
*d.Time = v
return nil
}

formatsString := ""
for i, f := range d.formats {
if i > 0 {
formatsString += ", "
}
formatsString += fmt.Sprintf("`%s`", f)
}

return fmt.Errorf("invalid time format `%s` must be one of: %s", s, formatsString)
}

// Type name for time.Time flags.
func (d *timeValue) Type() string {
return "time"
}

func (d *timeValue) String() string { return d.Time.Format(time.RFC3339Nano) }

// GetTime return the time value of a flag with the given name
func (f *FlagSet) GetTime(name string) (time.Time, error) {
flag := f.Lookup(name)
if flag == nil {
err := fmt.Errorf("flag accessed but not defined: %s", name)
return time.Time{}, err
}

if flag.Value.Type() != "time" {
err := fmt.Errorf("trying to get %s value of flag of type %s", "time", flag.Value.Type())
return time.Time{}, err
}

val, ok := flag.Value.(*timeValue)
if !ok {
return time.Time{}, fmt.Errorf("value %s is not a time", flag.Value)
}

return *val.Time, nil
}

// TimeVar defines a time.Time flag with specified name, default value, and usage string.
// The argument p points to a time.Time variable in which to store the value of the flag.
func (f *FlagSet) TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) {
f.TimeVarP(p, name, "", value, formats, usage)
}

// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) {
f.VarP(newTimeValue(value, p, formats), name, shorthand, usage)
}

// TimeVar defines a time.Time flag with specified name, default value, and usage string.
// The argument p points to a time.Time variable in which to store the value of the flag.
func TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) {
CommandLine.TimeVarP(p, name, "", value, formats, usage)
}

// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash.
func TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) {
CommandLine.VarP(newTimeValue(value, p, formats), name, shorthand, usage)
}

// Time defines a time.Time flag with specified name, default value, and usage string.
// The return value is the address of a time.Time variable that stores the value of the flag.
func (f *FlagSet) Time(name string, value time.Time, formats []string, usage string) *time.Time {
return f.TimeP(name, "", value, formats, usage)
}

// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time {
p := new(time.Time)
f.TimeVarP(p, name, shorthand, value, formats, usage)
return p
}

// Time defines a time.Time flag with specified name, default value, and usage string.
// The return value is the address of a time.Time variable that stores the value of the flag.
func Time(name string, value time.Time, formats []string, usage string) *time.Time {
return CommandLine.TimeP(name, "", value, formats, usage)
}

// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash.
func TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time {
return CommandLine.TimeP(name, shorthand, value, formats, usage)
}
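A hedged sketch of the new time flag: the caller lists the layouts that should be accepted and pflag parses the argument into a `time.Time`. The flag name and layouts below are placeholders.

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
)

func main() {
	// Accept either RFC 3339 timestamps or plain dates.
	since := pflag.Time("since", time.Now().Add(-24*time.Hour),
		[]string{time.RFC3339, "2006-01-02"}, "only include items newer than this timestamp")
	pflag.Parse()
	fmt.Println("since:", since.Format(time.RFC3339))
}
```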
54 vendor/github.com/tdewolff/minify/v2/README.md generated vendored
@@ -381,6 +381,24 @@ m.AddFunc("image/svg+xml", svg.Minify)
m.AddFuncRegexp(regexp.MustCompile("^(application|text)/(x-)?(java|ecma)script$"), js.Minify)
m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
m.AddFuncRegexp(regexp.MustCompile("[/+]xml$"), xml.Minify)

m.AddFunc("importmap", json.Minify)
m.AddFunc("speculationrules", json.Minify)

aspMinifier := &html.Minifier{}
aspMinifier.TemplateDelims = [2]string{"<%", "%>"}
m.Add("text/asp", aspMinifier)
m.Add("text/x-ejs-template", aspMinifier)

phpMinifier := &html.Minifier{}
phpMinifier.TemplateDelims = [2]string{"<?", "?>"} // also handles <?php
m.Add("application/x-httpd-php", phpMinifier)

tmplMinifier := &html.Minifier{}
tmplMinifier.TemplateDelims = [2]string{"{{", "}}"}
m.Add("text/x-go-template", tmplMinifier)
m.Add("text/x-mustache-template", tmplMinifier)
m.Add("text/x-handlebars-template", tmplMinifier)
```

You can set options to several minifiers.
@@ -514,6 +532,24 @@ func main() {
m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
m.AddFuncRegexp(regexp.MustCompile("[/+]xml$"), xml.Minify)

m.AddFunc("importmap", json.Minify)
m.AddFunc("speculationrules", json.Minify)

aspMinifier := &html.Minifier{}
aspMinifier.TemplateDelims = [2]string{"<%", "%>"}
m.Add("text/asp", aspMinifier)
m.Add("text/x-ejs-template", aspMinifier)

phpMinifier := &html.Minifier{}
phpMinifier.TemplateDelims = [2]string{"<?", "?>"} // also handles <?php
m.Add("application/x-httpd-php", phpMinifier)

tmplMinifier := &html.Minifier{}
tmplMinifier.TemplateDelims = [2]string{"{{", "}}"}
m.Add("text/x-go-template", tmplMinifier)
m.Add("text/x-mustache-template", tmplMinifier)
m.Add("text/x-handlebars-template", tmplMinifier)

if err := m.Minify("text/html", os.Stdout, os.Stdin); err != nil {
panic(err)
}
@@ -604,6 +640,24 @@ func main() {
m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
m.AddFuncRegexp(regexp.MustCompile("[/+]xml$"), xml.Minify)

m.AddFunc("importmap", json.Minify)
m.AddFunc("speculationrules", json.Minify)

aspMinifier := &html.Minifier{}
aspMinifier.TemplateDelims = [2]string{"<%", "%>"}
m.Add("text/asp", aspMinifier)
m.Add("text/x-ejs-template", aspMinifier)

phpMinifier := &html.Minifier{}
phpMinifier.TemplateDelims = [2]string{"<?", "?>"} // also handles <?php
m.Add("application/x-httpd-php", phpMinifier)

tmplMinifier := &html.Minifier{}
tmplMinifier.TemplateDelims = [2]string{"{{", "}}"}
m.Add("text/x-go-template", tmplMinifier)
m.Add("text/x-mustache-template", tmplMinifier)
m.Add("text/x-handlebars-template", tmplMinifier)

fs := http.FileServer(http.Dir("www/"))
http.Handle("/", m.MiddlewareWithError(fs))
}

16 vendor/github.com/uptrace/bun/CHANGELOG.md generated vendored
@@ -1,3 +1,19 @@
## [1.2.15](https://github.com/uptrace/bun/compare/v1.2.14...v1.2.15) (2025-07-17)


### Bug Fixes

* **pgdriver:** add mandatory space before negative numbers to resolve CVE-2024-34359 ([8067a8f](https://github.com/uptrace/bun/commit/8067a8f13f8d22fb57b76d6800f7aefc12b044cd))


### Features

* **db:** rename CleanQueryHook to ResetQueryHooks ([cb17679](https://github.com/uptrace/bun/commit/cb176796f5fbae8b3ea44e67875dd00ecf689425))
* **db:** support clean query hooks ([a5f19a7](https://github.com/uptrace/bun/commit/a5f19a7c0d68fd44eaff99ebaaeb88ca089d7538)), closes [#1226](https://github.com/uptrace/bun/issues/1226)
* **dialect:** return default on update/delete when create table ([d347b48](https://github.com/uptrace/bun/commit/d347b48c7764a23000a28ca3ad40368b8b89e298)), closes [#1212](https://github.com/uptrace/bun/issues/1212)



## [1.2.14](https://github.com/uptrace/bun/compare/v1.2.13...v1.2.14) (2025-06-16)
7 vendor/github.com/uptrace/bun/db.go generated vendored
@@ -235,6 +235,13 @@ func (db *DB) AddQueryHook(hook QueryHook) {
db.queryHooks = append(db.queryHooks, hook)
}

func (db *DB) ResetQueryHooks() {
for i := range db.queryHooks {
db.queryHooks[i] = nil
}
db.queryHooks = nil
}

func (db *DB) Table(typ reflect.Type) *schema.Table {
return db.dialect.Tables().Get(typ)
}
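A hypothetical sketch of the hook lifecycle that the new `ResetQueryHooks` (renamed from `CleanQueryHook` in bun v1.2.15) makes possible: attach a custom `bun.QueryHook`, then drop all registered hooks again. The logging hook and SQLite setup below are illustrative, not code from this repository.

```go
package main

import (
	"context"
	"database/sql"
	"log"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/sqlitedialect"
)

// logHook is a minimal bun.QueryHook that logs each executed query.
type logHook struct{}

func (logHook) BeforeQuery(ctx context.Context, _ *bun.QueryEvent) context.Context { return ctx }
func (logHook) AfterQuery(_ context.Context, e *bun.QueryEvent)                    { log.Println(e.Query) }

func main() {
	sqldb, err := sql.Open("sqlite3", ":memory:") // driver registration omitted in this sketch
	if err != nil {
		log.Fatal(err)
	}
	db := bun.NewDB(sqldb, sqlitedialect.New())

	db.AddQueryHook(logHook{})
	// ... run queries with the hook attached ...
	db.ResetQueryHooks() // drop all registered hooks again
}
```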
2 vendor/github.com/uptrace/bun/dialect/feature/feature.go generated vendored
@@ -41,6 +41,7 @@ const (
DeleteOrderLimit // DELETE ... ORDER BY ... LIMIT ...
DeleteReturning
AlterColumnExists // ADD/DROP COLUMN IF NOT EXISTS/IF EXISTS
FKDefaultOnAction // FK ON UPDATE/ON DELETE has default value: NO ACTION
)

type NotSupportError struct {
@@ -91,4 +92,5 @@ var flag2str = map[Feature]string{
DeleteOrderLimit: "DeleteOrderLimit",
DeleteReturning: "DeleteReturning",
AlterColumnExists: "AlterColumnExists",
FKDefaultOnAction: "FKDefaultOnAction",
}

1 vendor/github.com/uptrace/bun/dialect/pgdialect/dialect.go generated vendored
@@ -55,6 +55,7 @@ func New(opts ...DialectOption) *Dialect {
feature.SelectExists |
feature.GeneratedIdentity |
feature.CompositeIn |
feature.FKDefaultOnAction |
feature.DeleteReturning |
feature.AlterColumnExists

2 vendor/github.com/uptrace/bun/dialect/pgdialect/version.go generated vendored
@@ -2,5 +2,5 @@ package pgdialect

// Version is the current release version.
func Version() string {
return "1.2.14"
return "1.2.15"
}

5 vendor/github.com/uptrace/bun/dialect/sqlitedialect/dialect.go generated vendored
@@ -41,6 +41,7 @@ func New(opts ...DialectOption) *Dialect {
feature.SelectExists |
feature.AutoIncrement |
feature.CompositeIn |
feature.FKDefaultOnAction |
feature.DeleteReturning

for _, opt := range opts {
@@ -102,7 +103,7 @@ func (d *Dialect) AppendBytes(b []byte, bs []byte) []byte {
return b
}

func (d *Dialect) DefaultVarcharLen() int {
func (*Dialect) DefaultVarcharLen() int {
return 0
}

@@ -132,7 +133,7 @@ func (d *Dialect) AppendSequence(b []byte, table *schema.Table, field *schema.Fi
// DefaultSchemaName is the "schema-name" of the main database.
// The details might differ from other dialects, but for all means and purposes
// "main" is the default schema in an SQLite database.
func (d *Dialect) DefaultSchema() string {
func (*Dialect) DefaultSchema() string {
return "main"
}

2 vendor/github.com/uptrace/bun/dialect/sqlitedialect/version.go generated vendored
@@ -2,5 +2,5 @@ package sqlitedialect

// Version is the current release version.
func Version() string {
return "1.2.14"
return "1.2.15"
}

2 vendor/github.com/uptrace/bun/package.json generated vendored
@@ -1,6 +1,6 @@
{
"name": "gobun",
"version": "1.2.14",
"version": "1.2.15",
"main": "index.js",
"repository": "git@github.com:uptrace/bun.git",
"author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",

24 vendor/github.com/uptrace/bun/query_table_create.go generated vendored
@@ -318,15 +318,23 @@ func (q *CreateTableQuery) appendFKConstraintsRel(fmter schema.Formatter, b []by

for _, key := range keys {
if rel := relations[key]; rel.References() {
query := "(?) REFERENCES ? (?)"
args := []any{
Safe(appendColumns(nil, "", rel.BasePKs)),
rel.JoinTable.SQLName,
Safe(appendColumns(nil, "", rel.JoinPKs)),
}
if len(rel.OnUpdate) > 0 {
query += " ?"
args = append(args, Safe(rel.OnUpdate))
}
if len(rel.OnDelete) > 0 {
query += " ?"
args = append(args, Safe(rel.OnDelete))
}
b, err = q.appendFK(fmter, b, schema.QueryWithArgs{
Query: "(?) REFERENCES ? (?) ? ?",
Args: []interface{}{
Safe(appendColumns(nil, "", rel.BasePKs)),
rel.JoinTable.SQLName,
Safe(appendColumns(nil, "", rel.JoinPKs)),
Safe(rel.OnUpdate),
Safe(rel.OnDelete),
},
Query: query,
Args: args,
})
if err != nil {
return nil, err

7 vendor/github.com/uptrace/bun/schema/table.go generated vendored
@@ -11,6 +11,7 @@ import (

"github.com/jinzhu/inflection"

"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/internal/tagparser"
)

@@ -623,7 +624,10 @@ func (t *Table) belongsToRelation(field *Field) *Relation {
rel.Condition = field.Tag.Options["join_on"]
}

rel.OnUpdate = "ON UPDATE NO ACTION"
if t.dialect.Features().Has(feature.FKDefaultOnAction) {
rel.OnUpdate = "ON UPDATE NO ACTION"
rel.OnDelete = "ON DELETE NO ACTION"
}
if onUpdate, ok := field.Tag.Options["on_update"]; ok {
if len(onUpdate) > 1 {
panic(fmt.Errorf("bun: %s belongs-to %s: on_update option must be a single field", t.TypeName, field.GoName))
@@ -638,7 +642,6 @@ func (t *Table) belongsToRelation(field *Field) *Relation {
rel.OnUpdate = s
}

rel.OnDelete = "ON DELETE NO ACTION"
if onDelete, ok := field.Tag.Options["on_delete"]; ok {
if len(onDelete) > 1 {
panic(fmt.Errorf("bun: %s belongs-to %s: on_delete option must be a single field", t.TypeName, field.GoName))

2 vendor/github.com/uptrace/bun/version.go generated vendored
@@ -2,5 +2,5 @@ package bun

// Version is the current release version.
func Version() string {
return "1.2.14"
return "1.2.15"
}
30 vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE generated vendored
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

--------------------------------------------------------------------------------

Copyright 2009 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

13 vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go generated vendored
@@ -4,11 +4,9 @@
package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"

import (
"strings"
"sync"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"

"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/internal/global"

@@ -139,17 +137,6 @@ func WithoutScopeInfo() Option {
// have special behavior based on their name.
func WithNamespace(ns string) Option {
return optionFunc(func(cfg config) config {
if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme.
logDeprecatedLegacyScheme()
// Only sanitize if prometheus does not support UTF-8.
ns = model.EscapeName(ns, model.NameEscapingScheme)
}
if !strings.HasSuffix(ns, "_") {
// namespace and metric names should be separated with an underscore,
// adds a trailing underscore if there is not one already.
ns = ns + "_"
}

cfg.namespace = ns
return cfg
})
213 vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go generated vendored
@@ -16,6 +16,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"github.com/prometheus/otlptranslator"
"google.golang.org/protobuf/proto"

"go.opentelemetry.io/otel"
@@ -27,16 +28,12 @@ import (
)

const (
targetInfoMetricName = "target_info"
targetInfoDescription = "Target metadata"

scopeLabelPrefix = "otel_scope_"
scopeNameLabel = scopeLabelPrefix + "name"
scopeVersionLabel = scopeLabelPrefix + "version"
scopeSchemaLabel = scopeLabelPrefix + "schema_url"

traceIDExemplarKey = "trace_id"
spanIDExemplarKey = "span_id"
)

var metricsPool = sync.Pool{
@@ -93,12 +90,11 @@ type collector struct {
targetInfo prometheus.Metric
metricFamilies map[string]*dto.MetricFamily
resourceKeyVals keyVals
metricNamer otlptranslator.MetricNamer
labelNamer otlptranslator.LabelNamer
unitNamer otlptranslator.UnitNamer
}

// prometheus counters MUST have a _total suffix by default:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/compatibility/prometheus_and_openmetrics.md
const counterSuffix = "total"

// New returns a Prometheus Exporter.
func New(opts ...Option) (*Exporter, error) {
cfg := newConfig(opts...)
@@ -108,6 +104,12 @@ func New(opts ...Option) (*Exporter, error) {
// TODO (#3244): Enable some way to configure the reader, but not change temporality.
reader := metric.NewManualReader(cfg.readerOpts...)

utf8Allowed := model.NameValidationScheme == model.UTF8Validation // nolint:staticcheck // We need this check to keep supporting the legacy scheme.
if !utf8Allowed {
// Only sanitize if prometheus does not support UTF-8.
logDeprecatedLegacyScheme()
}
labelNamer := otlptranslator.LabelNamer{UTF8Allowed: utf8Allowed}
collector := &collector{
reader: reader,
disableTargetInfo: cfg.disableTargetInfo,
@@ -115,8 +117,18 @@ func New(opts ...Option) (*Exporter, error) {
withoutCounterSuffixes: cfg.withoutCounterSuffixes,
disableScopeInfo: cfg.disableScopeInfo,
metricFamilies: make(map[string]*dto.MetricFamily),
namespace: cfg.namespace,
namespace: labelNamer.Build(cfg.namespace),
resourceAttributesFilter: cfg.resourceAttributesFilter,
metricNamer: otlptranslator.MetricNamer{
Namespace: cfg.namespace,
// We decide whether to pass type and unit to the netricNamer based
// on whether units or counter suffixes are enabled, and keep this
// always enabled.
WithMetricSuffixes: true,
UTF8Allowed: utf8Allowed,
},
unitNamer: otlptranslator.UnitNamer{UTF8Allowed: utf8Allowed},
labelNamer: labelNamer,
}

if err := cfg.registerer.Register(collector); err != nil {
@@ -164,7 +176,11 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
defer c.mu.Unlock()

if c.targetInfo == nil && !c.disableTargetInfo {
targetInfo, err := createInfoMetric(targetInfoMetricName, targetInfoDescription, metrics.Resource)
targetInfo, err := c.createInfoMetric(
otlptranslator.TargetInfoMetricName,
targetInfoDescription,
metrics.Resource,
)
if err != nil {
// If the target info metric is invalid, disable sending it.
c.disableTargetInfo = true
@@ -195,7 +211,7 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel, scopeSchemaLabel)
kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version, scopeMetrics.Scope.SchemaURL)

attrKeys, attrVals := getAttrs(scopeMetrics.Scope.Attributes)
attrKeys, attrVals := getAttrs(scopeMetrics.Scope.Attributes, c.labelNamer)
for i := range attrKeys {
attrKeys[i] = scopeLabelPrefix + attrKeys[i]
}
@@ -211,7 +227,7 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
if typ == nil {
continue
}
name := c.getName(m, typ)
name := c.getName(m)

drop, help := c.validateMetrics(name, m.Description, typ)
if drop {
@@ -224,21 +240,21 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {

switch v := m.Data.(type) {
case metricdata.Histogram[int64]:
addHistogramMetric(ch, v, m, name, kv)
addHistogramMetric(ch, v, m, name, kv, c.labelNamer)
case metricdata.Histogram[float64]:
addHistogramMetric(ch, v, m, name, kv)
addHistogramMetric(ch, v, m, name, kv, c.labelNamer)
case metricdata.ExponentialHistogram[int64]:
addExponentialHistogramMetric(ch, v, m, name, kv)
addExponentialHistogramMetric(ch, v, m, name, kv, c.labelNamer)
case metricdata.ExponentialHistogram[float64]:
addExponentialHistogramMetric(ch, v, m, name, kv)
addExponentialHistogramMetric(ch, v, m, name, kv, c.labelNamer)
case metricdata.Sum[int64]:
addSumMetric(ch, v, m, name, kv)
addSumMetric(ch, v, m, name, kv, c.labelNamer)
case metricdata.Sum[float64]:
addSumMetric(ch, v, m, name, kv)
addSumMetric(ch, v, m, name, kv, c.labelNamer)
case metricdata.Gauge[int64]:
addGaugeMetric(ch, v, m, name, kv)
addGaugeMetric(ch, v, m, name, kv, c.labelNamer)
case metricdata.Gauge[float64]:
addGaugeMetric(ch, v, m, name, kv)
addGaugeMetric(ch, v, m, name, kv, c.labelNamer)
}
}
}
@@ -303,9 +319,10 @@ func addExponentialHistogramMetric[N int64 | float64](
m metricdata.Metrics,
name string,
kv keyVals,
labelNamer otlptranslator.LabelNamer,
) {
for _, dp := range histogram.DataPoints {
keys, values := getAttrs(dp.Attributes)
keys, values := getAttrs(dp.Attributes, labelNamer)
keys = append(keys, kv.keys...)
values = append(values, kv.vals...)

@@ -377,9 +394,10 @@ func addHistogramMetric[N int64 | float64](
m metricdata.Metrics,
name string,
kv keyVals,
labelNamer otlptranslator.LabelNamer,
) {
for _, dp := range histogram.DataPoints {
keys, values := getAttrs(dp.Attributes)
keys, values := getAttrs(dp.Attributes, labelNamer)
keys = append(keys, kv.keys...)
values = append(values, kv.vals...)

@@ -396,7 +414,7 @@ func addHistogramMetric[N int64 | float64](
otel.Handle(err)
continue
}
m = addExemplars(m, dp.Exemplars)
m = addExemplars(m, dp.Exemplars, labelNamer)
ch <- m
}
}
@@ -407,6 +425,7 @@ func addSumMetric[N int64 | float64](
m metricdata.Metrics,
name string,
kv keyVals,
labelNamer otlptranslator.LabelNamer,
) {
valueType := prometheus.CounterValue
if !sum.IsMonotonic {
@@ -414,7 +433,7 @@ func addSumMetric[N int64 | float64](
}

for _, dp := range sum.DataPoints {
keys, values := getAttrs(dp.Attributes)
keys, values := getAttrs(dp.Attributes, labelNamer)
keys = append(keys, kv.keys...)
values = append(values, kv.vals...)

@@ -427,7 +446,7 @@ func addSumMetric[N int64 | float64](
// GaugeValues don't support Exemplars at this time
// https://github.com/prometheus/client_golang/blob/aef8aedb4b6e1fb8ac1c90790645169125594096/prometheus/metric.go#L199
if valueType != prometheus.GaugeValue {
m = addExemplars(m, dp.Exemplars)
m = addExemplars(m, dp.Exemplars, labelNamer)
}
ch <- m
}
@@ -439,9 +458,10 @@ func addGaugeMetric[N int64 | float64](
m metricdata.Metrics,
name string,
kv keyVals,
labelNamer otlptranslator.LabelNamer,
) {
for _, dp := range gauge.DataPoints {
keys, values := getAttrs(dp.Attributes)
keys, values := getAttrs(dp.Attributes, labelNamer)
keys = append(keys, kv.keys...)
values = append(values, kv.vals...)

@@ -457,12 +477,12 @@ func addGaugeMetric[N int64 | float64](

// getAttrs converts the attribute.Set to two lists of matching Prometheus-style
// keys and values.
func getAttrs(attrs attribute.Set) ([]string, []string) {
func getAttrs(attrs attribute.Set, labelNamer otlptranslator.LabelNamer) ([]string, []string) {
keys := make([]string, 0, attrs.Len())
values := make([]string, 0, attrs.Len())
itr := attrs.Iter()

if model.NameValidationScheme == model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme.
if labelNamer.UTF8Allowed {
// Do not perform sanitization if prometheus supports UTF-8.
for itr.Next() {
kv := itr.Attribute()
@@ -475,7 +495,7 @@ func getAttrs(attrs attribute.Set) ([]string, []string) {
keysMap := make(map[string][]string)
for itr.Next() {
kv := itr.Attribute()
key := model.EscapeName(string(kv.Key), model.NameEscapingScheme)
key := labelNamer.Build(string(kv.Key))
if _, ok := keysMap[key]; !ok {
keysMap[key] = []string{kv.Value.Emit()}
} else {
@@ -492,91 +512,22 @@ func getAttrs(attrs attribute.Set) ([]string, []string) {
return keys, values
}

func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) {
keys, values := getAttrs(*res.Set())
func (c *collector) createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) {
keys, values := getAttrs(*res.Set(), c.labelNamer)
desc := prometheus.NewDesc(name, description, keys, nil)
return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
}

func unitMapGetOrDefault(unit string) string {
if promUnit, ok := unitSuffixes[unit]; ok {
return promUnit
}
return unit
}

var unitSuffixes = map[string]string{
// Time
"d": "days",
"h": "hours",
"min": "minutes",
"s": "seconds",
"ms": "milliseconds",
"us": "microseconds",
"ns": "nanoseconds",

// Bytes
"By": "bytes",
"KiBy": "kibibytes",
"MiBy": "mebibytes",
"GiBy": "gibibytes",
"TiBy": "tibibytes",
"KBy": "kilobytes",
"MBy": "megabytes",
"GBy": "gigabytes",
"TBy": "terabytes",

// SI
"m": "meters",
"V": "volts",
"A": "amperes",
"J": "joules",
"W": "watts",
"g": "grams",

// Misc
"Cel": "celsius",
"Hz": "hertz",
"1": "ratio",
"%": "percent",
}

// getName returns the sanitized name, prefixed with the namespace and suffixed with unit.
func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string {
name := m.Name
if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme.
// Only sanitize if prometheus does not support UTF-8.
logDeprecatedLegacyScheme()
name = model.EscapeName(name, model.NameEscapingScheme)
func (c *collector) getName(m metricdata.Metrics) string {
translatorMetric := otlptranslator.Metric{
Name: m.Name,
Type: c.namingMetricType(m),
}
addCounterSuffix := !c.withoutCounterSuffixes && *typ == dto.MetricType_COUNTER
if addCounterSuffix {
// Remove the _total suffix here, as we will re-add the total suffix
// later, and it needs to come after the unit suffix.
name = strings.TrimSuffix(name, counterSuffix)
// If the last character is an underscore, or would be converted to an underscore, trim it from the name.
// an underscore will be added back in later.
if convertsToUnderscore(rune(name[len(name)-1])) {
name = name[:len(name)-1]
}
if !c.withoutUnits {
translatorMetric.Unit = m.Unit
}
if c.namespace != "" {
name = c.namespace + name
}
if suffix := unitMapGetOrDefault(m.Unit); suffix != "" && !c.withoutUnits && !strings.HasSuffix(name, suffix) {
name += "_" + suffix
}
if addCounterSuffix {
name += "_" + counterSuffix
}
return name
}

// convertsToUnderscore returns true if the character would be converted to an
// underscore when the escaping scheme is underscore escaping. This is meant to
// capture any character that should be considered a "delimiter".
func convertsToUnderscore(b rune) bool {
return (b < 'a' || b > 'z') && (b < 'A' || b > 'Z') && b != ':' && (b < '0' || b > '9')
return c.metricNamer.Build(translatorMetric)
}

func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType {
@@ -601,12 +552,41 @@ func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType {
return nil
}

// namingMetricType provides the metric type for naming purposes.
func (c *collector) namingMetricType(m metricdata.Metrics) otlptranslator.MetricType {
switch v := m.Data.(type) {
case metricdata.ExponentialHistogram[int64], metricdata.ExponentialHistogram[float64]:
return otlptranslator.MetricTypeHistogram
case metricdata.Histogram[int64], metricdata.Histogram[float64]:
return otlptranslator.MetricTypeHistogram
case metricdata.Sum[float64]:
// If counter suffixes are disabled, treat them like non-monotonic
// suffixes for the purposes of naming.
if v.IsMonotonic && !c.withoutCounterSuffixes {
return otlptranslator.MetricTypeMonotonicCounter
}
return otlptranslator.MetricTypeNonMonotonicCounter
case metricdata.Sum[int64]:
// If counter suffixes are disabled, treat them like non-monotonic
// suffixes for the purposes of naming.
if v.IsMonotonic && !c.withoutCounterSuffixes {
return otlptranslator.MetricTypeMonotonicCounter
}
return otlptranslator.MetricTypeNonMonotonicCounter
case metricdata.Gauge[int64], metricdata.Gauge[float64]:
return otlptranslator.MetricTypeGauge
case metricdata.Summary:
return otlptranslator.MetricTypeSummary
}
return otlptranslator.MetricTypeUnknown
}

func (c *collector) createResourceAttributes(res *resource.Resource) {
c.mu.Lock()
defer c.mu.Unlock()

resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter)
resourceKeys, resourceValues := getAttrs(resourceAttrs)
resourceKeys, resourceValues := getAttrs(resourceAttrs, c.labelNamer)
c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues}
}

@@ -648,16 +628,20 @@ func (c *collector) validateMetrics(name, description string, metricType *dto.Me
return false, ""
}

func addExemplars[N int64 | float64](m prometheus.Metric, exemplars []metricdata.Exemplar[N]) prometheus.Metric {
func addExemplars[N int64 | float64](
m prometheus.Metric,
exemplars []metricdata.Exemplar[N],
labelNamer otlptranslator.LabelNamer,
) prometheus.Metric {
if len(exemplars) == 0 {
return m
}
promExemplars := make([]prometheus.Exemplar, len(exemplars))
for i, exemplar := range exemplars {
labels := attributesToLabels(exemplar.FilteredAttributes)
labels := attributesToLabels(exemplar.FilteredAttributes, labelNamer)
// Overwrite any existing trace ID or span ID attributes
labels[traceIDExemplarKey] = hex.EncodeToString(exemplar.TraceID[:])
labels[spanIDExemplarKey] = hex.EncodeToString(exemplar.SpanID[:])
labels[otlptranslator.ExemplarTraceIDKey] = hex.EncodeToString(exemplar.TraceID[:])
labels[otlptranslator.ExemplarSpanIDKey] = hex.EncodeToString(exemplar.SpanID[:])
promExemplars[i] = prometheus.Exemplar{
Value: float64(exemplar.Value),
Timestamp: exemplar.Time,
@@ -674,11 +658,10 @@ func addExemplars[N int64 | float64](m prometheus.Metric, exemplars []metricdata
return metricWithExemplar
}

func attributesToLabels(attrs []attribute.KeyValue) prometheus.Labels {
func attributesToLabels(attrs []attribute.KeyValue, labelNamer otlptranslator.LabelNamer) prometheus.Labels {
labels := make(map[string]string)
for _, attr := range attrs {
key := model.EscapeName(string(attr.Key), model.NameEscapingScheme)
labels[key] = attr.Value.Emit()
labels[labelNamer.Build(string(attr.Key))] = attr.Value.Emit()
}
return labels
}
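For context on the exporter changes above, a hedged sketch of the usual way the bumped OTel Prometheus exporter (v0.59.1) is wired into an SDK meter provider and scraped over HTTP. The namespace and port are placeholders, not values taken from GoToSocial.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	otelprom "go.opentelemetry.io/otel/exporters/prometheus"
	"go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// The exporter implements metric.Reader and registers its collector with
	// the default Prometheus registerer unless configured otherwise.
	exporter, err := otelprom.New(otelprom.WithNamespace("example"))
	if err != nil {
		log.Fatal(err)
	}
	provider := metric.NewMeterProvider(metric.WithReader(exporter))
	defer provider.Shutdown(context.Background())

	// Expose the registry for scraping.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}
```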
40 vendor/modules.txt vendored
@@ -292,7 +292,7 @@ codeberg.org/gruf/go-structr
# github.com/DmitriyVTitov/size v1.5.0
## explicit; go 1.14
github.com/DmitriyVTitov/size
# github.com/KimMachineGun/automemlimit v0.7.3
# github.com/KimMachineGun/automemlimit v0.7.4
## explicit; go 1.22.0
github.com/KimMachineGun/automemlimit/memlimit
# github.com/Masterminds/goutils v1.1.1
@@ -586,6 +586,9 @@ github.com/gorilla/sessions
# github.com/gorilla/websocket v1.5.3
## explicit; go 1.12
github.com/gorilla/websocket
# github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
## explicit; go 1.21
github.com/grafana/regexp
# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1
## explicit; go 1.23.0
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
@@ -644,7 +647,7 @@ github.com/k3a/html2text
github.com/klauspost/compress/internal/le
github.com/klauspost/compress/internal/race
github.com/klauspost/compress/s2
# github.com/klauspost/cpuid/v2 v2.2.10
# github.com/klauspost/cpuid/v2 v2.2.11
## explicit; go 1.22
github.com/klauspost/cpuid/v2
# github.com/kr/pretty v0.3.1
@@ -669,16 +672,16 @@ github.com/mattn/go-isatty
## explicit; go 1.19
github.com/microcosm-cc/bluemonday
github.com/microcosm-cc/bluemonday/css
# github.com/miekg/dns v1.1.66
# github.com/miekg/dns v1.1.67
## explicit; go 1.23.0
github.com/miekg/dns
# github.com/minio/crc64nvme v1.0.1
# github.com/minio/crc64nvme v1.0.2
## explicit; go 1.22
github.com/minio/crc64nvme
# github.com/minio/md5-simd v1.1.2
## explicit; go 1.14
github.com/minio/md5-simd
# github.com/minio/minio-go/v7 v7.0.94
# github.com/minio/minio-go/v7 v7.0.95
## explicit; go 1.23.0
github.com/minio/minio-go/v7
github.com/minio/minio-go/v7/internal/json
@@ -688,6 +691,7 @@ github.com/minio/minio-go/v7/pkg/encrypt
github.com/minio/minio-go/v7/pkg/kvcache
github.com/minio/minio-go/v7/pkg/lifecycle
github.com/minio/minio-go/v7/pkg/notification
github.com/minio/minio-go/v7/pkg/peeker
github.com/minio/minio-go/v7/pkg/replication
github.com/minio/minio-go/v7/pkg/s3utils
github.com/minio/minio-go/v7/pkg/set
@@ -695,7 +699,6 @@ github.com/minio/minio-go/v7/pkg/signer
github.com/minio/minio-go/v7/pkg/singleflight
github.com/minio/minio-go/v7/pkg/sse
github.com/minio/minio-go/v7/pkg/tags
github.com/minio/minio-go/v7/pkg/utils
# github.com/mitchellh/copystructure v1.2.0
## explicit; go 1.15
github.com/mitchellh/copystructure
@@ -744,7 +747,7 @@ github.com/pelletier/go-toml/v2/internal/characters
github.com/pelletier/go-toml/v2/internal/danger
github.com/pelletier/go-toml/v2/internal/tracker
github.com/pelletier/go-toml/v2/unstable
# github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c
# github.com/philhofer/fwd v1.2.0
## explicit; go 1.20
github.com/philhofer/fwd
# github.com/pkg/errors v0.9.1
@@ -774,7 +777,10 @@ github.com/prometheus/client_model/go
## explicit; go 1.23.0
github.com/prometheus/common/expfmt
github.com/prometheus/common/model
# github.com/prometheus/procfs v0.16.1
# github.com/prometheus/otlptranslator v0.0.0-20250717125610-8549f4ab4f8f
## explicit; go 1.23.0
github.com/prometheus/otlptranslator
# github.com/prometheus/procfs v0.17.0
## explicit; go 1.23.0
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
@@ -821,7 +827,7 @@ github.com/spf13/cast/internal
# github.com/spf13/cobra v1.9.1
## explicit; go 1.15
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.6
# github.com/spf13/pflag v1.0.7
## explicit; go 1.12
github.com/spf13/pflag
# github.com/spf13/viper v1.20.1
@@ -841,7 +847,7 @@ github.com/stretchr/testify/suite
# github.com/subosito/gotenv v1.6.0
## explicit; go 1.18
github.com/subosito/gotenv
# github.com/tdewolff/minify/v2 v2.23.8
# github.com/tdewolff/minify/v2 v2.23.9
## explicit; go 1.17
github.com/tdewolff/minify/v2
github.com/tdewolff/minify/v2/html
@@ -933,7 +939,7 @@ github.com/ugorji/go/codec
github.com/ulule/limiter/v3
github.com/ulule/limiter/v3/drivers/store/common
github.com/ulule/limiter/v3/drivers/store/memory
# github.com/uptrace/bun v1.2.14
# github.com/uptrace/bun v1.2.15
## explicit; go 1.23.0
github.com/uptrace/bun
github.com/uptrace/bun/dialect
@@ -947,13 +953,13 @@ github.com/uptrace/bun/internal/tagparser
github.com/uptrace/bun/migrate
github.com/uptrace/bun/migrate/sqlschema
github.com/uptrace/bun/schema
# github.com/uptrace/bun/dialect/pgdialect v1.2.14
# github.com/uptrace/bun/dialect/pgdialect v1.2.15
## explicit; go 1.23.0
github.com/uptrace/bun/dialect/pgdialect
# github.com/uptrace/bun/dialect/sqlitedialect v1.2.14
# github.com/uptrace/bun/dialect/sqlitedialect v1.2.15
## explicit; go 1.23.0
github.com/uptrace/bun/dialect/sqlitedialect
# github.com/uptrace/bun/extra/bunotel v1.2.14
# github.com/uptrace/bun/extra/bunotel v1.2.15
## explicit; go 1.23.0
github.com/uptrace/bun/extra/bunotel
# github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2
@@ -1069,7 +1075,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
# go.opentelemetry.io/otel/exporters/prometheus v0.59.0
# go.opentelemetry.io/otel/exporters/prometheus v0.59.1
## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/prometheus
# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.13.0
@@ -1162,7 +1168,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
# golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
## explicit; go 1.23.0
golang.org/x/exp/constraints
# golang.org/x/image v0.28.0
# golang.org/x/image v0.29.0
## explicit; go 1.23.0
golang.org/x/image/riff
golang.org/x/image/vp8
@@ -1173,7 +1179,7 @@ golang.org/x/image/webp
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
# golang.org/x/net v0.41.0
# golang.org/x/net v0.42.0
## explicit; go 1.23.0
golang.org/x/net/bpf
golang.org/x/net/context