diff --git a/cmd/gotosocial/action/migration/run.go b/cmd/gotosocial/action/migration/run.go new file mode 100644 index 000000000..61cec035b --- /dev/null +++ b/cmd/gotosocial/action/migration/run.go @@ -0,0 +1,65 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package migration + +import ( + "context" + "fmt" + + "code.superseriousbusiness.org/gotosocial/cmd/gotosocial/action" + "code.superseriousbusiness.org/gotosocial/internal/db/bundb" + "code.superseriousbusiness.org/gotosocial/internal/log" + "code.superseriousbusiness.org/gotosocial/internal/state" +) + +// Run will initialize the database, running any available migrations. +var Run action.GTSAction = func(ctx context.Context) error { + var state state.State + + defer func() { + if state.DB != nil { + // Lastly, if database service was started, + // ensure it gets closed now all else stopped. + if err := state.DB.Close(); err != nil { + log.Errorf(ctx, "error stopping database: %v", err) + } + } + + // Finally reached end of shutdown. + log.Info(ctx, "done! exiting...") + }() + + // Initialize caches + state.Caches.Init() + if err := state.Caches.Start(); err != nil { + return fmt.Errorf("error starting caches: %w", err) + } + + log.Info(ctx, "starting db service...") + + // Open connection to the database now caches started. + dbService, err := bundb.NewBunDBService(ctx, &state) + if err != nil { + return fmt.Errorf("error creating dbservice: %s", err) + } + + // Set DB on state. + state.DB = dbService + + return nil +} diff --git a/cmd/gotosocial/main.go b/cmd/gotosocial/main.go index 5124147a7..c9bcef2b8 100644 --- a/cmd/gotosocial/main.go +++ b/cmd/gotosocial/main.go @@ -55,6 +55,7 @@ func main() { rootCmd.AddCommand(serverCommands()) rootCmd.AddCommand(debugCommands()) rootCmd.AddCommand(adminCommands()) + rootCmd.AddCommand(migrationCommands()) // Testrigcmd will only be set when debug is enabled. if testrigCmd := testrigCommands(); testrigCmd != nil { diff --git a/cmd/gotosocial/migrations.go b/cmd/gotosocial/migrations.go new file mode 100644 index 000000000..bd30192ea --- /dev/null +++ b/cmd/gotosocial/migrations.go @@ -0,0 +1,43 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. 
+// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package main + +import ( + "code.superseriousbusiness.org/gotosocial/cmd/gotosocial/action/migration" + "github.com/spf13/cobra" +) + +// migrationCommands returns the 'migrations' subcommand +func migrationCommands() *cobra.Command { + migrationCmd := &cobra.Command{ + Use: "migrations", + Short: "gotosocial migrations-related tasks", + } + migrationRunCmd := &cobra.Command{ + Use: "run", + Short: "starts and stops the database, running any outstanding migrations", + PreRunE: func(cmd *cobra.Command, args []string) error { + return preRun(preRunArgs{cmd: cmd}) + }, + RunE: func(cmd *cobra.Command, args []string) error { + return run(cmd.Context(), migration.Run) + }, + } + migrationCmd.AddCommand(migrationRunCmd) + return migrationCmd +} diff --git a/docs/advanced/scraper_deterrence.md b/docs/advanced/scraper_deterrence.md index 0ec9f0724..e20e3f6c6 100644 --- a/docs/advanced/scraper_deterrence.md +++ b/docs/advanced/scraper_deterrence.md @@ -1,9 +1,9 @@ # Scraper Deterrence GoToSocial provides an optional proof-of-work based scraper and automated HTTP client deterrence that can be enabled on profile and status web views. The way -it works is that it generates a unique but deterministic challenge for each incoming HTTP request based on client information and current time, that-is a hex encoded SHA256 hash, and asks the client to find an addition to a portion of this that will generate a hex encoded SHA256 hash with a pre-determined number of leading '0' characters. This is served to the client as a minimal holding page with a single JavaScript worker that computes a solution to this. +it works is that it generates a unique but deterministic challenge for each incoming HTTP request based on client information and current time, that is, a hex encoded SHA256 hash. It then asks the client to find an integer addition to a portion of this that will generate an expected encoded hash result. This is served to the client as a minimal holding page with a single JavaScript worker that computes a solution to this. -The number of required leading '0' characters can be configured to your liking, where higher values take longer to solve, and lower values take less. But this is not exact, as the challenges themselves are random, so you can only effect the **average amount of time** it may take. If your challenges take too long to solve, you may deter users from accessing your web pages. And conversely, the longer it takes for a solution to be found, the more you'll be incurring costs for scrapers (and in some cases, causing their operation to time-out). That balance is up to you to configure, hence why this is an advanced feature. +The number of hash encode rounds the client is required to complete may be configured, where higher values will take the client longer to find a solution and vice versa. We also add a certain amount of jitter to make it harder for scrapers to "game" the algorithm. If your challenges take too long to solve, you may deter users from accessing your web pages. And conversely, the longer it takes for a solution to be found, the more you'll be incurring costs for scrapers (and in some cases, causing their operation to time-out). That balance is up to you to configure, hence why this is an advanced feature.
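To make the work/verification asymmetry concrete, here is a minimal, self-contained sketch of this kind of proof-of-work exchange. It is not the actual GoToSocial challenge format or verifier: the seed derivation, the nonce encoding, and the prefix-based target condition below are all illustrative assumptions, standing in for the "expected encoded hash result" described above.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strconv"
	"strings"
)

// solve brute-forces an integer nonce such that the hex-encoded SHA256 of
// seed+nonce satisfies a (hypothetical) target condition. The configured
// difficulty effectively controls how many hash rounds a client performs,
// on average, before it finds a solution.
func solve(seed, targetPrefix string) uint64 {
	for nonce := uint64(0); ; nonce++ {
		sum := sha256.Sum256([]byte(seed + strconv.FormatUint(nonce, 10)))
		if strings.HasPrefix(hex.EncodeToString(sum[:]), targetPrefix) {
			return nonce
		}
	}
}

// verify is the cheap server-side counterpart: a single hash to check a
// submitted nonce against the same condition.
func verify(seed string, nonce uint64, targetPrefix string) bool {
	sum := sha256.Sum256([]byte(seed + strconv.FormatUint(nonce, 10)))
	return strings.HasPrefix(hex.EncodeToString(sum[:]), targetPrefix)
}

func main() {
	seed := "hex-encoded-challenge-from-server" // hypothetical challenge value
	nonce := solve(seed, "0000")
	fmt.Println("nonce:", nonce, "valid:", verify(seed, nonce, "0000"))
}
```

The point of the scheme is that the client pays for many hash rounds up front, while the server verifies a submitted solution with a single hash.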
Once a solution to this challenge has been provided, by refreshing the page with the solution in the query parameter, GoToSocial will verify this solution and on success will return the expected profile / status page with a cookie that provides challenge-less access to the instance for up-to the next hour. diff --git a/example/config.yaml b/example/config.yaml index 2026f1a15..b41c0b06f 100644 --- a/example/config.yaml +++ b/example/config.yaml @@ -1307,10 +1307,9 @@ advanced-header-filter-mode: "" advanced-scraper-deterrence-enabled: false # Uint. Allows tweaking the difficulty of the proof-of-work algorithm -# used in the scraper deterrence. This determines how many leading '0' -# characters are required to be generated in each solution. Higher -# values will on-average take longer to find solutions for, and the -# inverse is also true. +# used in the scraper deterrence. This determines roughly how many hash +# encode rounds we require the client to complete to find a solution. +# Higher values will take longer to find solutions for, and vice-versa. # # The downside is that if your deterrence takes too long to solve, # it may deter some users from viewing your web status / profile page. @@ -1321,6 +1320,6 @@ advanced-scraper-deterrence-enabled: false # For more details please check the documentation at: # https://docs.gotosocial.org/en/latest/advanced/scraper_deterrence # -# Examples: [3, 4, 5] -# Default: 4 -advanced-scraper-deterrence-difficulty: 4 +# Examples: [50000, 100000, 500000] +# Default: 100000 +advanced-scraper-deterrence-difficulty: 100000 diff --git a/go.mod b/go.mod index 13851d066..a94500d69 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,7 @@ require ( code.superseriousbusiness.org/exif-terminator v0.11.0 code.superseriousbusiness.org/httpsig v1.4.0 code.superseriousbusiness.org/oauth2/v4 v4.8.0 + codeberg.org/gruf/go-bitutil v1.1.0 codeberg.org/gruf/go-bytesize v1.0.3 codeberg.org/gruf/go-byteutil v1.3.0 codeberg.org/gruf/go-cache/v3 v3.6.1 @@ -74,8 +75,8 @@ require ( github.com/uptrace/bun/extra/bunotel v1.2.11 github.com/wagslane/go-password-validator v0.3.0 github.com/yuin/goldmark v1.7.12 - go.opentelemetry.io/contrib/exporters/autoexport v0.60.0 - go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0 + go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 + go.opentelemetry.io/contrib/instrumentation/runtime v0.61.0 go.opentelemetry.io/otel v1.36.0 go.opentelemetry.io/otel/metric v1.36.0 go.opentelemetry.io/otel/sdk v1.36.0 @@ -109,7 +110,7 @@ require ( github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/bytedance/sonic v1.13.2 // indirect github.com/bytedance/sonic/loader v0.2.4 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudwego/base64x v0.1.5 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -153,7 +154,7 @@ require ( github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/gorilla/sessions v1.4.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/huandu/xstrings v1.4.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -186,9 +187,9 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.22.0 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.64.0 // indirect + github.com/prometheus/procfs v0.16.1 // indirect github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect @@ -210,30 +211,30 @@ require ( github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect go.mongodb.org/mongo-driver v1.17.3 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/bridges/prometheus v0.60.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.57.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0 // indirect - go.opentelemetry.io/otel/log v0.11.0 // indirect - go.opentelemetry.io/otel/sdk/log v0.11.0 // indirect - go.opentelemetry.io/proto/otlp v1.5.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.58.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 // indirect + go.opentelemetry.io/otel/log v0.12.2 // indirect + go.opentelemetry.io/otel/sdk/log v0.12.2 // indirect + go.opentelemetry.io/proto/otlp v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.16.0 // indirect golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect golang.org/x/mod v0.24.0 // indirect golang.org/x/sync v0.14.0 // indirect golang.org/x/tools v0.33.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect - google.golang.org/grpc v1.71.0 // indirect + google.golang.org/genproto/googleapis/api 
v0.0.0-20250519155744-55703ea1f237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect + google.golang.org/grpc v1.72.1 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect modernc.org/libc v1.65.7 // indirect diff --git a/go.sum b/go.sum index 7c60aa200..5122770f3 100644 --- a/go.sum +++ b/go.sum @@ -10,6 +10,8 @@ code.superseriousbusiness.org/httpsig v1.4.0 h1:g9+KQMoTG0oR0II5gYb5pVVdNjbc7Cii code.superseriousbusiness.org/httpsig v1.4.0/go.mod h1:i2AKpj/WbA/o/UTvia9TAREzt0jP1AH3T1Uxjyhdzlw= code.superseriousbusiness.org/oauth2/v4 v4.8.0 h1:4LVXoPJXKgmDfwDegzBQPNpsdleMaL6YmDgFi6UDgEE= code.superseriousbusiness.org/oauth2/v4 v4.8.0/go.mod h1:+RLRBXPkjP/VhIC/46dcZkx3t5IvBSJYOjVCPgeWors= +codeberg.org/gruf/go-bitutil v1.1.0 h1:U1Q+A1mtnPk+npqYrlRBc9ar2C5hYiBd17l1Wrp2Bt8= +codeberg.org/gruf/go-bitutil v1.1.0/go.mod h1:rGibFevYTQfYKcPv0Df5KpG8n5xC3AfD4d/UgYeoNy0= codeberg.org/gruf/go-bytesize v1.0.3 h1:Tz8tCxhPLeyM5VryuBNjUHgKmLj4Bx9RbPaUSA3qg6g= codeberg.org/gruf/go-bytesize v1.0.3/go.mod h1:n/GU8HzL9f3UNp/mUKyr1qVmTlj7+xacpp0OHfkvLPs= codeberg.org/gruf/go-byteutil v1.3.0 h1:nRqJnCcRQ7xbfU6azw7zOzJrSMDIJHBqX6FL9vEMYmU= @@ -86,8 +88,8 @@ github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1 github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= +github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= @@ -243,8 +245,8 @@ github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzq github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -350,12 +352,12 @@ github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4 github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= 
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= +github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b h1:aUNXCGgukb4gtY99imuIeoh8Vr0GSwAlYxPAhqZrpFc= @@ -497,50 +499,52 @@ go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeH go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/bridges/prometheus v0.60.0 h1:x7sPooQCwSg27SjtQee8GyIIRTQcF4s7eSkac6F2+VA= -go.opentelemetry.io/contrib/bridges/prometheus v0.60.0/go.mod h1:4K5UXgiHxV484efGs42ejD7E2J/sIlepYgdGoPXe7hE= -go.opentelemetry.io/contrib/exporters/autoexport v0.60.0 h1:GuQXpvSXNjpswpweIem84U9BNauqHHi2w1GtNAalvpM= -go.opentelemetry.io/contrib/exporters/autoexport v0.60.0/go.mod h1:CkmxekdHco4d7thFJNPQ7Mby4jMBgZUclnrxT4e+ryk= -go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0 h1:0NgN/3SYkqYJ9NBlDfl/2lzVlwos/YQLvi8sUrzJRBE= -go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0/go.mod h1:oxpUfhTkhgQaYIjtBt3T3w135dLoxq//qo3WPlPIKkE= +go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 h1:RyrtJzu5MAmIcbRrwg75b+w3RlZCP0vJByDVzcpAe3M= +go.opentelemetry.io/contrib/bridges/prometheus v0.61.0/go.mod h1:tirr4p9NXbzjlbruiRGp53IzlYrDk5CO2fdHj0sSSaY= +go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 h1:XfzKtKSrbtYk9TNCF8dkO0Y9M7IOfb4idCwBOTwGBiI= +go.opentelemetry.io/contrib/exporters/autoexport v0.61.0/go.mod h1:N6otC+qXTD5bAnbK2O1f/1SXq3cX+3KYSWrkBUqG0cw= +go.opentelemetry.io/contrib/instrumentation/runtime v0.61.0 h1:oIZsTHd0YcrvvUCN2AaQqyOcd685NQ+rFmrajveCIhA= +go.opentelemetry.io/contrib/instrumentation/runtime v0.61.0/go.mod h1:X4KSPIvxnY/G5c9UOGXtFoL91t1gmlHpDQzeK5Zc/Bw= go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 h1:HMUytBT3uGhPKYY/u/G5MR9itrlSO2SMOsSD3Tk3k7A= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0/go.mod 
h1:hdDXsiNLmdW/9BF2jQpnHHlhFajpWCEYfM6e5m2OAZg= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0 h1:C/Wi2F8wEmbxJ9Kuzw/nhP+Z9XaHYMkyDmXy6yR2cjw= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0/go.mod h1:0Lr9vmGKzadCTgsiBydxr6GEZ8SsZ7Ks53LzjWG5Ar4= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 h1:0NIXxOCFx+SKbhCVxwl3ETG8ClLPAa0KuKV6p3yhxP8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0/go.mod h1:ChZSJbbfbl/DcRZNc9Gqh6DYGlfjw4PvO1pEOZH1ZsE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk= -go.opentelemetry.io/otel/exporters/prometheus v0.57.0 h1:AHh/lAP1BHrY5gBwk8ncc25FXWm/gmmY3BX258z5nuk= -go.opentelemetry.io/otel/exporters/prometheus v0.57.0/go.mod h1:QpFWz1QxqevfjwzYdbMb4Y1NnlJvqSGwyuU0B4iuc9c= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0 h1:k6KdfZk72tVW/QVZf60xlDziDvYAePj5QHwoQvrB2m8= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0/go.mod h1:5Y3ZJLqzi/x/kYtrSrPSx7TFI/SGsL7q2kME027tH6I= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 h1:PB3Zrjs1sG1GBX51SXyTSoOTqcDglmsk7nT6tkKPb/k= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0/go.mod h1:U2R3XyVPzn0WX7wOIypPuptulsMcPDPs/oiSVOMVnHY= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0 h1:T0Ec2E+3YZf5bgTNQVet8iTDW7oIk03tXHq+wkwIDnE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0/go.mod h1:30v2gqH+vYGJsesLWFov8u47EpYTcIQcBjKpI6pJThg= -go.opentelemetry.io/otel/log v0.11.0 h1:c24Hrlk5WJ8JWcwbQxdBqxZdOK7PcP/LFtOtwpDTe3Y= -go.opentelemetry.io/otel/log v0.11.0/go.mod h1:U/sxQ83FPmT29trrifhQg+Zj2lo1/IPN1PF6RTFqdwc= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2/go.mod h1:DvPtKE63knkDVP88qpatBj81JxN+w1bqfVbsbCbj1WY= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 h1:tPLwQlXbJ8NSOfZc4OkgU5h2A38M4c9kfHSVc4PFQGs= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2/go.mod h1:QTnxBwT/1rBIgAG1goq6xMydfYOBKU6KTiYF4fp5zL8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 h1:zwdo1gS2eH26Rg+CoqVQpEK1h8gvt5qyU5Kk5Bixvow= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0/go.mod h1:rUKCPscaRWWcqGT6HnEmYrK+YNe5+Sw64xgQTOJ5b30= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 h1:gAU726w9J8fwr4qRDqu1GYMNNs4gXrU+Pv20/N1UpB4= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0/go.mod 
h1:RboSDkp7N292rgu+T0MgVt2qgFGu6qa1RpZDOtpL76w= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= +go.opentelemetry.io/otel/exporters/prometheus v0.58.0 h1:CJAxWKFIqdBennqxJyOgnt5LqkeFRT+Mz3Yjz3hL+h8= +go.opentelemetry.io/otel/exporters/prometheus v0.58.0/go.mod h1:7qo/4CLI+zYSNbv0GMNquzuss2FVZo3OYrGh96n4HNc= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 h1:12vMqzLLNZtXuXbJhSENRg+Vvx+ynNilV8twBLBsXMY= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2/go.mod h1:ZccPZoPOoq8x3Trik/fCsba7DEYDUnN6yX79pgp2BUQ= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 h1:G8Xec/SgZQricwWBJF/mHZc7A02YHedfFDENwJEdRA0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY= +go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc= +go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E= go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= -go.opentelemetry.io/otel/sdk/log v0.11.0 h1:7bAOpjpGglWhdEzP8z0VXc4jObOiDEwr3IYbhBnjk2c= -go.opentelemetry.io/otel/sdk/log v0.11.0/go.mod h1:dndLTxZbwBstZoqsJB3kGsRPkpAgaJrWfQg3lhlHFFY= +go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1xIZ/k6W0= +go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY= +go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc h1:uqxdywfHqqCl6LmZzI3pUnXT1RGFYyUgxj0AkWPFxi0= +go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc/go.mod h1:TY/N/FT7dmFrP/r5ym3g0yysP1DefqGpAZr4f82P0dE= go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= -go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= -go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI= +go.opentelemetry.io/proto/otlp v1.6.0/go.mod 
h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -648,12 +652,12 @@ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxb golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0= -google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= -google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= -google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/config/config.go b/internal/config/config.go index 5b8da9703..f051ab005 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -280,6 +280,6 @@ type ThrottlingConfig struct { } type ScraperDeterrenceConfig struct { - Enabled bool `name:"enabled" usage:"Enable proof-of-work based scraper deterrence on profile / status pages"` - Difficulty uint8 `name:"difficulty" usage:"The proof-of-work difficulty, which determines how many leading zeros to try solve in hash solutions."` + Enabled bool `name:"enabled" usage:"Enable proof-of-work based scraper deterrence on profile / status pages"` + Difficulty uint32 `name:"difficulty" usage:"The proof-of-work difficulty, which determines roughly how many hash-encode rounds required of each client."` } diff --git a/internal/config/defaults.go b/internal/config/defaults.go index 8372e4b00..ad124e90f 100644 --- a/internal/config/defaults.go +++ b/internal/config/defaults.go @@ -149,7 +149,7 @@ var Defaults = Configuration{ ScraperDeterrence: ScraperDeterrenceConfig{ Enabled: false, - Difficulty: 4, + Difficulty: 100000, }, }, diff --git a/internal/config/helpers.gen.go b/internal/config/helpers.gen.go index 269aa7abc..38f33c67e 100644 --- 
a/internal/config/helpers.gen.go +++ b/internal/config/helpers.gen.go @@ -144,7 +144,7 @@ func (cfg *Configuration) RegisterFlags(flags *pflag.FlagSet) { flags.Int("advanced-throttling-multiplier", cfg.Advanced.Throttling.Multiplier, "Multiplier to use per cpu for http request throttling. 0 or less turns throttling off.") flags.Duration("advanced-throttling-retry-after", cfg.Advanced.Throttling.RetryAfter, "Retry-After duration response to send for throttled requests.") flags.Bool("advanced-scraper-deterrence-enabled", cfg.Advanced.ScraperDeterrence.Enabled, "Enable proof-of-work based scraper deterrence on profile / status pages") - flags.Uint8("advanced-scraper-deterrence-difficulty", cfg.Advanced.ScraperDeterrence.Difficulty, "The proof-of-work difficulty, which determines how many leading zeros to try solve in hash solutions.") + flags.Uint32("advanced-scraper-deterrence-difficulty", cfg.Advanced.ScraperDeterrence.Difficulty, "The proof-of-work difficulty, which determines roughly how many hash-encode rounds required of each client.") flags.StringSlice("http-client-allow-ips", cfg.HTTPClient.AllowIPs, "") flags.StringSlice("http-client-block-ips", cfg.HTTPClient.BlockIPs, "") flags.Duration("http-client-timeout", cfg.HTTPClient.Timeout, "") @@ -1356,9 +1356,9 @@ func (cfg *Configuration) UnmarshalMap(cfgmap map[string]any) error { if ival, ok := cfgmap["advanced-scraper-deterrence-difficulty"]; ok { var err error - cfg.Advanced.ScraperDeterrence.Difficulty, err = cast.ToUint8E(ival) + cfg.Advanced.ScraperDeterrence.Difficulty, err = cast.ToUint32E(ival) if err != nil { - return fmt.Errorf("error casting %#v -> uint8 for 'advanced-scraper-deterrence-difficulty': %w", ival, err) + return fmt.Errorf("error casting %#v -> uint32 for 'advanced-scraper-deterrence-difficulty': %w", ival, err) } } @@ -4799,7 +4799,7 @@ func AdvancedScraperDeterrenceDifficultyFlag() string { } // GetAdvancedScraperDeterrenceDifficulty safely fetches the Configuration value for state's 'Advanced.ScraperDeterrence.Difficulty' field -func (st *ConfigState) GetAdvancedScraperDeterrenceDifficulty() (v uint8) { +func (st *ConfigState) GetAdvancedScraperDeterrenceDifficulty() (v uint32) { st.mutex.RLock() v = st.config.Advanced.ScraperDeterrence.Difficulty st.mutex.RUnlock() @@ -4807,7 +4807,7 @@ func (st *ConfigState) GetAdvancedScraperDeterrenceDifficulty() (v uint8) { } // SetAdvancedScraperDeterrenceDifficulty safely sets the Configuration value for state's 'Advanced.ScraperDeterrence.Difficulty' field -func (st *ConfigState) SetAdvancedScraperDeterrenceDifficulty(v uint8) { +func (st *ConfigState) SetAdvancedScraperDeterrenceDifficulty(v uint32) { st.mutex.Lock() defer st.mutex.Unlock() st.config.Advanced.ScraperDeterrence.Difficulty = v @@ -4815,12 +4815,12 @@ func (st *ConfigState) SetAdvancedScraperDeterrenceDifficulty(v uint8) { } // GetAdvancedScraperDeterrenceDifficulty safely fetches the value for global configuration 'Advanced.ScraperDeterrence.Difficulty' field -func GetAdvancedScraperDeterrenceDifficulty() uint8 { +func GetAdvancedScraperDeterrenceDifficulty() uint32 { return global.GetAdvancedScraperDeterrenceDifficulty() } // SetAdvancedScraperDeterrenceDifficulty safely sets the value for global configuration 'Advanced.ScraperDeterrence.Difficulty' field -func SetAdvancedScraperDeterrenceDifficulty(v uint8) { +func SetAdvancedScraperDeterrenceDifficulty(v uint32) { global.SetAdvancedScraperDeterrenceDifficulty(v) } diff --git a/internal/db/bundb/bundb.go b/internal/db/bundb/bundb.go index
8a3108ef2..bccf5ec98 100644 --- a/internal/db/bundb/bundb.go +++ b/internal/db/bundb/bundb.go @@ -336,7 +336,6 @@ func bunDB(sqldb *sql.DB, dialect func() schema.Dialect) *bun.DB { >smodel.ConversationToStatus{}, >smodel.StatusToEmoji{}, >smodel.StatusToTag{}, - >smodel.ThreadToStatus{}, } { db.RegisterModel(t) } diff --git a/internal/db/bundb/migrations/20231016113235_mute_status_thread.go b/internal/db/bundb/migrations/20231016113235_mute_status_thread.go index 44eed5c1d..6f7518ba1 100644 --- a/internal/db/bundb/migrations/20231016113235_mute_status_thread.go +++ b/internal/db/bundb/migrations/20231016113235_mute_status_thread.go @@ -21,7 +21,7 @@ import ( "context" "strings" - gtsmodel "code.superseriousbusiness.org/gotosocial/internal/gtsmodel" + gtsmodel "code.superseriousbusiness.org/gotosocial/internal/db/bundb/migrations/20231016113235_mute_status_thread" "code.superseriousbusiness.org/gotosocial/internal/log" "github.com/uptrace/bun" "github.com/uptrace/bun/dialect" diff --git a/internal/db/bundb/migrations/20231016113235_mute_status_thread/thread.go b/internal/db/bundb/migrations/20231016113235_mute_status_thread/thread.go new file mode 100644 index 000000000..5d5af1993 --- /dev/null +++ b/internal/db/bundb/migrations/20231016113235_mute_status_thread/thread.go @@ -0,0 +1,32 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package gtsmodel + +// Thread represents one thread of statuses. +// TODO: add more fields here if necessary. +type Thread struct { + ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database + StatusIDs []string `bun:"-"` // ids of statuses belonging to this thread (order not guaranteed) +} + +// ThreadToStatus is an intermediate struct to facilitate the +// many2many relationship between a thread and one or more statuses. +type ThreadToStatus struct { + ThreadID string `bun:"type:CHAR(26),unique:statusthread,nullzero,notnull"` + StatusID string `bun:"type:CHAR(26),unique:statusthread,nullzero,notnull"` +} diff --git a/internal/db/bundb/migrations/20231016113235_mute_status_thread/threadmute.go b/internal/db/bundb/migrations/20231016113235_mute_status_thread/threadmute.go new file mode 100644 index 000000000..170f568a1 --- /dev/null +++ b/internal/db/bundb/migrations/20231016113235_mute_status_thread/threadmute.go @@ -0,0 +1,29 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package gtsmodel + +import "time" + +// ThreadMute represents an account-level mute of a thread of statuses. +type ThreadMute struct { + ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database + CreatedAt time.Time `bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created + UpdatedAt time.Time `bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item last updated + ThreadID string `bun:"type:CHAR(26),nullzero,notnull,unique:thread_mute_thread_id_account_id"` // ID of the muted thread + AccountID string `bun:"type:CHAR(26),nullzero,notnull,unique:thread_mute_thread_id_account_id"` // Account ID of the creator of this mute +} diff --git a/internal/db/bundb/migrations/20250415111056_thread_all_statuses.go b/internal/db/bundb/migrations/20250415111056_thread_all_statuses.go new file mode 100644 index 000000000..fc02d1e40 --- /dev/null +++ b/internal/db/bundb/migrations/20250415111056_thread_all_statuses.go @@ -0,0 +1,584 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package migrations + +import ( + "context" + "database/sql" + "errors" + "reflect" + "slices" + "strings" + + "code.superseriousbusiness.org/gotosocial/internal/db" + newmodel "code.superseriousbusiness.org/gotosocial/internal/db/bundb/migrations/20250415111056_thread_all_statuses/new" + oldmodel "code.superseriousbusiness.org/gotosocial/internal/db/bundb/migrations/20250415111056_thread_all_statuses/old" + "code.superseriousbusiness.org/gotosocial/internal/gtserror" + "code.superseriousbusiness.org/gotosocial/internal/id" + "code.superseriousbusiness.org/gotosocial/internal/log" + "github.com/uptrace/bun" +) + +func init() { + up := func(ctx context.Context, db *bun.DB) error { + newType := reflect.TypeOf(&newmodel.Status{}) + + // Get the new column definition with not-null thread_id. + newColDef, err := getBunColumnDef(db, newType, "ThreadID") + if err != nil { + return gtserror.Newf("error getting bun column def: %w", err) + } + + // Update column def to use '${name}_new'. + newColDef = strings.Replace(newColDef, + "thread_id", "thread_id_new", 1) + + var sr statusRethreader + var count int + var maxID string + var statuses []*oldmodel.Status + + // Get a total count of all statuses before migration. 
+ total, err := db.NewSelect().Table("statuses").Count(ctx) + if err != nil { + return gtserror.Newf("error getting status table count: %w", err) + } + + // Start at largest + // possible ULID value. + maxID = id.Highest + + log.Warn(ctx, "rethreading top-level statuses, this will take a *long* time") + for /* TOP LEVEL STATUS LOOP */ { + + // Reset slice. + clear(statuses) + statuses = statuses[:0] + + // Select top-level statuses. + if err := db.NewSelect(). + Model(&statuses). + Column("id", "thread_id"). + + // We specifically use in_reply_to_account_id instead of in_reply_to_id as + // they should both be set / unset in unison, but we specifically have an + // index on in_reply_to_account_id with ID ordering, unlike in_reply_to_id. + Where("? IS NULL", bun.Ident("in_reply_to_account_id")). + Where("? < ?", bun.Ident("id"), maxID). + OrderExpr("? DESC", bun.Ident("id")). + Limit(5000). + Scan(ctx); err != nil && !errors.Is(err, sql.ErrNoRows) { + return gtserror.Newf("error selecting top level statuses: %w", err) + } + + // Reached end of block. + if len(statuses) == 0 { + break + } + + // Set next maxID value from statuses. + maxID = statuses[len(statuses)-1].ID + + // Rethread each selected batch of top-level statuses in a transaction. + if err := db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error { + + // Rethread each top-level status. + for _, status := range statuses { + n, err := sr.rethreadStatus(ctx, tx, status) + if err != nil { + return gtserror.Newf("error rethreading status %s: %w", status.URI, err) + } + count += n + } + + return nil + }); err != nil { + return err + } + + log.Infof(ctx, "[approx %d of %d] rethreading statuses (top-level)", count, total) + } + + // Attempt to merge any sqlite write-ahead-log. + if err := doWALCheckpoint(ctx, db); err != nil { + return err + } + + log.Warn(ctx, "rethreading straggler statuses, this will take a *long* time") + for /* STRAGGLER STATUS LOOP */ { + + // Reset slice. + clear(statuses) + statuses = statuses[:0] + + // Select straggler statuses. + if err := db.NewSelect(). + Model(&statuses). + Column("id", "in_reply_to_id", "thread_id"). + Where("? IS NULL", bun.Ident("thread_id")). + + // We select in smaller batches for this part + // of the migration as there is a chance that + // we may be fetching statuses that might be + // part of the same thread, i.e. one call to + // rethreadStatus() may affect other statuses + // later in the slice. + Limit(1000). + Scan(ctx); err != nil && !errors.Is(err, sql.ErrNoRows) { + return gtserror.Newf("error selecting straggler statuses: %w", err) + } + + // Reached end of block. + if len(statuses) == 0 { + break + } + + // Rethread each selected batch of straggler statuses in a transaction. + if err := db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error { + + // Rethread each straggler status. + for _, status := range statuses { + n, err := sr.rethreadStatus(ctx, tx, status) + if err != nil { + return gtserror.Newf("error rethreading status %s: %w", status.URI, err) + } + count += n + } + + return nil + }); err != nil { + return err + } + + log.Infof(ctx, "[approx %d of %d] rethreading statuses (stragglers)", count, total) + } + + // Attempt to merge any sqlite write-ahead-log. + if err := doWALCheckpoint(ctx, db); err != nil { + return err + } + + log.Info(ctx, "dropping old thread_to_statuses table") + if _, err := db.NewDropTable(). + Table("thread_to_statuses").
+ Exec(ctx); err != nil { + return gtserror.Newf("error dropping old thread_to_statuses table: %w", err) + } + + log.Info(ctx, "creating new statuses thread_id column") + if _, err := db.NewAddColumn(). + Table("statuses"). + ColumnExpr(newColDef). + Exec(ctx); err != nil { + return gtserror.Newf("error adding new thread_id column: %w", err) + } + + log.Info(ctx, "setting thread_id_new = thread_id (this may take a while...)") + if err := db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error { + return batchUpdateByID(ctx, tx, + "statuses", // table + "id", // batchByCol + "UPDATE ? SET ? = ?", // updateQuery + []any{bun.Ident("statuses"), + bun.Ident("thread_id_new"), + bun.Ident("thread_id")}, + ) + }); err != nil { + return err + } + + // Attempt to merge any sqlite write-ahead-log. + if err := doWALCheckpoint(ctx, db); err != nil { + return err + } + + log.Info(ctx, "dropping old statuses thread_id index") + if _, err := db.NewDropIndex(). + Index("statuses_thread_id_idx"). + Exec(ctx); err != nil { + return gtserror.Newf("error dropping old thread_id index: %w", err) + } + + log.Info(ctx, "dropping old statuses thread_id column") + if _, err := db.NewDropColumn(). + Table("statuses"). + Column("thread_id"). + Exec(ctx); err != nil { + return gtserror.Newf("error dropping old thread_id column: %w", err) + } + + log.Info(ctx, "renaming thread_id_new to thread_id") + if _, err := db.NewRaw( + "ALTER TABLE ? RENAME COLUMN ? TO ?", + bun.Ident("statuses"), + bun.Ident("thread_id_new"), + bun.Ident("thread_id"), + ).Exec(ctx); err != nil { + return gtserror.Newf("error renaming new column: %w", err) + } + + log.Info(ctx, "creating new statuses thread_id index") + if _, err := db.NewCreateIndex(). + Table("statuses"). + Index("statuses_thread_id_idx"). + Column("thread_id"). + Exec(ctx); err != nil { + return gtserror.Newf("error creating new thread_id index: %w", err) + } + + return nil + } + + down := func(ctx context.Context, db *bun.DB) error { + return nil + } + + if err := Migrations.Register(up, down); err != nil { + panic(err) + } +} + +type statusRethreader struct { + // the unique status and thread IDs + // of all models passed to append(). + // these are later used to update all + // statuses to a single thread ID, and + // update all thread related models to + // use the new updated thread ID. + statusIDs []string + threadIDs []string + + // stores the unseen IDs of status + // InReplyTos newly tracked in append(), + // which is then used for a SELECT query + // in getParents(), then promptly reset. + inReplyToIDs []string + + // statuses simply provides a reusable + // slice of status models for selects. + // its contents are ephemeral. + statuses []*oldmodel.Status + + // seenIDs tracks the unique status and + // thread IDs we have seen, ensuring we + // don't append duplicates to statusIDs + // or threadIDs slices. also helps prevent + // adding duplicate parents to inReplyToIDs. + seenIDs map[string]struct{} + + // allThreaded tracks whether every status + // passed to append() has a thread ID set. + // together with len(threadIDs) this can + // determine if already threaded correctly. + allThreaded bool +} + +// rethreadStatus is the main logic handler for statusRethreader{}. this is what gets called from the migration +// in order to trigger a status rethreading operation for the given status, returning total number rethreaded. 
+func (sr *statusRethreader) rethreadStatus(ctx context.Context, tx bun.Tx, status *oldmodel.Status) (int, error) { + + // Zero slice and + // map ptr values. + clear(sr.statusIDs) + clear(sr.threadIDs) + clear(sr.statuses) + clear(sr.seenIDs) + + // Reset slices and values for use. + sr.statusIDs = sr.statusIDs[:0] + sr.threadIDs = sr.threadIDs[:0] + sr.statuses = sr.statuses[:0] + sr.allThreaded = true + + if sr.seenIDs == nil { + // Allocate new hash set for status IDs. + sr.seenIDs = make(map[string]struct{}) + } + + // Ensure the passed status + // has up-to-date information. + // This may have changed from + // the initial batch selection + // to the rethreadStatus() call. + if err := tx.NewSelect(). + Model(status). + Column("in_reply_to_id", "thread_id"). + Where("? = ?", bun.Ident("id"), status.ID). + Scan(ctx); err != nil { + return 0, gtserror.Newf("error selecting status: %w", err) + } + + // status and thread ID cursor + // index values. these are used + // to keep track of newly loaded + // status / thread IDs between + // loop iterations. + var statusIdx int + var threadIdx int + + // Append given status as + // first to our ID slices. + sr.append(status) + + for { + // Fetch parents for newly seen in_reply_tos since last loop. + if err := sr.getParents(ctx, tx); err != nil { + return 0, gtserror.Newf("error getting parents: %w", err) + } + + // Fetch children for newly seen statuses since last loop. + if err := sr.getChildren(ctx, tx, statusIdx); err != nil { + return 0, gtserror.Newf("error getting children: %w", err) + } + + // Check for newly picked-up threads + // to find stragglers for below. Else + // we've reached end of what we can do. + if threadIdx >= len(sr.threadIDs) { + break + } + + // Update status IDs cursor. + statusIdx = len(sr.statusIDs) + + // Fetch any stragglers for newly seen threads since last loop. + if err := sr.getStragglers(ctx, tx, threadIdx); err != nil { + return 0, gtserror.Newf("error getting stragglers: %w", err) + } + + // Check for newly picked-up straggling statuses / replies to + // find parents / children for. Else we've done all we can do. + if statusIdx >= len(sr.statusIDs) && len(sr.inReplyToIDs) == 0 { + break + } + + // Update thread IDs cursor. + threadIdx = len(sr.threadIDs) + } + + // Total number of + // statuses threaded. + total := len(sr.statusIDs) + + // Check for the case where the entire + // batch of statuses is already correctly + // threaded. Then we have nothing to do! + if sr.allThreaded && len(sr.threadIDs) == 1 { + return 0, nil + } + + // Sort all of the threads and + // status IDs by age; old -> new. + slices.Sort(sr.threadIDs) + slices.Sort(sr.statusIDs) + + var threadID string + + if len(sr.threadIDs) > 0 { + // Regardless of whether there ended up being + // multiple threads, we take the oldest value + // thread ID to use for entire batch of them. + threadID = sr.threadIDs[0] + sr.threadIDs = sr.threadIDs[1:] + } + + if threadID == "" { + // None of the previous parents were threaded, we instead + // generate new thread with ID based on oldest creation time. + createdAt, err := id.TimeFromULID(sr.statusIDs[0]) + if err != nil { + return 0, gtserror.Newf("error parsing status ulid: %w", err) + } + + // Generate thread ID from parsed time. + threadID = id.NewULIDFromTime(createdAt) + + // We need to create a + // new thread table entry. + if _, err = tx.NewInsert(). + Model(&newmodel.Thread{ID: threadID}). 
+ Exec(ctx); err != nil { + return 0, gtserror.Newf("error creating new thread: %w", err) + } + } + + // Update all the statuses to + // use determined thread_id. + if _, err := tx.NewUpdate(). + Table("statuses"). + Where("? IN (?)", bun.Ident("id"), bun.In(sr.statusIDs)). + Set("? = ?", bun.Ident("thread_id"), threadID). + Exec(ctx); err != nil { + return 0, gtserror.Newf("error updating status thread ids: %w", err) + } + + if len(sr.threadIDs) > 0 { + // Update any existing thread + // mutes to use latest thread_id. + if _, err := tx.NewUpdate(). + Table("thread_mutes"). + Where("? IN (?)", bun.Ident("thread_id"), bun.In(sr.threadIDs)). + Set("? = ?", bun.Ident("thread_id"), threadID). + Exec(ctx); err != nil { + return 0, gtserror.Newf("error updating mute thread ids: %w", err) + } + } + + return total, nil +} + +// append will append the given status to the internal tracking of statusRethreader{} for +// potential future operations, checking for uniqueness. it tracks the inReplyToID value +// for the next call to getParents(), it tracks the status ID for list of statuses that +// need updating, the thread ID for the list of thread links and mutes that need updating, +// and whether the statuses all have a provided thread ID (i.e. allThreaded). +func (sr *statusRethreader) append(status *oldmodel.Status) { + + // Check if status already seen before. + if _, ok := sr.seenIDs[status.ID]; ok { + return + } + + if status.InReplyToID != "" { + // Status has a parent, add any unique parent ID + // to list of reply IDs that need to be queried. + if _, ok := sr.seenIDs[status.InReplyToID]; !ok { + sr.inReplyToIDs = append(sr.inReplyToIDs, status.InReplyToID) + } + } + + // Add status' ID to list of seen status IDs. + sr.statusIDs = append(sr.statusIDs, status.ID) + + if status.ThreadID != "" { + // Status was threaded, add any unique thread + // ID to our list of known status thread IDs. + if _, ok := sr.seenIDs[status.ThreadID]; !ok { + sr.threadIDs = append(sr.threadIDs, status.ThreadID) + } + } else { + // Status was not threaded, + // we now know not all statuses + // found were threaded. + sr.allThreaded = false + } + + // Add status ID to map of seen IDs. + sr.seenIDs[status.ID] = struct{}{} +} + +func (sr *statusRethreader) getParents(ctx context.Context, tx bun.Tx) error { + var parent oldmodel.Status + + // Iteratively query parent for each stored + // reply ID. Note this is safe to do as slice + // loop since 'seenIDs' prevents duplicates. + for i := 0; i < len(sr.inReplyToIDs); i++ { + + // Get next in-reply-to ID. + id := sr.inReplyToIDs[i] + + // Select next parent status. + if err := tx.NewSelect(). + Model(&parent). + Column("id", "in_reply_to_id", "thread_id"). + Where("? = ?", bun.Ident("id"), id). + Scan(ctx); err != nil && err != db.ErrNoEntries { + return err + } + + // Parent was missing. + if parent.ID == "" { + continue + } + + // Add to slices. + sr.append(&parent) + } + + // Reset reply slice. + clear(sr.inReplyToIDs) + sr.inReplyToIDs = sr.inReplyToIDs[:0] + + return nil +} + +func (sr *statusRethreader) getChildren(ctx context.Context, tx bun.Tx, idx int) error { + // Iteratively query all children for each + // of fetched parent statuses. Note this is + // safe to do as a slice loop since 'seenIDs' + // ensures it only ever contains unique IDs. + for i := idx; i < len(sr.statusIDs); i++ { + + // Get next status ID. + id := sr.statusIDs[i] + + // Reset child slice. + clear(sr.statuses) + sr.statuses = sr.statuses[:0] + + // Select children of ID.
+ if err := tx.NewSelect(). + Model(&sr.statuses). + Column("id", "thread_id"). + Where("? = ?", bun.Ident("in_reply_to_id"), id). + Scan(ctx); err != nil && err != db.ErrNoEntries { + return err + } + + // Append child status IDs to slices. + for _, child := range sr.statuses { + sr.append(child) + } + } + + return nil +} + +func (sr *statusRethreader) getStragglers(ctx context.Context, tx bun.Tx, idx int) error { + // Check for threads to query. + if idx >= len(sr.threadIDs) { + return nil + } + + // Reset status slice. + clear(sr.statuses) + sr.statuses = sr.statuses[:0] + + // Select stragglers that + // also have thread IDs. + if err := tx.NewSelect(). + Model(&sr.statuses). + Column("id", "thread_id", "in_reply_to_id"). + Where("? IN (?) AND ? NOT IN (?)", + bun.Ident("thread_id"), + bun.In(sr.threadIDs[idx:]), + bun.Ident("id"), + bun.In(sr.statusIDs), + ). + Scan(ctx); err != nil && err != db.ErrNoEntries { + return err + } + + // Append status IDs to slices. + for _, status := range sr.statuses { + sr.append(status) + } + + return nil +} diff --git a/internal/db/bundb/migrations/20250415111056_thread_all_statuses/new/status.go b/internal/db/bundb/migrations/20250415111056_thread_all_statuses/new/status.go new file mode 100644 index 000000000..a03e93859 --- /dev/null +++ b/internal/db/bundb/migrations/20250415111056_thread_all_statuses/new/status.go @@ -0,0 +1,133 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package gtsmodel + +import ( + "time" +) + +// Status represents a user-created 'post' or 'status' in the database, either remote or local +type Status struct { + ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database + CreatedAt time.Time `bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created + EditedAt time.Time `bun:"type:timestamptz,nullzero"` // when this status was last edited (if set) + FetchedAt time.Time `bun:"type:timestamptz,nullzero"` // when was item (remote) last fetched. + PinnedAt time.Time `bun:"type:timestamptz,nullzero"` // Status was pinned by owning account at this time. + URI string `bun:",unique,nullzero,notnull"` // activitypub URI of this status + URL string `bun:",nullzero"` // web url for viewing this status + Content string `bun:""` // Content HTML for this status. + AttachmentIDs []string `bun:"attachments,array"` // Database IDs of any media attachments associated with this status + TagIDs []string `bun:"tags,array"` // Database IDs of any tags used in this status + MentionIDs []string `bun:"mentions,array"` // Database IDs of any mentions in this status + EmojiIDs []string `bun:"emojis,array"` // Database IDs of any emojis used in this status + Local *bool `bun:",nullzero,notnull,default:false"` // is this status from a local account? 
+ AccountID string `bun:"type:CHAR(26),nullzero,notnull"` // which account posted this status? + AccountURI string `bun:",nullzero,notnull"` // activitypub uri of the owner of this status + InReplyToID string `bun:"type:CHAR(26),nullzero"` // id of the status this status replies to + InReplyToURI string `bun:",nullzero"` // activitypub uri of the status this status is a reply to + InReplyToAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that this status replies to + InReplyTo *Status `bun:"-"` // status corresponding to inReplyToID + BoostOfID string `bun:"type:CHAR(26),nullzero"` // id of the status this status is a boost of + BoostOfURI string `bun:"-"` // URI of the status this status is a boost of; field not inserted in the db, just for dereferencing purposes. + BoostOfAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that owns the boosted status + BoostOf *Status `bun:"-"` // status that corresponds to boostOfID + ThreadID string `bun:"type:CHAR(26),nullzero,notnull,default:00000000000000000000000000"` // id of the thread to which this status belongs + EditIDs []string `bun:"edits,array"` // + PollID string `bun:"type:CHAR(26),nullzero"` // + ContentWarning string `bun:",nullzero"` // Content warning HTML for this status. + ContentWarningText string `bun:""` // Original text of the content warning without formatting + Visibility Visibility `bun:",nullzero,notnull"` // visibility entry for this status + Sensitive *bool `bun:",nullzero,notnull,default:false"` // mark the status as sensitive? + Language string `bun:",nullzero"` // what language is this status written in? + CreatedWithApplicationID string `bun:"type:CHAR(26),nullzero"` // Which application was used to create this status? + ActivityStreamsType string `bun:",nullzero,notnull"` // What is the activitystreams type of this status? See: https://www.w3.org/TR/activitystreams-vocabulary/#object-types. Will probably almost always be Note but who knows!. + Text string `bun:""` // Original text of the status without formatting + ContentType StatusContentType `bun:",nullzero"` // Content type used to process the original text of the status + Federated *bool `bun:",notnull"` // This status will be federated beyond the local timeline(s) + PendingApproval *bool `bun:",nullzero,notnull,default:false"` // If true then status is a reply or boost wrapper that must be Approved by the reply-ee or boost-ee before being fully distributed. + PreApproved bool `bun:"-"` // If true, then status is a reply to or boost wrapper of a status on our instance, has permission to do the interaction, and an Accept should be sent out for it immediately. Field not stored in the DB. + ApprovedByURI string `bun:",nullzero"` // URI of an Accept Activity that approves the Announce or Create Activity that this status was/will be attached to. +} + +// enumType is the type we (at least, should) use +// for database enum types. it is the largest size +// supported by a PostgreSQL SMALLINT, since an +// SQLite SMALLINT is actually variable in size. +type enumType int16 + +// Visibility represents the +// visibility granularity of a status. +type Visibility enumType + +const ( + // VisibilityNone means nobody can see this. + // It's only used for web status visibility. + VisibilityNone Visibility = 1 + + // VisibilityPublic means this status will + // be visible to everyone on all timelines. 
+ VisibilityPublic Visibility = 2 + + // VisibilityUnlocked means this status will be visible to everyone, + // but will only show on home timeline to followers, and in lists. + VisibilityUnlocked Visibility = 3 + + // VisibilityFollowersOnly means this status is viewable to followers only. + VisibilityFollowersOnly Visibility = 4 + + // VisibilityMutualsOnly means this status + // is visible to mutual followers only. + VisibilityMutualsOnly Visibility = 5 + + // VisibilityDirect means this status is + // visible only to mentioned recipients. + VisibilityDirect Visibility = 6 + + // VisibilityDefault is used when no other setting can be found. + VisibilityDefault Visibility = VisibilityUnlocked +) + +// String returns a stringified, frontend API compatible form of Visibility. +func (v Visibility) String() string { + switch v { + case VisibilityNone: + return "none" + case VisibilityPublic: + return "public" + case VisibilityUnlocked: + return "unlocked" + case VisibilityFollowersOnly: + return "followers_only" + case VisibilityMutualsOnly: + return "mutuals_only" + case VisibilityDirect: + return "direct" + default: + panic("invalid visibility") + } +} + +// StatusContentType is the content type with which a status's text is +// parsed. Can be either plain or markdown. Empty will default to plain. +type StatusContentType enumType + +const ( + StatusContentTypePlain StatusContentType = 1 + StatusContentTypeMarkdown StatusContentType = 2 + StatusContentTypeDefault = StatusContentTypePlain +) diff --git a/internal/db/bundb/migrations/20250415111056_thread_all_statuses/new/thread.go b/internal/db/bundb/migrations/20250415111056_thread_all_statuses/new/thread.go new file mode 100644 index 000000000..319752476 --- /dev/null +++ b/internal/db/bundb/migrations/20250415111056_thread_all_statuses/new/thread.go @@ -0,0 +1,24 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package gtsmodel + +// Thread represents one thread of statuses. +// TODO: add more fields here if necessary. 
+type Thread struct { + ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database +} diff --git a/internal/db/bundb/migrations/20250415111056_thread_all_statuses/old/status.go b/internal/db/bundb/migrations/20250415111056_thread_all_statuses/old/status.go new file mode 100644 index 000000000..f33a2b29e --- /dev/null +++ b/internal/db/bundb/migrations/20250415111056_thread_all_statuses/old/status.go @@ -0,0 +1,131 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package gtsmodel + +import ( + "time" +) + +// Status represents a user-created 'post' or 'status' in the database, either remote or local +type Status struct { + ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database + CreatedAt time.Time `bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created + EditedAt time.Time `bun:"type:timestamptz,nullzero"` // when this status was last edited (if set) + FetchedAt time.Time `bun:"type:timestamptz,nullzero"` // when was item (remote) last fetched. + PinnedAt time.Time `bun:"type:timestamptz,nullzero"` // Status was pinned by owning account at this time. + URI string `bun:",unique,nullzero,notnull"` // activitypub URI of this status + URL string `bun:",nullzero"` // web url for viewing this status + Content string `bun:""` // Content HTML for this status. + AttachmentIDs []string `bun:"attachments,array"` // Database IDs of any media attachments associated with this status + TagIDs []string `bun:"tags,array"` // Database IDs of any tags used in this status + MentionIDs []string `bun:"mentions,array"` // Database IDs of any mentions in this status + EmojiIDs []string `bun:"emojis,array"` // Database IDs of any emojis used in this status + Local *bool `bun:",nullzero,notnull,default:false"` // is this status from a local account? + AccountID string `bun:"type:CHAR(26),nullzero,notnull"` // which account posted this status? + AccountURI string `bun:",nullzero,notnull"` // activitypub uri of the owner of this status + InReplyToID string `bun:"type:CHAR(26),nullzero"` // id of the status this status replies to + InReplyToURI string `bun:",nullzero"` // activitypub uri of the status this status is a reply to + InReplyToAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that this status replies to + BoostOfID string `bun:"type:CHAR(26),nullzero"` // id of the status this status is a boost of + BoostOfURI string `bun:"-"` // URI of the status this status is a boost of; field not inserted in the db, just for dereferencing purposes. 
+ BoostOfAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that owns the boosted status + ThreadID string `bun:"type:CHAR(26),nullzero"` // id of the thread to which this status belongs; only set for remote statuses if a local account is involved at some point in the thread, otherwise null + EditIDs []string `bun:"edits,array"` // + PollID string `bun:"type:CHAR(26),nullzero"` // + ContentWarning string `bun:",nullzero"` // Content warning HTML for this status. + ContentWarningText string `bun:""` // Original text of the content warning without formatting + Visibility Visibility `bun:",nullzero,notnull"` // visibility entry for this status + Sensitive *bool `bun:",nullzero,notnull,default:false"` // mark the status as sensitive? + Language string `bun:",nullzero"` // what language is this status written in? + CreatedWithApplicationID string `bun:"type:CHAR(26),nullzero"` // Which application was used to create this status? + ActivityStreamsType string `bun:",nullzero,notnull"` // What is the activitystreams type of this status? See: https://www.w3.org/TR/activitystreams-vocabulary/#object-types. Will probably almost always be Note but who knows!. + Text string `bun:""` // Original text of the status without formatting + ContentType StatusContentType `bun:",nullzero"` // Content type used to process the original text of the status + Federated *bool `bun:",notnull"` // This status will be federated beyond the local timeline(s) + PendingApproval *bool `bun:",nullzero,notnull,default:false"` // If true then status is a reply or boost wrapper that must be Approved by the reply-ee or boost-ee before being fully distributed. + PreApproved bool `bun:"-"` // If true, then status is a reply to or boost wrapper of a status on our instance, has permission to do the interaction, and an Accept should be sent out for it immediately. Field not stored in the DB. + ApprovedByURI string `bun:",nullzero"` // URI of an Accept Activity that approves the Announce or Create Activity that this status was/will be attached to. +} + +// enumType is the type we (at least, should) use +// for database enum types. it is the largest size +// supported by a PostgreSQL SMALLINT, since an +// SQLite SMALLINT is actually variable in size. +type enumType int16 + +// Visibility represents the +// visibility granularity of a status. +type Visibility enumType + +const ( + // VisibilityNone means nobody can see this. + // It's only used for web status visibility. + VisibilityNone Visibility = 1 + + // VisibilityPublic means this status will + // be visible to everyone on all timelines. + VisibilityPublic Visibility = 2 + + // VisibilityUnlocked means this status will be visible to everyone, + // but will only show on home timeline to followers, and in lists. + VisibilityUnlocked Visibility = 3 + + // VisibilityFollowersOnly means this status is viewable to followers only. + VisibilityFollowersOnly Visibility = 4 + + // VisibilityMutualsOnly means this status + // is visible to mutual followers only. + VisibilityMutualsOnly Visibility = 5 + + // VisibilityDirect means this status is + // visible only to mentioned recipients. + VisibilityDirect Visibility = 6 + + // VisibilityDefault is used when no other setting can be found. + VisibilityDefault Visibility = VisibilityUnlocked +) + +// String returns a stringified, frontend API compatible form of Visibility. 
+func (v Visibility) String() string { + switch v { + case VisibilityNone: + return "none" + case VisibilityPublic: + return "public" + case VisibilityUnlocked: + return "unlocked" + case VisibilityFollowersOnly: + return "followers_only" + case VisibilityMutualsOnly: + return "mutuals_only" + case VisibilityDirect: + return "direct" + default: + panic("invalid visibility") + } +} + +// StatusContentType is the content type with which a status's text is +// parsed. Can be either plain or markdown. Empty will default to plain. +type StatusContentType enumType + +const ( + StatusContentTypePlain StatusContentType = 1 + StatusContentTypeMarkdown StatusContentType = 2 + StatusContentTypeDefault = StatusContentTypePlain +) diff --git a/internal/db/bundb/migrations/util.go b/internal/db/bundb/migrations/util.go index 3219a8aa7..f20f23c3f 100644 --- a/internal/db/bundb/migrations/util.go +++ b/internal/db/bundb/migrations/util.go @@ -26,6 +26,7 @@ import ( "strconv" "strings" + "code.superseriousbusiness.org/gotosocial/internal/config" "code.superseriousbusiness.org/gotosocial/internal/gtserror" "code.superseriousbusiness.org/gotosocial/internal/id" "code.superseriousbusiness.org/gotosocial/internal/log" @@ -37,6 +38,112 @@ import ( "github.com/uptrace/bun/schema" ) +// doWALCheckpoint attempt to force a WAL file merge on SQLite3, +// which can be useful given how much can build-up in the WAL. +// +// see: https://www.sqlite.org/pragma.html#pragma_wal_checkpoint +func doWALCheckpoint(ctx context.Context, db *bun.DB) error { + if db.Dialect().Name() == dialect.SQLite && strings.EqualFold(config.GetDbSqliteJournalMode(), "WAL") { + _, err := db.ExecContext(ctx, "PRAGMA wal_checkpoint(RESTART);") + if err != nil { + return gtserror.Newf("error performing wal_checkpoint: %w", err) + } + } + return nil +} + +// batchUpdateByID performs the given updateQuery with updateArgs +// over the entire given table, batching by the ID of batchByCol. +func batchUpdateByID( + ctx context.Context, + tx bun.Tx, + table string, + batchByCol string, + updateQuery string, + updateArgs []any, +) error { + // Get a count of all in table. + total, err := tx.NewSelect(). + Table(table). + Count(ctx) + if err != nil { + return gtserror.Newf("error selecting total count: %w", err) + } + + // Query batch size + // in number of rows. + const batchsz = 5000 + + // Stores highest batch value + // used in iterate queries, + // starting at highest possible. + highest := id.Highest + + // Total updated rows. + var updated int + + for { + // Limit to batchsz + // items at once. + batchQ := tx. + NewSelect(). + Table(table). + Column(batchByCol). + Where("? < ?", bun.Ident(batchByCol), highest). + OrderExpr("? DESC", bun.Ident(batchByCol)). + Limit(batchsz) + + // Finalize UPDATE to act only on batch. + qStr := updateQuery + " WHERE ? IN (?)" + args := append(slices.Clone(updateArgs), + bun.Ident(batchByCol), + batchQ, + ) + + // Execute the prepared raw query with arguments. + res, err := tx.NewRaw(qStr, args...).Exec(ctx) + if err != nil { + return gtserror.Newf("error updating old column values: %w", err) + } + + // Check how many items we updated. + thisUpdated, err := res.RowsAffected() + if err != nil { + return gtserror.Newf("error counting affected rows: %w", err) + } + + if thisUpdated == 0 { + // Nothing updated + // means we're done. + break + } + + // Update the overall count. + updated += int(thisUpdated) + + // Log helpful message to admin. 
+ log.Infof(ctx, "migrated %d of %d %s (up to %s)", + updated, total, table, highest) + + // Get next highest + // id for next batch. + if err := tx. + NewSelect(). + With("batch_query", batchQ). + ColumnExpr("min(?) FROM ?", bun.Ident(batchByCol), bun.Ident("batch_query")). + Scan(ctx, &highest); err != nil { + return gtserror.Newf("error selecting next highest: %w", err) + } + } + + if total != int(updated) { + // Return error here in order to rollback the whole transaction. + return fmt.Errorf("total=%d does not match updated=%d", total, updated) + } + + return nil +} + // convertEnums performs a transaction that converts // a table's column of our old-style enums (strings) to // more performant and space-saving integer types. @@ -310,7 +417,7 @@ func getModelField(db bun.IDB, rtype reflect.Type, fieldName string) (*schema.Fi } // doesColumnExist safely checks whether given column exists on table, handling both SQLite and PostgreSQL appropriately. -func doesColumnExist(ctx context.Context, tx bun.Tx, table, col string) (bool, error) { +func doesColumnExist(ctx context.Context, tx bun.IDB, table, col string) (bool, error) { var n int var err error switch tx.Dialect().Name() { diff --git a/internal/db/bundb/status.go b/internal/db/bundb/status.go index cf4a2549a..81aba8726 100644 --- a/internal/db/bundb/status.go +++ b/internal/db/bundb/status.go @@ -21,11 +21,13 @@ import ( "context" "errors" "slices" + "strings" "code.superseriousbusiness.org/gotosocial/internal/db" "code.superseriousbusiness.org/gotosocial/internal/gtscontext" "code.superseriousbusiness.org/gotosocial/internal/gtserror" "code.superseriousbusiness.org/gotosocial/internal/gtsmodel" + "code.superseriousbusiness.org/gotosocial/internal/id" "code.superseriousbusiness.org/gotosocial/internal/log" "code.superseriousbusiness.org/gotosocial/internal/state" "code.superseriousbusiness.org/gotosocial/internal/util/xslices" @@ -335,115 +337,284 @@ func (s *statusDB) PutStatus(ctx context.Context, status *gtsmodel.Status) error // as the cache does not attempt a mutex lock until AFTER hook. // return s.db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error { - // create links between this status and any emojis it uses - for _, i := range status.EmojiIDs { + if status.BoostOfID != "" { + var threadID string + + // Boost wrappers always inherit thread + // of the origin status they're boosting. + if err := tx. + NewSelect(). + Table("statuses"). + Column("thread_id"). + Where("? = ?", bun.Ident("id"), status.BoostOfID). + Scan(ctx, &threadID); err != nil { + return gtserror.Newf("error selecting boosted status: %w", err) + } + + // Set the selected thread. + status.ThreadID = threadID + + // They also require no further + // checks! Simply insert status here. + return insertStatus(ctx, tx, status) + } + + // Gather a list of possible thread IDs + // of all the possible related statuses + // to this one. If one exists we can use + // the end result, and if too many exist + // we can fix the status threading. + var threadIDs []string + + if status.InReplyToID != "" { + var threadID string + + // A stored parent status exists, + // select its thread ID to ideally + // inherit this for status. + if err := tx. + NewSelect(). + Table("statuses"). + Column("thread_id"). + Where("? = ?", bun.Ident("id"), status.InReplyToID). + Scan(ctx, &threadID); err != nil { + return gtserror.Newf("error selecting status parent: %w", err) + } + + // Append possible ID to threads slice. 
+ threadIDs = append(threadIDs, threadID) + + } else if status.InReplyToURI != "" { + var ids []string + + // A parent status exists but is not + // yet stored. See if any siblings for + // this shared parent exist with their + // own thread IDs. + if err := tx. + NewSelect(). + Table("statuses"). + Column("thread_id"). + Where("? = ?", bun.Ident("in_reply_to_uri"), status.InReplyToURI). + Scan(ctx, &ids); err != nil && !errors.Is(err, db.ErrNoEntries) { + return gtserror.Newf("error selecting status siblings: %w", err) + } + + // Append possible IDs to threads slice. + threadIDs = append(threadIDs, ids...) + } + + if !*status.Local { + var ids []string + + // For remote statuses specifically, check to + // see if any children are stored for this new + // stored parent with their own thread IDs. + if err := tx. + NewSelect(). + Table("statuses"). + Column("thread_id"). + Where("? = ?", bun.Ident("in_reply_to_uri"), status.URI). + Scan(ctx, &ids); err != nil && !errors.Is(err, db.ErrNoEntries) { + return gtserror.Newf("error selecting status children: %w", err) + } + + // Append possible IDs to threads slice. + threadIDs = append(threadIDs, ids...) + } + + // Ensure only *unique* posssible thread IDs. + threadIDs = xslices.Deduplicate(threadIDs) + switch len(threadIDs) { + + case 0: + // No related status with thread ID already exists, + // so create new thread ID from status creation time. + threadID := id.NewULIDFromTime(status.CreatedAt) + + // Insert new thread. if _, err := tx. NewInsert(). - Model(>smodel.StatusToEmoji{ - StatusID: status.ID, - EmojiID: i, - }). - On("CONFLICT (?, ?) DO NOTHING", bun.Ident("status_id"), bun.Ident("emoji_id")). + Model(>smodel.Thread{ID: threadID}). Exec(ctx); err != nil { - if !errors.Is(err, db.ErrAlreadyExists) { - return err - } + return gtserror.Newf("error inserting thread: %w", err) + } + + // Update status thread ID. + status.ThreadID = threadID + + case 1: + // Inherit single known thread. + status.ThreadID = threadIDs[0] + + default: + var err error + log.Infof(ctx, "reconciling status threading for %s: [%s]", status.URI, strings.Join(threadIDs, ",")) + status.ThreadID, err = s.fixStatusThreading(ctx, tx, threadIDs) + if err != nil { + return err } } - // create links between this status and any tags it uses - for _, i := range status.TagIDs { - if _, err := tx. - NewInsert(). - Model(>smodel.StatusToTag{ - StatusID: status.ID, - TagID: i, - }). - On("CONFLICT (?, ?) DO NOTHING", bun.Ident("status_id"), bun.Ident("tag_id")). - Exec(ctx); err != nil { - if !errors.Is(err, db.ErrAlreadyExists) { - return err - } - } - } - - // change the status ID of the media - // attachments to the current status - for _, a := range status.Attachments { - a.StatusID = status.ID - if _, err := tx. - NewUpdate(). - Model(a). - Column("status_id"). - Where("? = ?", bun.Ident("media_attachment.id"), a.ID). - Exec(ctx); err != nil { - if !errors.Is(err, db.ErrAlreadyExists) { - return err - } - } - } - - // If the status is threaded, create - // link between thread and status. - if status.ThreadID != "" { - if _, err := tx. - NewInsert(). - Model(>smodel.ThreadToStatus{ - ThreadID: status.ThreadID, - StatusID: status.ID, - }). - On("CONFLICT (?, ?) DO NOTHING", bun.Ident("thread_id"), bun.Ident("status_id")). - Exec(ctx); err != nil { - if !errors.Is(err, db.ErrAlreadyExists) { - return err - } - } - } - - // Finally, insert the status - _, err := tx.NewInsert(). - Model(status). - Exec(ctx) - return err + // And after threading, insert status. 
+ // This will error if ThreadID is unset. + return insertStatus(ctx, tx, status) }) }) } +// fixStatusThreading can be called to reconcile statuses in the same thread but known to be using multiple given threads. +func (s *statusDB) fixStatusThreading(ctx context.Context, tx bun.Tx, threadIDs []string) (string, error) { + if len(threadIDs) <= 1 { + panic("invalid call to fixStatusThreading()") + } + + // Sort ascending, i.e. + // oldest thread ID first. + slices.Sort(threadIDs) + + // Drop the oldest thread ID + // from slice, we'll keep this. + threadID := threadIDs[0] + threadIDs = threadIDs[1:] + + // On updates, gather IDs of changed model + // IDs for later stage of cache invalidation, + // preallocating slices for worst-case scenarios. + statusIDs := make([]string, 0, 4*len(threadIDs)) + muteIDs := make([]string, 0, 4*len(threadIDs)) + + // Update all statuses with + // thread IDs to use oldest. + if _, err := tx. + NewUpdate(). + Table("statuses"). + Where("? IN (?)", bun.Ident("thread_id"), bun.In(threadIDs)). + Set("? = ?", bun.Ident("thread_id"), threadID). + Returning("?", bun.Ident("id")). + Exec(ctx, &statusIDs); err != nil && !errors.Is(err, db.ErrNoEntries) { + return "", gtserror.Newf("error updating statuses: %w", err) + } + + // Update all thread mutes with + // thread IDs to use oldest. + if _, err := tx. + NewUpdate(). + Table("thread_mutes"). + Where("? IN (?)", bun.Ident("thread_id"), bun.In(threadIDs)). + Set("? = ?", bun.Ident("thread_id"), threadID). + Returning("?", bun.Ident("id")). + Exec(ctx, &muteIDs); err != nil && !errors.Is(err, db.ErrNoEntries) { + return "", gtserror.Newf("error updating thread mutes: %w", err) + } + + // Delete all now + // unused thread IDs. + if _, err := tx. + NewDelete(). + Table("threads"). + Where("? IN (?)", bun.Ident("id"), bun.In(threadIDs)). + Exec(ctx); err != nil { + return "", gtserror.Newf("error deleting threads: %w", err) + } + + // Invalidate caches for changed statuses and mutes. + s.state.Caches.DB.Status.InvalidateIDs("ID", statusIDs) + s.state.Caches.DB.ThreadMute.InvalidateIDs("ID", muteIDs) + + return threadID, nil +} + +// insertStatus handles the base status insert logic, that is the status itself, +// any intermediary table links, and updating media attachments to point to status. +func insertStatus(ctx context.Context, tx bun.Tx, status *gtsmodel.Status) error { + + // create links between this + // status and any emojis it uses + for _, id := range status.EmojiIDs { + if _, err := tx. + NewInsert(). + Model(>smodel.StatusToEmoji{ + StatusID: status.ID, + EmojiID: id, + }). + Exec(ctx); err != nil { + return gtserror.Newf("error inserting status_to_emoji: %w", err) + } + } + + // create links between this + // status and any tags it uses + for _, id := range status.TagIDs { + if _, err := tx. + NewInsert(). + Model(>smodel.StatusToTag{ + StatusID: status.ID, + TagID: id, + }). + Exec(ctx); err != nil { + return gtserror.Newf("error inserting status_to_tag: %w", err) + } + } + + // change the status ID of the media + // attachments to the current status + for _, a := range status.Attachments { + a.StatusID = status.ID + if _, err := tx. + NewUpdate(). + Model(a). + Column("status_id"). + Where("? = ?", bun.Ident("media_attachment.id"), a.ID). + Exec(ctx); err != nil { + return gtserror.Newf("error updating media: %w", err) + } + } + + // Finally, insert the status + if _, err := tx.NewInsert(). + Model(status). 
+ Exec(ctx); err != nil { + return gtserror.Newf("error inserting status: %w", err) + } + + return nil +} + func (s *statusDB) UpdateStatus(ctx context.Context, status *gtsmodel.Status, columns ...string) error { return s.state.Caches.DB.Status.Store(status, func() error { // It is safe to run this database transaction within cache.Store // as the cache does not attempt a mutex lock until AFTER hook. // return s.db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error { - // create links between this status and any emojis it uses - for _, i := range status.EmojiIDs { + + // create links between this + // status and any emojis it uses + for _, id := range status.EmojiIDs { if _, err := tx. NewInsert(). Model(>smodel.StatusToEmoji{ StatusID: status.ID, - EmojiID: i, + EmojiID: id, }). On("CONFLICT (?, ?) DO NOTHING", bun.Ident("status_id"), bun.Ident("emoji_id")). Exec(ctx); err != nil { - if !errors.Is(err, db.ErrAlreadyExists) { - return err - } + return err } } - // create links between this status and any tags it uses - for _, i := range status.TagIDs { + // create links between this + // status and any tags it uses + for _, id := range status.TagIDs { if _, err := tx. NewInsert(). Model(>smodel.StatusToTag{ StatusID: status.ID, - TagID: i, + TagID: id, }). On("CONFLICT (?, ?) DO NOTHING", bun.Ident("status_id"), bun.Ident("tag_id")). Exec(ctx); err != nil { - if !errors.Is(err, db.ErrAlreadyExists) { - return err - } + return err } } @@ -457,26 +628,7 @@ func (s *statusDB) UpdateStatus(ctx context.Context, status *gtsmodel.Status, co Column("status_id"). Where("? = ?", bun.Ident("media_attachment.id"), a.ID). Exec(ctx); err != nil { - if !errors.Is(err, db.ErrAlreadyExists) { - return err - } - } - } - - // If the status is threaded, create - // link between thread and status. - if status.ThreadID != "" { - if _, err := tx. - NewInsert(). - Model(>smodel.ThreadToStatus{ - ThreadID: status.ThreadID, - StatusID: status.ID, - }). - On("CONFLICT (?, ?) DO NOTHING", bun.Ident("thread_id"), bun.Ident("status_id")). - Exec(ctx); err != nil { - if !errors.Is(err, db.ErrAlreadyExists) { - return err - } + return err } } @@ -499,7 +651,9 @@ func (s *statusDB) DeleteStatusByID(ctx context.Context, id string) error { // Delete status from database and any related links in a transaction. if err := s.db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error { - // delete links between this status and any emojis it uses + + // delete links between this + // status and any emojis it uses if _, err := tx. NewDelete(). TableExpr("? AS ?", bun.Ident("status_to_emojis"), bun.Ident("status_to_emoji")). @@ -508,7 +662,8 @@ func (s *statusDB) DeleteStatusByID(ctx context.Context, id string) error { return err } - // delete links between this status and any tags it uses + // delete links between this + // status and any tags it uses if _, err := tx. NewDelete(). TableExpr("? AS ?", bun.Ident("status_to_tags"), bun.Ident("status_to_tag")). @@ -517,16 +672,6 @@ func (s *statusDB) DeleteStatusByID(ctx context.Context, id string) error { return err } - // Delete links between this status - // and any threads it was a part of. - if _, err := tx. - NewDelete(). - TableExpr("? AS ?", bun.Ident("thread_to_statuses"), bun.Ident("thread_to_status")). - Where("? = ?", bun.Ident("thread_to_status.status_id"), id). - Exec(ctx); err != nil { - return err - } - // delete the status itself if _, err := tx. NewDelete(). 
diff --git a/internal/db/bundb/status_test.go b/internal/db/bundb/status_test.go index 9c1eb73bd..7d33763df 100644 --- a/internal/db/bundb/status_test.go +++ b/internal/db/bundb/status_test.go @@ -21,8 +21,12 @@ import ( "testing" "time" + "code.superseriousbusiness.org/gotosocial/internal/ap" "code.superseriousbusiness.org/gotosocial/internal/db" + "code.superseriousbusiness.org/gotosocial/internal/gtscontext" "code.superseriousbusiness.org/gotosocial/internal/gtsmodel" + "code.superseriousbusiness.org/gotosocial/internal/id" + "code.superseriousbusiness.org/gotosocial/internal/util" "github.com/stretchr/testify/suite" ) @@ -253,6 +257,302 @@ func (suite *StatusTestSuite) TestPutPopulatedStatus() { ) } +func (suite *StatusTestSuite) TestPutStatusThreadingBoostOfIDSet() { + ctx := suite.T().Context() + + // Fake account details. + accountID := id.NewULID() + accountURI := "https://example.com/users/" + accountID + + var err error + + // Prepare new status. + statusID := id.NewULID() + statusURI := accountURI + "/statuses/" + statusID + status := >smodel.Status{ + ID: statusID, + URI: statusURI, + AccountID: accountID, + AccountURI: accountURI, + Local: util.Ptr(false), + Federated: util.Ptr(true), + ActivityStreamsType: ap.ObjectNote, + } + + // Insert original status into database. + err = suite.db.PutStatus(ctx, status) + suite.NoError(err) + suite.NotEmpty(status.ThreadID) + + // Prepare new boost. + boostID := id.NewULID() + boostURI := accountURI + "/statuses/" + boostID + boost := >smodel.Status{ + ID: boostID, + URI: boostURI, + AccountID: accountID, + AccountURI: accountURI, + BoostOfID: statusID, + BoostOfAccountID: accountID, + Local: util.Ptr(false), + Federated: util.Ptr(true), + ActivityStreamsType: ap.ObjectNote, + } + + // Insert boost wrapper into database. + err = suite.db.PutStatus(ctx, boost) + suite.NoError(err) + + // Boost wrapper should have inherited thread. + suite.Equal(status.ThreadID, boost.ThreadID) +} + +func (suite *StatusTestSuite) TestPutStatusThreadingInReplyToIDSet() { + ctx := suite.T().Context() + + // Fake account details. + accountID := id.NewULID() + accountURI := "https://example.com/users/" + accountID + + var err error + + // Prepare new status. + statusID := id.NewULID() + statusURI := accountURI + "/statuses/" + statusID + status := >smodel.Status{ + ID: statusID, + URI: statusURI, + AccountID: accountID, + AccountURI: accountURI, + Local: util.Ptr(false), + Federated: util.Ptr(true), + ActivityStreamsType: ap.ObjectNote, + } + + // Insert original status into database. + err = suite.db.PutStatus(ctx, status) + suite.NoError(err) + suite.NotEmpty(status.ThreadID) + + // Prepare new reply. + replyID := id.NewULID() + replyURI := accountURI + "/statuses/" + replyID + reply := >smodel.Status{ + ID: replyID, + URI: replyURI, + AccountID: accountID, + AccountURI: accountURI, + InReplyToID: statusID, + InReplyToURI: statusURI, + InReplyToAccountID: accountID, + Local: util.Ptr(false), + Federated: util.Ptr(true), + ActivityStreamsType: ap.ObjectNote, + } + + // Insert status reply into database. + err = suite.db.PutStatus(ctx, reply) + suite.NoError(err) + + // Status reply should have inherited thread. + suite.Equal(status.ThreadID, reply.ThreadID) +} + +func (suite *StatusTestSuite) TestPutStatusThreadingSiblings() { + ctx := suite.T().Context() + + // Fake account details. + accountID := id.NewULID() + accountURI := "https://example.com/users/" + accountID + + // Main parent status ID. 
+ statusID := id.NewULID() + statusURI := accountURI + "/statuses/" + statusID + status := >smodel.Status{ + ID: statusID, + URI: statusURI, + AccountID: accountID, + AccountURI: accountURI, + Local: util.Ptr(false), + Federated: util.Ptr(true), + ActivityStreamsType: ap.ObjectNote, + } + + const siblingCount = 10 + var statuses []*gtsmodel.Status + for range siblingCount { + id := id.NewULID() + uri := accountURI + "/statuses/" + id + + // Note here that inReplyToID not being set, + // so as they get inserted it's as if children + // are being dereferenced ahead of stored parent. + // + // Which is where out-of-sync threads can occur. + statuses = append(statuses, >smodel.Status{ + ID: id, + URI: uri, + AccountID: accountID, + AccountURI: accountURI, + InReplyToURI: statusURI, + Local: util.Ptr(false), + Federated: util.Ptr(true), + ActivityStreamsType: ap.ObjectNote, + }) + } + + var err error + var threadID string + + // Insert all of the sibling children + // into the database, they should all + // still get correctly threaded together. + for _, child := range statuses { + err = suite.db.PutStatus(ctx, child) + suite.NoError(err) + suite.NotEmpty(child.ThreadID) + if threadID == "" { + threadID = child.ThreadID + } else { + suite.Equal(threadID, child.ThreadID) + } + } + + // Finally, insert the parent status. + err = suite.db.PutStatus(ctx, status) + suite.NoError(err) + + // Parent should have inherited thread. + suite.Equal(threadID, status.ThreadID) +} + +func (suite *StatusTestSuite) TestPutStatusThreadingReconcile() { + ctx := suite.T().Context() + + // Fake account details. + accountID := id.NewULID() + accountURI := "https://example.com/users/" + accountID + + const threadLength = 10 + var statuses []*gtsmodel.Status + var lastURI, lastID string + + // Generate front-half of thread. + for range threadLength / 2 { + id := id.NewULID() + uri := accountURI + "/statuses/" + id + statuses = append(statuses, >smodel.Status{ + ID: id, + URI: uri, + AccountID: accountID, + AccountURI: accountURI, + InReplyToID: lastID, + InReplyToURI: lastURI, + Local: util.Ptr(false), + Federated: util.Ptr(true), + ActivityStreamsType: ap.ObjectNote, + }) + lastURI = uri + lastID = id + } + + // Generate back-half of thread. + // + // Note here that inReplyToID not being set past + // the first item, so as they get inserted it's + // as if the children are dereferenced ahead of + // the stored parent, i.e. an out-of-sync thread. + for range threadLength / 2 { + id := id.NewULID() + uri := accountURI + "/statuses/" + id + statuses = append(statuses, >smodel.Status{ + ID: id, + URI: uri, + AccountID: accountID, + AccountURI: accountURI, + InReplyToID: lastID, + InReplyToURI: lastURI, + Local: util.Ptr(false), + Federated: util.Ptr(true), + ActivityStreamsType: ap.ObjectNote, + }) + lastURI = uri + lastID = "" + } + + var err error + + // Thread IDs we expect to see for + // head statuses as we add them, and + // for tail statuses as we add them. + var thread0, threadN string + + // Insert status thread from head and tail, + // specifically stopping before the middle. + // These should each get threaded separately. + for i := range (threadLength / 2) - 1 { + i0, iN := i, len(statuses)-1-i + + // Insert i'th status from the start. + err = suite.db.PutStatus(ctx, statuses[i0]) + suite.NoError(err) + suite.NotEmpty(statuses[i0].ThreadID) + + // Check i0 thread. 
+ if thread0 == "" { + thread0 = statuses[i0].ThreadID + } else { + suite.Equal(thread0, statuses[i0].ThreadID) + } + + // Insert i'th status from the end. + err = suite.db.PutStatus(ctx, statuses[iN]) + suite.NoError(err) + suite.NotEmpty(statuses[iN].ThreadID) + + // Check iN thread. + if threadN == "" { + threadN = statuses[iN].ThreadID + } else { + suite.Equal(threadN, statuses[iN].ThreadID) + } + } + + // Finally, insert remaining statuses, + // at some point among these it should + // trigger a status thread reconcile. + for _, status := range statuses { + + if status.ThreadID != "" { + // already inserted + continue + } + + // Insert remaining status into db. + err = suite.db.PutStatus(ctx, status) + suite.NoError(err) + } + + // The reconcile should pick the older, + // i.e. smaller of two ULID thread IDs. + finalThreadID := min(thread0, threadN) + for _, status := range statuses { + + // Get ID of status. + id := status.ID + + // Fetch latest status the from database. + status, err := suite.db.GetStatusByID( + gtscontext.SetBarebones(ctx), + id, + ) + suite.NoError(err) + + // Ensure after reconcile uses expected thread. + suite.Equal(finalThreadID, status.ThreadID) + } +} + func TestStatusTestSuite(t *testing.T) { suite.Run(t, new(StatusTestSuite)) } diff --git a/internal/db/status.go b/internal/db/status.go index d1bdb6106..58dbe5dc1 100644 --- a/internal/db/status.go +++ b/internal/db/status.go @@ -47,7 +47,7 @@ type Status interface { // PopulateStatusEdits ensures that status' edits are fully popualted. PopulateStatusEdits(ctx context.Context, status *gtsmodel.Status) error - // PutStatus stores one status in the database. + // PutStatus stores one status in the database, this also handles status threading. PutStatus(ctx context.Context, status *gtsmodel.Status) error // UpdateStatus updates one status in the database. diff --git a/internal/federation/dereferencing/announce.go b/internal/federation/dereferencing/announce.go index 5d83b48a9..f05fde760 100644 --- a/internal/federation/dereferencing/announce.go +++ b/internal/federation/dereferencing/announce.go @@ -101,7 +101,7 @@ func (d *Dereferencer) EnrichAnnounce( // Generate an ID for the boost wrapper status. boost.ID = id.NewULIDFromTime(boost.CreatedAt) - // Store the boost wrapper status in database. + // Store the remote boost wrapper status in database. switch err = d.state.DB.PutStatus(ctx, boost); { case err == nil: // all groovy. diff --git a/internal/federation/dereferencing/status.go b/internal/federation/dereferencing/status.go index 01538f5ab..ce1ee2457 100644 --- a/internal/federation/dereferencing/status.go +++ b/internal/federation/dereferencing/status.go @@ -22,7 +22,6 @@ import ( "errors" "net/http" "net/url" - "slices" "time" "code.superseriousbusiness.org/gotosocial/internal/ap" @@ -571,15 +570,6 @@ func (d *Dereferencer) enrichStatus( return nil, nil, gtserror.Newf("error populating mentions for status %s: %w", uri, err) } - // Ensure status in a thread is connected. - threadChanged, err := d.threadStatus(ctx, - status, - latestStatus, - ) - if err != nil { - return nil, nil, gtserror.Newf("error handling threading for status %s: %w", uri, err) - } - // Populate tags associated with status, passing // in existing status to reuse old where possible. tagsChanged, err := d.fetchStatusTags(ctx, @@ -614,7 +604,7 @@ func (d *Dereferencer) enrichStatus( } if isNew { - // Simplest case, insert this new status into the database. + // Simplest case, insert this new remote status into the database. 
if err := d.state.DB.PutStatus(ctx, latestStatus); err != nil { return nil, nil, gtserror.Newf("error inserting new status %s: %w", uri, err) } @@ -627,7 +617,6 @@ func (d *Dereferencer) enrichStatus( latestStatus, pollChanged, mentionsChanged, - threadChanged, tagsChanged, mediaChanged, emojiChanged, @@ -736,81 +725,6 @@ func (d *Dereferencer) fetchStatusMentions( return changed, nil } -// threadStatus ensures that given status is threaded correctly -// where necessary. that is it will inherit a thread ID from the -// existing copy if it is threaded correctly, else it will inherit -// a thread ID from a parent with existing thread, else it will -// generate a new thread ID if status mentions a local account. -func (d *Dereferencer) threadStatus( - ctx context.Context, - existing *gtsmodel.Status, - status *gtsmodel.Status, -) ( - changed bool, - err error, -) { - - // Check for existing status - // that is already threaded. - if existing.ThreadID != "" { - - // Existing is threaded correctly. - if existing.InReplyTo == nil || - existing.InReplyTo.ThreadID == existing.ThreadID { - status.ThreadID = existing.ThreadID - return false, nil - } - - // TODO: delete incorrect thread - } - - // Check for existing parent to inherit threading from. - if inReplyTo := status.InReplyTo; inReplyTo != nil && - inReplyTo.ThreadID != "" { - status.ThreadID = inReplyTo.ThreadID - return true, nil - } - - // Parent wasn't threaded. If this - // status mentions a local account, - // we should thread it so that local - // account can mute it if they want. - mentionsLocal := slices.ContainsFunc( - status.Mentions, - func(m *gtsmodel.Mention) bool { - // If TargetAccount couldn't - // be deref'd, we know it's not - // a local account, so only - // check for non-nil accounts. - return m.TargetAccount != nil && - m.TargetAccount.IsLocal() - }, - ) - - if !mentionsLocal { - // Status doesn't mention a - // local account, so we don't - // need to thread it. - return false, nil - } - - // Status mentions a local account. - // Create a new thread and assign - // it to the status. - threadID := id.NewULID() - - // Insert new thread model into db. - if err := d.state.DB.PutThread(ctx, - >smodel.Thread{ID: threadID}, - ); err != nil { - return false, gtserror.Newf("error inserting new thread in db: %w", err) - } - - // Set thread on latest status. - status.ThreadID = threadID - return true, nil -} - // fetchStatusTags populates the tags on 'status', fetching existing // from the database and creating new where needed. 'existing' is used // to fetch tags that have not changed since previous stored status. @@ -1135,7 +1049,6 @@ func (d *Dereferencer) handleStatusEdit( status *gtsmodel.Status, pollChanged bool, mentionsChanged bool, - threadChanged bool, tagsChanged bool, mediaChanged bool, emojiChanged bool, @@ -1193,14 +1106,6 @@ func (d *Dereferencer) handleStatusEdit( // been previously populated properly. } - if threadChanged { - cols = append(cols, "thread_id") - - // Thread changed doesn't necessarily - // indicate an edit, it may just now - // actually be included in a thread. - } - if tagsChanged { cols = append(cols, "tags") // i.e. 
TagIDs diff --git a/internal/gtsmodel/status.go b/internal/gtsmodel/status.go index 884caac0c..b6bc303cd 100644 --- a/internal/gtsmodel/status.go +++ b/internal/gtsmodel/status.go @@ -27,56 +27,56 @@ import ( // Status represents a user-created 'post' or 'status' in the database, either remote or local type Status struct { - ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database - CreatedAt time.Time `bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created - EditedAt time.Time `bun:"type:timestamptz,nullzero"` // when this status was last edited (if set) - FetchedAt time.Time `bun:"type:timestamptz,nullzero"` // when was item (remote) last fetched. - PinnedAt time.Time `bun:"type:timestamptz,nullzero"` // Status was pinned by owning account at this time. - URI string `bun:",unique,nullzero,notnull"` // activitypub URI of this status - URL string `bun:",nullzero"` // web url for viewing this status - Content string `bun:""` // Content HTML for this status. - AttachmentIDs []string `bun:"attachments,array"` // Database IDs of any media attachments associated with this status - Attachments []*MediaAttachment `bun:"attached_media,rel:has-many"` // Attachments corresponding to attachmentIDs - TagIDs []string `bun:"tags,array"` // Database IDs of any tags used in this status - Tags []*Tag `bun:"attached_tags,m2m:status_to_tags"` // Tags corresponding to tagIDs. https://bun.uptrace.dev/guide/relations.html#many-to-many-relation - MentionIDs []string `bun:"mentions,array"` // Database IDs of any mentions in this status - Mentions []*Mention `bun:"attached_mentions,rel:has-many"` // Mentions corresponding to mentionIDs - EmojiIDs []string `bun:"emojis,array"` // Database IDs of any emojis used in this status - Emojis []*Emoji `bun:"attached_emojis,m2m:status_to_emojis"` // Emojis corresponding to emojiIDs. https://bun.uptrace.dev/guide/relations.html#many-to-many-relation - Local *bool `bun:",nullzero,notnull,default:false"` // is this status from a local account? - AccountID string `bun:"type:CHAR(26),nullzero,notnull"` // which account posted this status? - Account *Account `bun:"rel:belongs-to"` // account corresponding to accountID - AccountURI string `bun:",nullzero,notnull"` // activitypub uri of the owner of this status - InReplyToID string `bun:"type:CHAR(26),nullzero"` // id of the status this status replies to - InReplyToURI string `bun:",nullzero"` // activitypub uri of the status this status is a reply to - InReplyToAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that this status replies to - InReplyTo *Status `bun:"-"` // status corresponding to inReplyToID - InReplyToAccount *Account `bun:"rel:belongs-to"` // account corresponding to inReplyToAccountID - BoostOfID string `bun:"type:CHAR(26),nullzero"` // id of the status this status is a boost of - BoostOfURI string `bun:"-"` // URI of the status this status is a boost of; field not inserted in the db, just for dereferencing purposes. 
- BoostOfAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that owns the boosted status - BoostOf *Status `bun:"-"` // status that corresponds to boostOfID - BoostOfAccount *Account `bun:"rel:belongs-to"` // account that corresponds to boostOfAccountID - ThreadID string `bun:"type:CHAR(26),nullzero"` // id of the thread to which this status belongs; only set for remote statuses if a local account is involved at some point in the thread, otherwise null - EditIDs []string `bun:"edits,array"` // - Edits []*StatusEdit `bun:"-"` // - PollID string `bun:"type:CHAR(26),nullzero"` // - Poll *Poll `bun:"-"` // - ContentWarning string `bun:",nullzero"` // Content warning HTML for this status. - ContentWarningText string `bun:""` // Original text of the content warning without formatting - Visibility Visibility `bun:",nullzero,notnull"` // visibility entry for this status - Sensitive *bool `bun:",nullzero,notnull,default:false"` // mark the status as sensitive? - Language string `bun:",nullzero"` // what language is this status written in? - CreatedWithApplicationID string `bun:"type:CHAR(26),nullzero"` // Which application was used to create this status? - CreatedWithApplication *Application `bun:"rel:belongs-to"` // application corresponding to createdWithApplicationID - ActivityStreamsType string `bun:",nullzero,notnull"` // What is the activitystreams type of this status? See: https://www.w3.org/TR/activitystreams-vocabulary/#object-types. Will probably almost always be Note but who knows!. - Text string `bun:""` // Original text of the status without formatting - ContentType StatusContentType `bun:",nullzero"` // Content type used to process the original text of the status - Federated *bool `bun:",notnull"` // This status will be federated beyond the local timeline(s) - InteractionPolicy *InteractionPolicy `bun:""` // InteractionPolicy for this status. If null then the default InteractionPolicy should be assumed for this status's Visibility. Always null for boost wrappers. - PendingApproval *bool `bun:",nullzero,notnull,default:false"` // If true then status is a reply or boost wrapper that must be Approved by the reply-ee or boost-ee before being fully distributed. - PreApproved bool `bun:"-"` // If true, then status is a reply to or boost wrapper of a status on our instance, has permission to do the interaction, and an Accept should be sent out for it immediately. Field not stored in the DB. - ApprovedByURI string `bun:",nullzero"` // URI of an Accept Activity that approves the Announce or Create Activity that this status was/will be attached to. + ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database + CreatedAt time.Time `bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created + EditedAt time.Time `bun:"type:timestamptz,nullzero"` // when this status was last edited (if set) + FetchedAt time.Time `bun:"type:timestamptz,nullzero"` // when was item (remote) last fetched. + PinnedAt time.Time `bun:"type:timestamptz,nullzero"` // Status was pinned by owning account at this time. + URI string `bun:",unique,nullzero,notnull"` // activitypub URI of this status + URL string `bun:",nullzero"` // web url for viewing this status + Content string `bun:""` // Content HTML for this status. 
+ AttachmentIDs []string `bun:"attachments,array"` // Database IDs of any media attachments associated with this status + Attachments []*MediaAttachment `bun:"attached_media,rel:has-many"` // Attachments corresponding to attachmentIDs + TagIDs []string `bun:"tags,array"` // Database IDs of any tags used in this status + Tags []*Tag `bun:"attached_tags,m2m:status_to_tags"` // Tags corresponding to tagIDs. https://bun.uptrace.dev/guide/relations.html#many-to-many-relation + MentionIDs []string `bun:"mentions,array"` // Database IDs of any mentions in this status + Mentions []*Mention `bun:"attached_mentions,rel:has-many"` // Mentions corresponding to mentionIDs + EmojiIDs []string `bun:"emojis,array"` // Database IDs of any emojis used in this status + Emojis []*Emoji `bun:"attached_emojis,m2m:status_to_emojis"` // Emojis corresponding to emojiIDs. https://bun.uptrace.dev/guide/relations.html#many-to-many-relation + Local *bool `bun:",nullzero,notnull,default:false"` // is this status from a local account? + AccountID string `bun:"type:CHAR(26),nullzero,notnull"` // which account posted this status? + Account *Account `bun:"rel:belongs-to"` // account corresponding to accountID + AccountURI string `bun:",nullzero,notnull"` // activitypub uri of the owner of this status + InReplyToID string `bun:"type:CHAR(26),nullzero"` // id of the status this status replies to + InReplyToURI string `bun:",nullzero"` // activitypub uri of the status this status is a reply to + InReplyToAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that this status replies to + InReplyTo *Status `bun:"-"` // status corresponding to inReplyToID + InReplyToAccount *Account `bun:"rel:belongs-to"` // account corresponding to inReplyToAccountID + BoostOfID string `bun:"type:CHAR(26),nullzero"` // id of the status this status is a boost of + BoostOfURI string `bun:"-"` // URI of the status this status is a boost of; field not inserted in the db, just for dereferencing purposes. + BoostOfAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that owns the boosted status + BoostOf *Status `bun:"-"` // status that corresponds to boostOfID + BoostOfAccount *Account `bun:"rel:belongs-to"` // account that corresponds to boostOfAccountID + ThreadID string `bun:"type:CHAR(26),nullzero,notnull,default:00000000000000000000000000"` // id of the thread to which this status belongs + EditIDs []string `bun:"edits,array"` // + Edits []*StatusEdit `bun:"-"` // + PollID string `bun:"type:CHAR(26),nullzero"` // + Poll *Poll `bun:"-"` // + ContentWarning string `bun:",nullzero"` // Content warning HTML for this status. + ContentWarningText string `bun:""` // Original text of the content warning without formatting + Visibility Visibility `bun:",nullzero,notnull"` // visibility entry for this status + Sensitive *bool `bun:",nullzero,notnull,default:false"` // mark the status as sensitive? + Language string `bun:",nullzero"` // what language is this status written in? + CreatedWithApplicationID string `bun:"type:CHAR(26),nullzero"` // Which application was used to create this status? + CreatedWithApplication *Application `bun:"rel:belongs-to"` // application corresponding to createdWithApplicationID + ActivityStreamsType string `bun:",nullzero,notnull"` // What is the activitystreams type of this status? See: https://www.w3.org/TR/activitystreams-vocabulary/#object-types. Will probably almost always be Note but who knows!. 
+ Text string `bun:""` // Original text of the status without formatting + ContentType StatusContentType `bun:",nullzero"` // Content type used to process the original text of the status + Federated *bool `bun:",notnull"` // This status will be federated beyond the local timeline(s) + InteractionPolicy *InteractionPolicy `bun:""` // InteractionPolicy for this status. If null then the default InteractionPolicy should be assumed for this status's Visibility. Always null for boost wrappers. + PendingApproval *bool `bun:",nullzero,notnull,default:false"` // If true then status is a reply or boost wrapper that must be Approved by the reply-ee or boost-ee before being fully distributed. + PreApproved bool `bun:"-"` // If true, then status is a reply to or boost wrapper of a status on our instance, has permission to do the interaction, and an Accept should be sent out for it immediately. Field not stored in the DB. + ApprovedByURI string `bun:",nullzero"` // URI of an Accept Activity that approves the Announce or Create Activity that this status was/will be attached to. } // GetID implements timeline.Timelineable{}. diff --git a/internal/gtsmodel/thread.go b/internal/gtsmodel/thread.go index 5d5af1993..34f921f8e 100644 --- a/internal/gtsmodel/thread.go +++ b/internal/gtsmodel/thread.go @@ -23,10 +23,3 @@ type Thread struct { ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database StatusIDs []string `bun:"-"` // ids of statuses belonging to this thread (order not guaranteed) } - -// ThreadToStatus is an intermediate struct to facilitate the -// many2many relationship between a thread and one or more statuses. -type ThreadToStatus struct { - ThreadID string `bun:"type:CHAR(26),unique:statusthread,nullzero,notnull"` - StatusID string `bun:"type:CHAR(26),unique:statusthread,nullzero,notnull"` -} diff --git a/internal/middleware/nollamas.go b/internal/middleware/nollamas.go index 914469a24..6191ebe3b 100644 --- a/internal/middleware/nollamas.go +++ b/internal/middleware/nollamas.go @@ -26,6 +26,7 @@ import ( "hash" "io" "net/http" + "strconv" "time" apimodel "code.superseriousbusiness.org/gotosocial/internal/api/model" @@ -35,6 +36,7 @@ import ( "code.superseriousbusiness.org/gotosocial/internal/gtserror" "code.superseriousbusiness.org/gotosocial/internal/log" "code.superseriousbusiness.org/gotosocial/internal/oauth" + "codeberg.org/gruf/go-bitutil" "codeberg.org/gruf/go-byteutil" "github.com/gin-gonic/gin" ) @@ -60,49 +62,79 @@ func NoLLaMas( return func(*gin.Context) {} } - seed := make([]byte, 32) + var seed [32]byte // Read random data for the token seed. - _, err := io.ReadFull(rand.Reader, seed) + _, err := io.ReadFull(rand.Reader, seed[:]) if err != nil { panic(err) } // Configure nollamas. var nollamas nollamas - nollamas.seed = seed + nollamas.entropy = seed nollamas.ttl = time.Hour - nollamas.diff = config.GetAdvancedScraperDeterrenceDifficulty() + nollamas.rounds = config.GetAdvancedScraperDeterrenceDifficulty() nollamas.getInstanceV1 = getInstanceV1 nollamas.policy = cookiePolicy return nollamas.Serve } +// i.e. hash slice length. +const hashLen = sha256.Size + +// i.e. hex.EncodedLen(hashLen). +const encodedHashLen = 2 * hashLen + // hashWithBufs encompasses a hash along // with the necessary buffers to generate // a hashsum and then encode that sum. type hashWithBufs struct { hash hash.Hash - hbuf []byte - ebuf []byte + hbuf [hashLen]byte + ebuf [encodedHashLen]byte +} + +// write is a passthrough to hash.Hash{}.Write(). 
+func (h *hashWithBufs) write(b []byte) { + _, _ = h.hash.Write(b) +} + +// writeString is a passthrough to hash.Hash{}.Write([]byte(s)). +func (h *hashWithBufs) writeString(s string) { + _, _ = h.hash.Write(byteutil.S2B(s)) +} + +// EncodedSum returns the hex encoded sum of hash.Sum(). +func (h *hashWithBufs) EncodedSum() string { + _ = h.hash.Sum(h.hbuf[:0]) + hex.Encode(h.ebuf[:], h.hbuf[:]) + return string(h.ebuf[:]) +} + +// Reset will reset hash and buffers. +func (h *hashWithBufs) Reset() { + h.ebuf = [encodedHashLen]byte{} + h.hbuf = [hashLen]byte{} + h.hash.Reset() } type nollamas struct { // our instance cookie policy. policy apiutil.CookiePolicy - // unique token seed + // unique entropy // to prevent hashes // being guessable - seed []byte + entropy [32]byte // success cookie TTL ttl time.Duration - // algorithm difficulty knobs. - // diff determines the number - // of leading zeroes required. - diff uint8 + // rounds determines roughly how + // many hash-encode rounds each + // client is required to complete. + rounds uint32 // extra fields required for // our template rendering. @@ -134,18 +166,8 @@ func (m *nollamas) Serve(c *gin.Context) { return } - // i.e. outputted hash slice length. - const hashLen = sha256.Size - - // i.e. hex.EncodedLen(hashLen). - const encodedHashLen = 2 * hashLen - - // Prepare hash + buffers. - hash := hashWithBufs{ - hash: sha256.New(), - hbuf: make([]byte, 0, hashLen), - ebuf: make([]byte, encodedHashLen), - } + // Prepare new hash with buffers. + hash := hashWithBufs{hash: sha256.New()} // Extract client fingerprint data. userAgent := c.GetHeader("User-Agent") @@ -153,15 +175,7 @@ func (m *nollamas) Serve(c *gin.Context) { // Generate a unique token for this request, // only valid for a period of now +- m.ttl. - token := m.token(&hash, userAgent, clientIP) - - // For unique challenge string just use a - // single portion of their 'success' token. - // SHA256 is not yet cracked, this is not an - // application of a hash requiring serious - // cryptographic security and it rotates on - // a TTL basis, so it should be fine. - challenge := token[:len(token)/4] + token := m.getToken(&hash, userAgent, clientIP) // Check for a provided success token. cookie, _ := c.Cookie("gts-nollamas") @@ -169,8 +183,8 @@ func (m *nollamas) Serve(c *gin.Context) { // Check whether passed cookie // is the expected success token. if subtle.ConstantTimeCompare( - byteutil.S2B(token), byteutil.S2B(cookie), + byteutil.S2B(token), ) == 1 { // They passed us a valid, expected @@ -185,10 +199,15 @@ func (m *nollamas) Serve(c *gin.Context) { // handlers from being called. c.Abort() + // Generate challenge for this unique (yet deterministic) token, + // returning seed, wanted 'challenge' result and expected solution. + seed, challenge, solution := m.getChallenge(&hash, token) + // Prepare new log entry. l := log.WithContext(ctx). WithField("userAgent", userAgent). - WithField("challenge", challenge) + WithField("seed", seed). + WithField("rounds", solution) // Extract and parse query. query := c.Request.URL.Query() @@ -196,32 +215,28 @@ func (m *nollamas) Serve(c *gin.Context) { // Check query to see if an in-progress // challenge solution has been provided. nonce := query.Get("nollamas_solution") - if nonce == "" || len(nonce) > 20 { + if nonce == "" { - // noting that here, 20 is - // max integer string len. - // - // An invalid solution string, just - // present them with new challenge. + // No solution given, likely new client! + // Simply present them with challenge. 
+ m.renderChallenge(c, seed, challenge) l.Info("posing new challenge") - m.renderChallenge(c, challenge) return } - // Reset the hash. - hash.hash.Reset() + // Check nonce matches expected. + if subtle.ConstantTimeCompare( + byteutil.S2B(solution), + byteutil.S2B(nonce), + ) != 1 { - // Check challenge+nonce as possible solution. - if !m.checkChallenge(&hash, challenge, nonce) { - - // They failed challenge, - // re-present challenge page. - l.Info("invalid solution provided") - m.renderChallenge(c, challenge) + // Their nonce failed, re-challenge them. + m.renderChallenge(c, seed, challenge) + l.Infof("invalid solution provided: %s", nonce) return } - l.Infof("challenge passed: %s", nonce) + l.Info("challenge passed") // Drop solution query and encode. query.Del("nollamas_solution") @@ -233,7 +248,7 @@ func (m *nollamas) Serve(c *gin.Context) { c.Redirect(http.StatusTemporaryRedirect, c.Request.URL.RequestURI()) } -func (m *nollamas) renderChallenge(c *gin.Context, challenge string) { +func (m *nollamas) renderChallenge(c *gin.Context, seed, challenge string) { // Fetch current instance information for templating vars. instance, errWithCode := m.getInstanceV1(c.Request.Context()) if errWithCode != nil { @@ -252,8 +267,8 @@ func (m *nollamas) renderChallenge(c *gin.Context, challenge string) { "/assets/Fork-Awesome/css/fork-awesome.min.css", }, Extra: map[string]any{ - "challenge": challenge, - "difficulty": m.diff, + "seed": seed, + "challenge": challenge, }, Javascript: []apiutil.JavascriptEntry{ { @@ -264,23 +279,25 @@ func (m *nollamas) Serve(c *gin.Context) { }) } -func (m *nollamas) token(hash *hashWithBufs, userAgent, clientIP string) string { - // Use our unique seed to seed hash, +// getToken generates a unique yet deterministic token for given HTTP request +// details, seeded by runtime generated entropy data and ttl rounded timestamp. +func (m *nollamas) getToken(hash *hashWithBufs, userAgent, clientIP string) string { + + // Reset before + // using hash. + hash.Reset() + + // Use our unique entropy to seed hash, // to ensure we have cryptographically // unique, yet deterministic, tokens // generated for a given http client. - hash.hash.Write(m.seed) - - // Include difficulty level in - // hash input data so if config - // changes then token invalidates. - hash.hash.Write([]byte{m.diff}) + hash.write(m.entropy[:]) // Also seed the generated input with // current time rounded to TTL, so our // single comparison handles expiries. now := time.Now().Round(m.ttl).Unix() - hash.hash.Write([]byte{ + hash.write([]byte{ byte(now >> 56), byte(now >> 48), byte(now >> 40), @@ -291,37 +308,78 @@ func (m *nollamas) token(hash *hashWithBufs, userAgent, clientIP string) string byte(now), }) - // Finally, append unique client request data. - hash.hash.Write(byteutil.S2B(userAgent)) - hash.hash.Write(byteutil.S2B(clientIP)) + // Append client request data. + hash.writeString(userAgent) + hash.writeString(clientIP) - // Return hex encoded hash output. - hash.hbuf = hash.hash.Sum(hash.hbuf[:0]) - hex.Encode(hash.ebuf, hash.hbuf) - return string(hash.ebuf) + // Return hex encoded hash. + return hash.EncodedSum() } -func (m *nollamas) checkChallenge(hash *hashWithBufs, challenge, nonce string) bool { - // Hash and encode input challenge with - // proposed nonce as a possible solution.
- hash.hash.Write(byteutil.S2B(challenge)) - hash.hash.Write(byteutil.S2B(nonce)) - hash.hbuf = hash.hash.Sum(hash.hbuf[:0]) - hex.Encode(hash.ebuf, hash.hbuf) - solution := hash.ebuf +// getChallenge prepares a new challenge given the deterministic input token for this request. +// it will return an input seed string, a challenge string which is the end result the client +// should be looking for, and the solution for this such that challenge = hex(sha256(seed + solution)). +// the solution will always be a string-encoded 64bit integer calculated from m.rounds + random jitter. +func (m *nollamas) getChallenge(hash *hashWithBufs, token string) (seed, challenge, solution string) { - // Compiler bound-check hint. - if len(solution) < int(m.diff) { - panic(gtserror.New("BCE")) + // For their unique seed string just use a + // single portion of their 'success' token. + // SHA256 is not yet cracked, this is not an + // application of a hash requiring serious + // cryptographic security and it rotates on + // a TTL basis, so it should be fine. + seed = token[:len(token)/4] + + // BEFORE resetting the hash, get the last + // two bytes of NON-hex-encoded data from + // token generation to use for random jitter. + // This is taken from the end of the hash as + // this is the "unseen" end part of token. + // + // (if we used hex-encoded data it would + // only ever be '0-9' or 'a-z' ASCII chars). + // + // Security-wise, same applies as-above. + jitter := int16(hash.hbuf[len(hash.hbuf)-2]) | + int16(hash.hbuf[len(hash.hbuf)-1])<<8 + + var rounds int64 + switch { + // For some small percentage of + // clients we purposely low-ball + // their rounds required, to make + // it so gaming it with a starting + // nonce value may suddenly fail. + case jitter%37 == 0: + rounds = int64(m.rounds/10) + int64(jitter/10) + case jitter%31 == 0: + rounds = int64(m.rounds/5) + int64(jitter/5) + case jitter%29 == 0: + rounds = int64(m.rounds/3) + int64(jitter/3) + case jitter%13 == 0: + rounds = int64(m.rounds/2) + int64(jitter/2) + + // Determine an appropriate number of hash rounds + // we want the client to perform on input seed. This + // is determined as configured m.rounds +- jitter. + // This will be the 'solution' to create 'challenge'. + default: + rounds = int64(m.rounds) + int64(jitter) //nolint:gosec } - // Check that the first 'diff' - // many chars are indeed zeroes. - for i := range m.diff { - if solution[i] != '0' { - return false - } - } + // Encode (positive) determined hash rounds as string. + solution = strconv.FormatInt(bitutil.Abs64(rounds), 10) - return true + // Reset before + // using hash. + hash.Reset() + + // Calculate the expected result + // of hex(sha256(seed + solution)), + // i.e. the proposed 'challenge'. + hash.writeString(seed) + hash.writeString(solution) + challenge = hash.EncodedSum() + + return } diff --git a/internal/middleware/nollamas_test.go b/internal/middleware/nollamas_test.go index ffe8cbbc8..f6b8e0e02 100644 --- a/internal/middleware/nollamas_test.go +++ b/internal/middleware/nollamas_test.go @@ -95,41 +95,39 @@ func testNoLLaMasMiddleware(t *testing.T, e *gin.Engine, userAgent string) { panic(err) } + var seed string var challenge string - var difficulty uint64 // Parse output body and find the challenge / difficulty. 
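An aside on the new scheme before the test body below: getChallenge derives everything the client needs from the request token alone. The following standalone sketch mirrors that derivation with made-up stand-ins for the token and the jitter value (the real middleware pulls both from per-request hash state), so the arithmetic is easy to follow next to the client-side computeSolution test helper that appears just after.

// Illustrative sketch only: derives a (seed, solution, challenge) triple the
// way getChallenge above describes, using hard-coded stand-in inputs.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strconv"
)

func main() {
	// Stand-in for the per-request token, i.e. hex(sha256(entropy + time + client)).
	token := "3f2c9a7d41e8b06c5a1d9e8f7b6a5c4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9a8b"

	// The seed is simply the first quarter of the token.
	seed := token[:len(token)/4]

	// Configured rounds plus some jitter in [-32768, 32767]; this is the
	// default case in getChallenge, the modulo cases occasionally low-ball it.
	const configuredRounds = 500000
	jitter := int64(12345)
	solution := strconv.FormatInt(configuredRounds+jitter, 10) // "512345"

	// The challenge is what the client must reproduce: hex(sha256(seed + solution)).
	sum := sha256.Sum256([]byte(seed + solution))
	challenge := hex.EncodeToString(sum[:])

	fmt.Println("seed:     ", seed)
	fmt.Println("solution: ", solution)
	fmt.Println("challenge:", challenge)

	// A client counting up from 0 (like computeSolution in the test below,
	// or the JS worker) hits this after roughly 512345 hash+encode iterations.
}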
for _, line := range strings.Split(string(b), "\n") { line = strings.TrimSpace(line) switch { + case strings.HasPrefix(line, "data-nollamas-seed=\""): + line = line[20:] + line = line[:len(line)-1] + seed = line case strings.HasPrefix(line, "data-nollamas-challenge=\""): line = line[25:] line = line[:len(line)-1] challenge = line - case strings.HasPrefix(line, "data-nollamas-difficulty=\""): - line = line[26:] - line = line[:len(line)-1] - var err error - difficulty, err = strconv.ParseUint(line, 10, 8) - assert.NoError(t, err) } } // Ensure valid posed challenge. - assert.NotZero(t, difficulty) assert.NotEmpty(t, challenge) + assert.NotEmpty(t, seed) // Prepare a test request for gin engine. r = httptest.NewRequest("GET", "/", nil) r.Header.Set("User-Agent", userAgent) rw = httptest.NewRecorder() - // Now compute and set solution query paramater. - solution := computeSolution(challenge, difficulty) - r.URL.RawQuery = "nollamas_solution=" + solution - + t.Logf("seed=%s", seed) t.Logf("challenge=%s", challenge) - t.Logf("difficulty=%d", difficulty) + + // Now compute and set solution query paramater. + solution := computeSolution(seed, challenge) + r.URL.RawQuery = "nollamas_solution=" + solution t.Logf("solution=%s", solution) // Pass req through @@ -152,17 +150,14 @@ func testNoLLaMasMiddleware(t *testing.T, e *gin.Engine, userAgent string) { } // computeSolution does the functional equivalent of our nollamas workerTask.js. -func computeSolution(challenge string, diff uint64) string { -outer: +func computeSolution(seed, challenge string) string { for i := 0; ; i++ { solution := strconv.Itoa(i) - combined := challenge + solution + combined := seed + solution hash := sha256.Sum256(byteutil.S2B(combined)) encoded := hex.EncodeToString(hash[:]) - for i := range diff { - if encoded[i] != '0' { - continue outer - } + if encoded != challenge { + continue } return solution } diff --git a/internal/processing/status/create.go b/internal/processing/status/create.go index 23189411a..f9f986256 100644 --- a/internal/processing/status/create.go +++ b/internal/processing/status/create.go @@ -217,10 +217,6 @@ func (p *Processor) Create( return nil, errWithCode } - if errWithCode := p.processThreadID(ctx, status); errWithCode != nil { - return nil, errWithCode - } - // Process the incoming created status visibility. processVisibility(form, requester.Settings.Privacy, status) @@ -444,46 +440,6 @@ func (p *Processor) processInReplyTo( return nil } -func (p *Processor) processThreadID(ctx context.Context, status *gtsmodel.Status) gtserror.WithCode { - // Status takes the thread ID of - // whatever it replies to, if set. - // - // Might not be set if status is local - // and replies to a remote status that - // doesn't have a thread ID yet. - // - // If so, we can just thread from this - // status onwards instead, since this - // is where the relevant part of the - // thread starts, from the perspective - // of our instance at least. - if status.InReplyTo != nil && - status.InReplyTo.ThreadID != "" { - // Just inherit threadID from parent. - status.ThreadID = status.InReplyTo.ThreadID - return nil - } - - // Mark new thread (or threaded - // subsection) starting from here. - threadID := id.NewULID() - if err := p.state.DB.PutThread( - ctx, - >smodel.Thread{ - ID: threadID, - }, - ); err != nil { - err := gtserror.Newf("error inserting new thread in db: %w", err) - return gtserror.NewErrorInternalError(err) - } - - // Future replies to this status - // (if any) will inherit this thread ID. 
- status.ThreadID = threadID - - return nil -} - func processVisibility( form *apimodel.StatusCreateRequest, accountDefaultVis gtsmodel.Visibility, diff --git a/test/envparsing.sh b/test/envparsing.sh index f6a6dcc9a..4fe13b6ac 100755 --- a/test/envparsing.sh +++ b/test/envparsing.sh @@ -20,7 +20,7 @@ EXPECT=$(cat << "EOF" "127.0.0.1/32" ], "advanced-rate-limit-requests": 6969, - "advanced-scraper-deterrence-difficulty": 5, + "advanced-scraper-deterrence-difficulty": 500000, "advanced-scraper-deterrence-enabled": true, "advanced-sender-multiplier": -1, "advanced-throttling-multiplier": -1, @@ -309,7 +309,7 @@ GTS_SYSLOG_ADDRESS='127.0.0.1:6969' \ GTS_ADVANCED_COOKIES_SAMESITE='strict' \ GTS_ADVANCED_RATE_LIMIT_EXCEPTIONS="192.0.2.0/24,127.0.0.1/32" \ GTS_ADVANCED_RATE_LIMIT_REQUESTS=6969 \ -GTS_ADVANCED_SCRAPER_DETERRENCE_DIFFICULTY=5 \ +GTS_ADVANCED_SCRAPER_DETERRENCE_DIFFICULTY=500000 \ GTS_ADVANCED_SCRAPER_DETERRENCE_ENABLED=true \ GTS_ADVANCED_SENDER_MULTIPLIER=-1 \ GTS_ADVANCED_THROTTLING_MULTIPLIER=-1 \ diff --git a/testrig/config.go b/testrig/config.go index 558e5a54c..e7a594996 100644 --- a/testrig/config.go +++ b/testrig/config.go @@ -178,7 +178,7 @@ func testDefaults() config.Configuration { ScraperDeterrence: config.ScraperDeterrenceConfig{ Enabled: envBool("GTS_ADVANCED_SCRAPER_DETERRENCE_ENABLED", false), - Difficulty: uint8(envInt("GTS_ADVANCED_SCRAPER_DETERRENCE_DIFFICULTY", 4)), //nolint + Difficulty: uint32(envInt("GTS_ADVANCED_SCRAPER_DETERRENCE_DIFFICULTY", 100000)), //nolint }, }, diff --git a/testrig/db.go b/testrig/db.go index 4c8a3568d..3a5615f01 100644 --- a/testrig/db.go +++ b/testrig/db.go @@ -25,6 +25,7 @@ import ( "code.superseriousbusiness.org/gotosocial/internal/gtsmodel" "code.superseriousbusiness.org/gotosocial/internal/log" "code.superseriousbusiness.org/gotosocial/internal/state" + "codeberg.org/gruf/go-kv" ) var testModels = []interface{}{ @@ -58,7 +59,6 @@ var testModels = []interface{}{ >smodel.Tag{}, >smodel.Thread{}, >smodel.ThreadMute{}, - >smodel.ThreadToStatus{}, >smodel.User{}, >smodel.UserMute{}, >smodel.VAPIDKeyPair{}, @@ -201,7 +201,10 @@ func StandardDBSetup(db db.DB, accounts map[string]*gtsmodel.Account) { for _, v := range NewTestStatuses() { if err := db.Put(ctx, v); err != nil { - log.Panic(ctx, err) + log.PanicKVs(ctx, kv.Fields{ + {"error", err}, + {"status", v}, + }...) 
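A brief aside on the default jumping from 5 to 500000 in the envparsing and testrig config changes above: the knob now counts hash-encode rounds rather than required leading zeroes, so its scale changes completely. A rough, hedged comparison of expected work under the two interpretations:

// Back-of-the-envelope comparison of the old and new meanings of the
// scraper deterrence "difficulty" value. Expectations only; actual work
// varies per challenge (and the new scheme adds jitter on top).
package main

import (
	"fmt"
	"math"
)

func main() {
	// Old scheme: find a nonce whose hex(sha256(...)) starts with `difficulty`
	// leading '0' characters. Each attempt succeeds with probability
	// 16^-difficulty, so expected attempts are 16^difficulty.
	oldDifficulty := 5.0
	oldExpected := math.Pow(16, oldDifficulty) // ~1,048,576 hashes

	// New scheme: the solution is an integer near the configured round count,
	// and the client counts up from zero, so expected iterations are roughly
	// that value.
	newRounds := 500000.0

	fmt.Printf("old difficulty=5:  ~%.0f expected hashes\n", oldExpected)
	fmt.Printf("new rounds=500000: ~%.0f expected hashes\n", newRounds)
}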
} } @@ -301,12 +304,6 @@ func StandardDBSetup(db db.DB, accounts map[string]*gtsmodel.Account) { } } - for _, v := range NewTestThreadToStatus() { - if err := db.Put(ctx, v); err != nil { - log.Panic(ctx, err) - } - } - for _, v := range NewTestPolls() { if err := db.Put(ctx, v); err != nil { log.Panic(ctx, err) diff --git a/testrig/testmodels.go b/testrig/testmodels.go index a17e2fae6..db221459b 100644 --- a/testrig/testmodels.go +++ b/testrig/testmodels.go @@ -2154,6 +2154,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status { AccountID: "01F8MH17FWEB39HZJ76B6VXSKF", BoostOfID: "01F8MHAMCHF6Y650WCRSCP4WMY", BoostOfAccountID: "01F8MH1H7YV1Z7D2C8K2730QBF", + ThreadID: "01JV7NMMYX2Y38ZP3Y9SYJWT36", Visibility: gtsmodel.VisibilityPublic, Sensitive: util.Ptr(false), CreatedWithApplicationID: "01F8MGXQRHYF5QPMTMXP78QC2F", @@ -2312,6 +2313,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status { Local: util.Ptr(true), AccountURI: "http://localhost:8080/users/the_mighty_zork", AccountID: "01F8MH1H7YV1Z7D2C8K2730QBF", + ThreadID: "01JV7PB3BPGFR13Q9B3XD4DJ5W", Visibility: gtsmodel.VisibilityFollowersOnly, Sensitive: util.Ptr(false), Language: "en", @@ -2378,6 +2380,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status { Local: util.Ptr(true), AccountURI: "http://localhost:8080/users/the_mighty_zork", AccountID: "01F8MH1H7YV1Z7D2C8K2730QBF", + ThreadID: "01JV7NT07NPSJQC703A4D0FK49", EditIDs: []string{"01JDPZCZ2Y9KSGZW0R7ZG8T8Y2", "01JDPZDADMD1T9HKF94RECF7PP"}, Visibility: gtsmodel.VisibilityPublic, Sensitive: util.Ptr(false), @@ -2581,6 +2584,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status { Local: util.Ptr(true), AccountURI: "http://localhost:8080/users/1happyturtle", AccountID: "01F8MH5NBDF2MV7CTC4Q5128HF", + ThreadID: "01JV7NVEBG7Q27WM66SPMBN3Q5", Visibility: gtsmodel.VisibilityPublic, Sensitive: util.Ptr(false), Language: "en", @@ -2604,6 +2608,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status { Local: util.Ptr(true), AccountURI: "http://localhost:8080/users/the_mighty_zork", AccountID: "01F8MH5NBDF2MV7CTC4Q5128HF", + ThreadID: "01JV7NW0CD8Q8EWSF1RPC0AZXT", EditIDs: []string{"01JDPZPBXAX0M02YSEPB21KX4R", "01JDPZPJHKP7E3M0YQXEXPS1YT", "01JDPZPY3F85Y7B78ETRXEMWD9"}, Visibility: gtsmodel.VisibilityPublic, Sensitive: util.Ptr(false), @@ -2629,6 +2634,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status { Local: util.Ptr(true), AccountURI: "http://localhost:8080/users/media_mogul", AccountID: "01JPCMD83Y4WR901094YES3QC5", + ThreadID: "01JV7NXDB7Z6YAFX8ZDKP9C20Y", Visibility: gtsmodel.VisibilityUnlocked, Sensitive: util.Ptr(false), Language: "en", @@ -2653,6 +2659,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status { Local: util.Ptr(true), AccountURI: "http://localhost:8080/users/media_mogul", AccountID: "01JPCMD83Y4WR901094YES3QC5", + ThreadID: "01JV7NXSGST4TYA3SAPADQ04JR", Visibility: gtsmodel.VisibilityUnlocked, Sensitive: util.Ptr(false), Language: "en", @@ -2670,6 +2677,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status { Local: util.Ptr(false), AccountURI: "http://fossbros-anonymous.io/users/foss_satan", AccountID: "01F8MH5ZK5VRH73AKHQM6Y9VNX", + ThreadID: "01JV7NY908EG95DQPJKTXKHCBW", Visibility: gtsmodel.VisibilityUnlocked, Sensitive: util.Ptr(false), Language: "en", @@ -2687,6 +2695,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status { Local: util.Ptr(false), AccountURI: "http://fossbros-anonymous.io/users/foss_satan", AccountID: "01F8MH5ZK5VRH73AKHQM6Y9VNX", + ThreadID: "01JV7NYTCE3384MC1GRVC9V0K0", Visibility: gtsmodel.VisibilityUnlocked, Sensitive: 
util.Ptr(false), Language: "en", @@ -2705,6 +2714,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status { Local: util.Ptr(false), AccountURI: "http://fossbros-anonymous.io/users/foss_satan", AccountID: "01F8MH5ZK5VRH73AKHQM6Y9VNX", + ThreadID: "01JV7NZ58GGQSVVZMK6P7EBADM", Visibility: gtsmodel.VisibilityUnlocked, Sensitive: util.Ptr(false), Language: "en", @@ -2725,6 +2735,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status { Local: util.Ptr(false), AccountURI: "http://fossbros-anonymous.io/users/foss_satan", AccountID: "01F8MH5ZK5VRH73AKHQM6Y9VNX", + ThreadID: "01JV7NZWF1J2BVQ7SWMMRBYC58", EditIDs: []string{"01JDQ07ZZ4FGP13YN8TF63P5A6", "01JDQ08AYQC0G6413VAHA51CV9"}, PollID: "01JDQ0EZ5HM9T4WXRQ5WSVD40J", Visibility: gtsmodel.VisibilityPublic, @@ -2745,6 +2756,7 @@ func NewTestStatuses() map[string]*gtsmodel.Status { AccountURI: "http://example.org/users/Some_User", MentionIDs: []string{"01HE7XQNMKTVC8MNPCE1JGK4J3"}, AccountID: "01FHMQX3GAABWSM0S2VZEC2SWC", + ThreadID: "01HCWDF2Q4HV5QC161C4TGQ0M3", InReplyToID: "01F8MH75CBF9JFX4ZAD54N0W0R", InReplyToAccountID: "01F8MH17FWEB39HZJ76B6VXSKF", InReplyToURI: "http://localhost:8080/users/admin/statuses/01F8MH75CBF9JFX4ZAD54N0W0R", @@ -2985,75 +2997,6 @@ func NewTestThreads() map[string]*gtsmodel.Thread { } } -func NewTestThreadToStatus() []*gtsmodel.ThreadToStatus { - return []*gtsmodel.ThreadToStatus{ - { - ThreadID: "01HCWDF2Q4HV5QC161C4TGQ0M3", - StatusID: "01F8MH75CBF9JFX4ZAD54N0W0R", - }, - { - ThreadID: "01HCWDQ1C7APSEY34B1HFVHVX7", - StatusID: "01F8MHAAY43M6RJ473VQFCVH37", - }, - { - ThreadID: "01HCWDKKBWECZJQ93E262N36VN", - StatusID: "01FF25D5Q0DH7CHD57CTRS6WK0", - }, - { - ThreadID: "01HCWDKKBWECZJQ93E262N36VN", - StatusID: "01F8MHAMCHF6Y650WCRSCP4WMY", - }, - { - ThreadID: "01HCWDVTW3HQWSX66VJQ91Z1RH", - StatusID: "01F8MHAYFKS4KMXF8K5Y1C0KRN", - }, - { - ThreadID: "01HCWDY9PDNHDBDBBFTJKJY8XE", - StatusID: "01F8MHBBN8120SYH7D5S050MGK", - }, - { - ThreadID: "01HCWE0H2GKH794Q7GDPANH91Q", - StatusID: "01F8MH82FYRXD2RC6108DAJ5HB", - }, - { - ThreadID: "01HCWE1ERQSMMVWDD0BE491E2P", - StatusID: "01FCTA44PW9H1TB328S9AQXKDS", - }, - { - ThreadID: "01HCWE2Q24FWCZE41AS77SDFRZ", - StatusID: "01F8MHBQCBTDKN6X5VHGMMN4MA", - }, - { - ThreadID: "01HCWE3P291Z3NJEJVFPW0K9ZQ", - StatusID: "01F8MHC0H0A7XHTVH5F596ZKBM", - }, - { - ThreadID: "01HCWE4P0EW9HBA5WHW97D5YV0", - StatusID: "01F8MHC8VWDRBQR0N1BATDDEM5", - }, - { - ThreadID: "01HCWDKKBWECZJQ93E262N36VN", - StatusID: "01FCQSQ667XHJ9AV9T27SJJSX5", - }, - { - ThreadID: "01HCWDKKBWECZJQ93E262N36VN", - StatusID: "01J2M1HPFSS54S60Y0KYV23KJE", - }, - { - ThreadID: "01HCWE71MGRRDSHBKXFD5DDSWR", - StatusID: "01FN3VJGFH10KR7S2PB0GFJZYG", - }, - { - ThreadID: "01HCWE7ZNC2SS4P05WA5QYED23", - StatusID: "01G20ZM733MGN8J344T4ZDDFY1", - }, - { - ThreadID: "01HCWE4P0EW9HBA5WHW97D5YV0", - StatusID: "01J5QVB9VC76NPPRQ207GG4DRZ", - }, - } -} - // NewTestMentions returns a map of gts model mentions keyed by their name. 
func NewTestMentions() map[string]*gtsmodel.Mention { return map[string]*gtsmodel.Mention{ diff --git a/vendor/codeberg.org/gruf/go-bitutil/LICENSE b/vendor/codeberg.org/gruf/go-bitutil/LICENSE new file mode 100644 index 000000000..e4163ae35 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-bitutil/LICENSE @@ -0,0 +1,9 @@ +MIT License + +Copyright (c) 2022 gruf + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/codeberg.org/gruf/go-bitutil/README.md b/vendor/codeberg.org/gruf/go-bitutil/README.md new file mode 100644 index 000000000..a71c1aa0b --- /dev/null +++ b/vendor/codeberg.org/gruf/go-bitutil/README.md @@ -0,0 +1,3 @@ +# go-bitutil + +This library provides helpful methods and types for performing typical bitwise operations on integers, e.g. packing/unpacking, bit flags. \ No newline at end of file diff --git a/vendor/codeberg.org/gruf/go-bitutil/abs.go b/vendor/codeberg.org/gruf/go-bitutil/abs.go new file mode 100644 index 000000000..f4ce8ad75 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-bitutil/abs.go @@ -0,0 +1,29 @@ +package bitutil + +// Abs8 returns the absolute value of i (calculated without branching). +func Abs8(i int8) int8 { + const bits = 8 + u := uint64(i >> (bits - 1)) + return (i ^ int8(u)) + int8(u&1) +} + +// Abs16 returns the absolute value of i (calculated without branching). +func Abs16(i int16) int16 { + const bits = 16 + u := uint64(i >> (bits - 1)) + return (i ^ int16(u)) + int16(u&1) +} + +// Abs32 returns the absolute value of i (calculated without branching). +func Abs32(i int32) int32 { + const bits = 32 + u := uint64(i >> (bits - 1)) + return (i ^ int32(u)) + int32(u&1) +} + +// Abs64 returns the absolute value of i (calculated without branching). +func Abs64(i int64) int64 { + const bits = 64 + u := uint64(i >> (bits - 1)) + return (i ^ int64(u)) + int64(u&1) +} diff --git a/vendor/codeberg.org/gruf/go-bitutil/flag.go b/vendor/codeberg.org/gruf/go-bitutil/flag.go new file mode 100644 index 000000000..6a5b20d11 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-bitutil/flag.go @@ -0,0 +1,3744 @@ +package bitutil + +import ( + "unsafe" +) + +// Flags8 is a type-casted unsigned integer with helper +// methods for easily managing up to 8 bit-flags. +type Flags8 uint8 + +// Get will fetch the flag bit value at index 'bit'. +func (f Flags8) Get(bit uint8) bool { + mask := Flags8(1) << bit + return (f&mask != 0) +} + +// Set will set the flag bit value at index 'bit'. 
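The vendored Abs* helpers above use a branchless two's-complement trick rather than an if statement. A small standalone walk-through with a concrete value (illustration only, not part of go-bitutil):

// Walk-through of the branchless absolute value used by the vendored Abs8.
package main

import "fmt"

func main() {
	i := int8(-5)

	// Arithmetic shift by (bits-1) copies the sign bit into every position:
	// -1 (all ones) for negative inputs, 0 for non-negative ones.
	u := uint64(i >> 7)

	// For negative i:  i ^ -1 flips every bit (= ^i), and adding u&1 (= 1)
	// completes two's-complement negation, giving -i.
	// For non-negative i: i ^ 0 is i, and u&1 is 0, so i passes through unchanged.
	abs := (i ^ int8(u)) + int8(u&1)

	fmt.Println(abs) // 5
}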
+func (f Flags8) Set(bit uint8) Flags8 { + mask := Flags8(1) << bit + return f | mask +} + +// Unset will unset the flag bit value at index 'bit'. +func (f Flags8) Unset(bit uint8) Flags8 { + mask := Flags8(1) << bit + return f & ^mask +} + +// Get0 will fetch the flag bit value at index 0. +func (f Flags8) Get0() bool { + const mask = Flags8(1) << 0 + return (f&mask != 0) +} + +// Set0 will set the flag bit value at index 0. +func (f Flags8) Set0() Flags8 { + const mask = Flags8(1) << 0 + return f | mask +} + +// Unset0 will unset the flag bit value at index 0. +func (f Flags8) Unset0() Flags8 { + const mask = Flags8(1) << 0 + return f & ^mask +} + +// Get1 will fetch the flag bit value at index 1. +func (f Flags8) Get1() bool { + const mask = Flags8(1) << 1 + return (f&mask != 0) +} + +// Set1 will set the flag bit value at index 1. +func (f Flags8) Set1() Flags8 { + const mask = Flags8(1) << 1 + return f | mask +} + +// Unset1 will unset the flag bit value at index 1. +func (f Flags8) Unset1() Flags8 { + const mask = Flags8(1) << 1 + return f & ^mask +} + +// Get2 will fetch the flag bit value at index 2. +func (f Flags8) Get2() bool { + const mask = Flags8(1) << 2 + return (f&mask != 0) +} + +// Set2 will set the flag bit value at index 2. +func (f Flags8) Set2() Flags8 { + const mask = Flags8(1) << 2 + return f | mask +} + +// Unset2 will unset the flag bit value at index 2. +func (f Flags8) Unset2() Flags8 { + const mask = Flags8(1) << 2 + return f & ^mask +} + +// Get3 will fetch the flag bit value at index 3. +func (f Flags8) Get3() bool { + const mask = Flags8(1) << 3 + return (f&mask != 0) +} + +// Set3 will set the flag bit value at index 3. +func (f Flags8) Set3() Flags8 { + const mask = Flags8(1) << 3 + return f | mask +} + +// Unset3 will unset the flag bit value at index 3. +func (f Flags8) Unset3() Flags8 { + const mask = Flags8(1) << 3 + return f & ^mask +} + +// Get4 will fetch the flag bit value at index 4. +func (f Flags8) Get4() bool { + const mask = Flags8(1) << 4 + return (f&mask != 0) +} + +// Set4 will set the flag bit value at index 4. +func (f Flags8) Set4() Flags8 { + const mask = Flags8(1) << 4 + return f | mask +} + +// Unset4 will unset the flag bit value at index 4. +func (f Flags8) Unset4() Flags8 { + const mask = Flags8(1) << 4 + return f & ^mask +} + +// Get5 will fetch the flag bit value at index 5. +func (f Flags8) Get5() bool { + const mask = Flags8(1) << 5 + return (f&mask != 0) +} + +// Set5 will set the flag bit value at index 5. +func (f Flags8) Set5() Flags8 { + const mask = Flags8(1) << 5 + return f | mask +} + +// Unset5 will unset the flag bit value at index 5. +func (f Flags8) Unset5() Flags8 { + const mask = Flags8(1) << 5 + return f & ^mask +} + +// Get6 will fetch the flag bit value at index 6. +func (f Flags8) Get6() bool { + const mask = Flags8(1) << 6 + return (f&mask != 0) +} + +// Set6 will set the flag bit value at index 6. +func (f Flags8) Set6() Flags8 { + const mask = Flags8(1) << 6 + return f | mask +} + +// Unset6 will unset the flag bit value at index 6. +func (f Flags8) Unset6() Flags8 { + const mask = Flags8(1) << 6 + return f & ^mask +} + +// Get7 will fetch the flag bit value at index 7. +func (f Flags8) Get7() bool { + const mask = Flags8(1) << 7 + return (f&mask != 0) +} + +// Set7 will set the flag bit value at index 7. +func (f Flags8) Set7() Flags8 { + const mask = Flags8(1) << 7 + return f | mask +} + +// Unset7 will unset the flag bit value at index 7. 
+func (f Flags8) Unset7() Flags8 { + const mask = Flags8(1) << 7 + return f & ^mask +} + +// String returns a human readable representation of Flags8. +func (f Flags8) String() string { + var ( + i int + val bool + buf []byte + ) + + // Make a prealloc est. based on longest-possible value + const prealloc = 1 + (len("false ") * 8) - 1 + 1 + buf = make([]byte, prealloc) + + buf[i] = '{' + i++ + + val = f.Get0() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get1() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get2() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get3() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get4() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get5() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get6() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get7() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + buf[i-1] = '}' + buf = buf[:i] + + return *(*string)(unsafe.Pointer(&buf)) +} + +// GoString returns a more verbose human readable representation of Flags8. +func (f Flags8) GoString() string { + var ( + i int + val bool + buf []byte + ) + + // Make a prealloc est. based on longest-possible value + const prealloc = len("bitutil.Flags8{") + (len("7=false ") * 8) - 1 + 1 + buf = make([]byte, prealloc) + + i += copy(buf[i:], "bitutil.Flags8{") + + val = f.Get0() + i += copy(buf[i:], "0=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get1() + i += copy(buf[i:], "1=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get2() + i += copy(buf[i:], "2=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get3() + i += copy(buf[i:], "3=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get4() + i += copy(buf[i:], "4=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get5() + i += copy(buf[i:], "5=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get6() + i += copy(buf[i:], "6=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get7() + i += copy(buf[i:], "7=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + buf[i-1] = '}' + buf = buf[:i] + + return *(*string)(unsafe.Pointer(&buf)) +} + +// Flags16 is a type-casted unsigned integer with helper +// methods for easily managing up to 16 bit-flags. +type Flags16 uint16 + +// Get will fetch the flag bit value at index 'bit'. +func (f Flags16) Get(bit uint8) bool { + mask := Flags16(1) << bit + return (f&mask != 0) +} + +// Set will set the flag bit value at index 'bit'. +func (f Flags16) Set(bit uint8) Flags16 { + mask := Flags16(1) << bit + return f | mask +} + +// Unset will unset the flag bit value at index 'bit'. +func (f Flags16) Unset(bit uint8) Flags16 { + mask := Flags16(1) << bit + return f & ^mask +} + +// Get0 will fetch the flag bit value at index 0. +func (f Flags16) Get0() bool { + const mask = Flags16(1) << 0 + return (f&mask != 0) +} + +// Set0 will set the flag bit value at index 0. +func (f Flags16) Set0() Flags16 { + const mask = Flags16(1) << 0 + return f | mask +} + +// Unset0 will unset the flag bit value at index 0. +func (f Flags16) Unset0() Flags16 { + const mask = Flags16(1) << 0 + return f & ^mask +} + +// Get1 will fetch the flag bit value at index 1. 
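For reference, a short usage sketch of the vendored Flags8 type defined above; the flag indices chosen here are arbitrary:

// Usage sketch for bitutil.Flags8 (illustration only).
package main

import (
	"fmt"

	"codeberg.org/gruf/go-bitutil"
)

func main() {
	var f bitutil.Flags8

	// Set bits 0 and 3, then clear bit 0 again. The methods use value
	// receivers, so each call returns a new Flags8.
	f = f.Set0()
	f = f.Set3()
	f = f.Unset0()

	fmt.Println(f.Get0()) // false
	fmt.Println(f.Get3()) // true
	fmt.Println(f.Get(3)) // true (generic index-based accessor)

	// String() renders each of the 8 bit values.
	fmt.Println(f) // {false false false true false false false false}
}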
+func (f Flags16) Get1() bool { + const mask = Flags16(1) << 1 + return (f&mask != 0) +} + +// Set1 will set the flag bit value at index 1. +func (f Flags16) Set1() Flags16 { + const mask = Flags16(1) << 1 + return f | mask +} + +// Unset1 will unset the flag bit value at index 1. +func (f Flags16) Unset1() Flags16 { + const mask = Flags16(1) << 1 + return f & ^mask +} + +// Get2 will fetch the flag bit value at index 2. +func (f Flags16) Get2() bool { + const mask = Flags16(1) << 2 + return (f&mask != 0) +} + +// Set2 will set the flag bit value at index 2. +func (f Flags16) Set2() Flags16 { + const mask = Flags16(1) << 2 + return f | mask +} + +// Unset2 will unset the flag bit value at index 2. +func (f Flags16) Unset2() Flags16 { + const mask = Flags16(1) << 2 + return f & ^mask +} + +// Get3 will fetch the flag bit value at index 3. +func (f Flags16) Get3() bool { + const mask = Flags16(1) << 3 + return (f&mask != 0) +} + +// Set3 will set the flag bit value at index 3. +func (f Flags16) Set3() Flags16 { + const mask = Flags16(1) << 3 + return f | mask +} + +// Unset3 will unset the flag bit value at index 3. +func (f Flags16) Unset3() Flags16 { + const mask = Flags16(1) << 3 + return f & ^mask +} + +// Get4 will fetch the flag bit value at index 4. +func (f Flags16) Get4() bool { + const mask = Flags16(1) << 4 + return (f&mask != 0) +} + +// Set4 will set the flag bit value at index 4. +func (f Flags16) Set4() Flags16 { + const mask = Flags16(1) << 4 + return f | mask +} + +// Unset4 will unset the flag bit value at index 4. +func (f Flags16) Unset4() Flags16 { + const mask = Flags16(1) << 4 + return f & ^mask +} + +// Get5 will fetch the flag bit value at index 5. +func (f Flags16) Get5() bool { + const mask = Flags16(1) << 5 + return (f&mask != 0) +} + +// Set5 will set the flag bit value at index 5. +func (f Flags16) Set5() Flags16 { + const mask = Flags16(1) << 5 + return f | mask +} + +// Unset5 will unset the flag bit value at index 5. +func (f Flags16) Unset5() Flags16 { + const mask = Flags16(1) << 5 + return f & ^mask +} + +// Get6 will fetch the flag bit value at index 6. +func (f Flags16) Get6() bool { + const mask = Flags16(1) << 6 + return (f&mask != 0) +} + +// Set6 will set the flag bit value at index 6. +func (f Flags16) Set6() Flags16 { + const mask = Flags16(1) << 6 + return f | mask +} + +// Unset6 will unset the flag bit value at index 6. +func (f Flags16) Unset6() Flags16 { + const mask = Flags16(1) << 6 + return f & ^mask +} + +// Get7 will fetch the flag bit value at index 7. +func (f Flags16) Get7() bool { + const mask = Flags16(1) << 7 + return (f&mask != 0) +} + +// Set7 will set the flag bit value at index 7. +func (f Flags16) Set7() Flags16 { + const mask = Flags16(1) << 7 + return f | mask +} + +// Unset7 will unset the flag bit value at index 7. +func (f Flags16) Unset7() Flags16 { + const mask = Flags16(1) << 7 + return f & ^mask +} + +// Get8 will fetch the flag bit value at index 8. +func (f Flags16) Get8() bool { + const mask = Flags16(1) << 8 + return (f&mask != 0) +} + +// Set8 will set the flag bit value at index 8. +func (f Flags16) Set8() Flags16 { + const mask = Flags16(1) << 8 + return f | mask +} + +// Unset8 will unset the flag bit value at index 8. +func (f Flags16) Unset8() Flags16 { + const mask = Flags16(1) << 8 + return f & ^mask +} + +// Get9 will fetch the flag bit value at index 9. +func (f Flags16) Get9() bool { + const mask = Flags16(1) << 9 + return (f&mask != 0) +} + +// Set9 will set the flag bit value at index 9. 
+func (f Flags16) Set9() Flags16 { + const mask = Flags16(1) << 9 + return f | mask +} + +// Unset9 will unset the flag bit value at index 9. +func (f Flags16) Unset9() Flags16 { + const mask = Flags16(1) << 9 + return f & ^mask +} + +// Get10 will fetch the flag bit value at index 10. +func (f Flags16) Get10() bool { + const mask = Flags16(1) << 10 + return (f&mask != 0) +} + +// Set10 will set the flag bit value at index 10. +func (f Flags16) Set10() Flags16 { + const mask = Flags16(1) << 10 + return f | mask +} + +// Unset10 will unset the flag bit value at index 10. +func (f Flags16) Unset10() Flags16 { + const mask = Flags16(1) << 10 + return f & ^mask +} + +// Get11 will fetch the flag bit value at index 11. +func (f Flags16) Get11() bool { + const mask = Flags16(1) << 11 + return (f&mask != 0) +} + +// Set11 will set the flag bit value at index 11. +func (f Flags16) Set11() Flags16 { + const mask = Flags16(1) << 11 + return f | mask +} + +// Unset11 will unset the flag bit value at index 11. +func (f Flags16) Unset11() Flags16 { + const mask = Flags16(1) << 11 + return f & ^mask +} + +// Get12 will fetch the flag bit value at index 12. +func (f Flags16) Get12() bool { + const mask = Flags16(1) << 12 + return (f&mask != 0) +} + +// Set12 will set the flag bit value at index 12. +func (f Flags16) Set12() Flags16 { + const mask = Flags16(1) << 12 + return f | mask +} + +// Unset12 will unset the flag bit value at index 12. +func (f Flags16) Unset12() Flags16 { + const mask = Flags16(1) << 12 + return f & ^mask +} + +// Get13 will fetch the flag bit value at index 13. +func (f Flags16) Get13() bool { + const mask = Flags16(1) << 13 + return (f&mask != 0) +} + +// Set13 will set the flag bit value at index 13. +func (f Flags16) Set13() Flags16 { + const mask = Flags16(1) << 13 + return f | mask +} + +// Unset13 will unset the flag bit value at index 13. +func (f Flags16) Unset13() Flags16 { + const mask = Flags16(1) << 13 + return f & ^mask +} + +// Get14 will fetch the flag bit value at index 14. +func (f Flags16) Get14() bool { + const mask = Flags16(1) << 14 + return (f&mask != 0) +} + +// Set14 will set the flag bit value at index 14. +func (f Flags16) Set14() Flags16 { + const mask = Flags16(1) << 14 + return f | mask +} + +// Unset14 will unset the flag bit value at index 14. +func (f Flags16) Unset14() Flags16 { + const mask = Flags16(1) << 14 + return f & ^mask +} + +// Get15 will fetch the flag bit value at index 15. +func (f Flags16) Get15() bool { + const mask = Flags16(1) << 15 + return (f&mask != 0) +} + +// Set15 will set the flag bit value at index 15. +func (f Flags16) Set15() Flags16 { + const mask = Flags16(1) << 15 + return f | mask +} + +// Unset15 will unset the flag bit value at index 15. +func (f Flags16) Unset15() Flags16 { + const mask = Flags16(1) << 15 + return f & ^mask +} + +// String returns a human readable representation of Flags16. +func (f Flags16) String() string { + var ( + i int + val bool + buf []byte + ) + + // Make a prealloc est. 
based on longest-possible value + const prealloc = 1 + (len("false ") * 16) - 1 + 1 + buf = make([]byte, prealloc) + + buf[i] = '{' + i++ + + val = f.Get0() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get1() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get2() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get3() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get4() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get5() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get6() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get7() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get8() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get9() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get10() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get11() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get12() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get13() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get14() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get15() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + buf[i-1] = '}' + buf = buf[:i] + + return *(*string)(unsafe.Pointer(&buf)) +} + +// GoString returns a more verbose human readable representation of Flags16. +func (f Flags16) GoString() string { + var ( + i int + val bool + buf []byte + ) + + // Make a prealloc est. based on longest-possible value + const prealloc = len("bitutil.Flags16{") + (len("15=false ") * 16) - 1 + 1 + buf = make([]byte, prealloc) + + i += copy(buf[i:], "bitutil.Flags16{") + + val = f.Get0() + i += copy(buf[i:], "0=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get1() + i += copy(buf[i:], "1=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get2() + i += copy(buf[i:], "2=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get3() + i += copy(buf[i:], "3=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get4() + i += copy(buf[i:], "4=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get5() + i += copy(buf[i:], "5=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get6() + i += copy(buf[i:], "6=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get7() + i += copy(buf[i:], "7=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get8() + i += copy(buf[i:], "8=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get9() + i += copy(buf[i:], "9=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get10() + i += copy(buf[i:], "10=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get11() + i += copy(buf[i:], "11=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get12() + i += copy(buf[i:], "12=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get13() + i += copy(buf[i:], "13=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get14() + i += copy(buf[i:], "14=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get15() + i += copy(buf[i:], "15=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + buf[i-1] = '}' + buf = buf[:i] + + return *(*string)(unsafe.Pointer(&buf)) 
+} + +// Flags32 is a type-casted unsigned integer with helper +// methods for easily managing up to 32 bit-flags. +type Flags32 uint32 + +// Get will fetch the flag bit value at index 'bit'. +func (f Flags32) Get(bit uint8) bool { + mask := Flags32(1) << bit + return (f&mask != 0) +} + +// Set will set the flag bit value at index 'bit'. +func (f Flags32) Set(bit uint8) Flags32 { + mask := Flags32(1) << bit + return f | mask +} + +// Unset will unset the flag bit value at index 'bit'. +func (f Flags32) Unset(bit uint8) Flags32 { + mask := Flags32(1) << bit + return f & ^mask +} + +// Get0 will fetch the flag bit value at index 0. +func (f Flags32) Get0() bool { + const mask = Flags32(1) << 0 + return (f&mask != 0) +} + +// Set0 will set the flag bit value at index 0. +func (f Flags32) Set0() Flags32 { + const mask = Flags32(1) << 0 + return f | mask +} + +// Unset0 will unset the flag bit value at index 0. +func (f Flags32) Unset0() Flags32 { + const mask = Flags32(1) << 0 + return f & ^mask +} + +// Get1 will fetch the flag bit value at index 1. +func (f Flags32) Get1() bool { + const mask = Flags32(1) << 1 + return (f&mask != 0) +} + +// Set1 will set the flag bit value at index 1. +func (f Flags32) Set1() Flags32 { + const mask = Flags32(1) << 1 + return f | mask +} + +// Unset1 will unset the flag bit value at index 1. +func (f Flags32) Unset1() Flags32 { + const mask = Flags32(1) << 1 + return f & ^mask +} + +// Get2 will fetch the flag bit value at index 2. +func (f Flags32) Get2() bool { + const mask = Flags32(1) << 2 + return (f&mask != 0) +} + +// Set2 will set the flag bit value at index 2. +func (f Flags32) Set2() Flags32 { + const mask = Flags32(1) << 2 + return f | mask +} + +// Unset2 will unset the flag bit value at index 2. +func (f Flags32) Unset2() Flags32 { + const mask = Flags32(1) << 2 + return f & ^mask +} + +// Get3 will fetch the flag bit value at index 3. +func (f Flags32) Get3() bool { + const mask = Flags32(1) << 3 + return (f&mask != 0) +} + +// Set3 will set the flag bit value at index 3. +func (f Flags32) Set3() Flags32 { + const mask = Flags32(1) << 3 + return f | mask +} + +// Unset3 will unset the flag bit value at index 3. +func (f Flags32) Unset3() Flags32 { + const mask = Flags32(1) << 3 + return f & ^mask +} + +// Get4 will fetch the flag bit value at index 4. +func (f Flags32) Get4() bool { + const mask = Flags32(1) << 4 + return (f&mask != 0) +} + +// Set4 will set the flag bit value at index 4. +func (f Flags32) Set4() Flags32 { + const mask = Flags32(1) << 4 + return f | mask +} + +// Unset4 will unset the flag bit value at index 4. +func (f Flags32) Unset4() Flags32 { + const mask = Flags32(1) << 4 + return f & ^mask +} + +// Get5 will fetch the flag bit value at index 5. +func (f Flags32) Get5() bool { + const mask = Flags32(1) << 5 + return (f&mask != 0) +} + +// Set5 will set the flag bit value at index 5. +func (f Flags32) Set5() Flags32 { + const mask = Flags32(1) << 5 + return f | mask +} + +// Unset5 will unset the flag bit value at index 5. +func (f Flags32) Unset5() Flags32 { + const mask = Flags32(1) << 5 + return f & ^mask +} + +// Get6 will fetch the flag bit value at index 6. +func (f Flags32) Get6() bool { + const mask = Flags32(1) << 6 + return (f&mask != 0) +} + +// Set6 will set the flag bit value at index 6. +func (f Flags32) Set6() Flags32 { + const mask = Flags32(1) << 6 + return f | mask +} + +// Unset6 will unset the flag bit value at index 6. 
+func (f Flags32) Unset6() Flags32 { + const mask = Flags32(1) << 6 + return f & ^mask +} + +// Get7 will fetch the flag bit value at index 7. +func (f Flags32) Get7() bool { + const mask = Flags32(1) << 7 + return (f&mask != 0) +} + +// Set7 will set the flag bit value at index 7. +func (f Flags32) Set7() Flags32 { + const mask = Flags32(1) << 7 + return f | mask +} + +// Unset7 will unset the flag bit value at index 7. +func (f Flags32) Unset7() Flags32 { + const mask = Flags32(1) << 7 + return f & ^mask +} + +// Get8 will fetch the flag bit value at index 8. +func (f Flags32) Get8() bool { + const mask = Flags32(1) << 8 + return (f&mask != 0) +} + +// Set8 will set the flag bit value at index 8. +func (f Flags32) Set8() Flags32 { + const mask = Flags32(1) << 8 + return f | mask +} + +// Unset8 will unset the flag bit value at index 8. +func (f Flags32) Unset8() Flags32 { + const mask = Flags32(1) << 8 + return f & ^mask +} + +// Get9 will fetch the flag bit value at index 9. +func (f Flags32) Get9() bool { + const mask = Flags32(1) << 9 + return (f&mask != 0) +} + +// Set9 will set the flag bit value at index 9. +func (f Flags32) Set9() Flags32 { + const mask = Flags32(1) << 9 + return f | mask +} + +// Unset9 will unset the flag bit value at index 9. +func (f Flags32) Unset9() Flags32 { + const mask = Flags32(1) << 9 + return f & ^mask +} + +// Get10 will fetch the flag bit value at index 10. +func (f Flags32) Get10() bool { + const mask = Flags32(1) << 10 + return (f&mask != 0) +} + +// Set10 will set the flag bit value at index 10. +func (f Flags32) Set10() Flags32 { + const mask = Flags32(1) << 10 + return f | mask +} + +// Unset10 will unset the flag bit value at index 10. +func (f Flags32) Unset10() Flags32 { + const mask = Flags32(1) << 10 + return f & ^mask +} + +// Get11 will fetch the flag bit value at index 11. +func (f Flags32) Get11() bool { + const mask = Flags32(1) << 11 + return (f&mask != 0) +} + +// Set11 will set the flag bit value at index 11. +func (f Flags32) Set11() Flags32 { + const mask = Flags32(1) << 11 + return f | mask +} + +// Unset11 will unset the flag bit value at index 11. +func (f Flags32) Unset11() Flags32 { + const mask = Flags32(1) << 11 + return f & ^mask +} + +// Get12 will fetch the flag bit value at index 12. +func (f Flags32) Get12() bool { + const mask = Flags32(1) << 12 + return (f&mask != 0) +} + +// Set12 will set the flag bit value at index 12. +func (f Flags32) Set12() Flags32 { + const mask = Flags32(1) << 12 + return f | mask +} + +// Unset12 will unset the flag bit value at index 12. +func (f Flags32) Unset12() Flags32 { + const mask = Flags32(1) << 12 + return f & ^mask +} + +// Get13 will fetch the flag bit value at index 13. +func (f Flags32) Get13() bool { + const mask = Flags32(1) << 13 + return (f&mask != 0) +} + +// Set13 will set the flag bit value at index 13. +func (f Flags32) Set13() Flags32 { + const mask = Flags32(1) << 13 + return f | mask +} + +// Unset13 will unset the flag bit value at index 13. +func (f Flags32) Unset13() Flags32 { + const mask = Flags32(1) << 13 + return f & ^mask +} + +// Get14 will fetch the flag bit value at index 14. +func (f Flags32) Get14() bool { + const mask = Flags32(1) << 14 + return (f&mask != 0) +} + +// Set14 will set the flag bit value at index 14. +func (f Flags32) Set14() Flags32 { + const mask = Flags32(1) << 14 + return f | mask +} + +// Unset14 will unset the flag bit value at index 14. 
+func (f Flags32) Unset14() Flags32 { + const mask = Flags32(1) << 14 + return f & ^mask +} + +// Get15 will fetch the flag bit value at index 15. +func (f Flags32) Get15() bool { + const mask = Flags32(1) << 15 + return (f&mask != 0) +} + +// Set15 will set the flag bit value at index 15. +func (f Flags32) Set15() Flags32 { + const mask = Flags32(1) << 15 + return f | mask +} + +// Unset15 will unset the flag bit value at index 15. +func (f Flags32) Unset15() Flags32 { + const mask = Flags32(1) << 15 + return f & ^mask +} + +// Get16 will fetch the flag bit value at index 16. +func (f Flags32) Get16() bool { + const mask = Flags32(1) << 16 + return (f&mask != 0) +} + +// Set16 will set the flag bit value at index 16. +func (f Flags32) Set16() Flags32 { + const mask = Flags32(1) << 16 + return f | mask +} + +// Unset16 will unset the flag bit value at index 16. +func (f Flags32) Unset16() Flags32 { + const mask = Flags32(1) << 16 + return f & ^mask +} + +// Get17 will fetch the flag bit value at index 17. +func (f Flags32) Get17() bool { + const mask = Flags32(1) << 17 + return (f&mask != 0) +} + +// Set17 will set the flag bit value at index 17. +func (f Flags32) Set17() Flags32 { + const mask = Flags32(1) << 17 + return f | mask +} + +// Unset17 will unset the flag bit value at index 17. +func (f Flags32) Unset17() Flags32 { + const mask = Flags32(1) << 17 + return f & ^mask +} + +// Get18 will fetch the flag bit value at index 18. +func (f Flags32) Get18() bool { + const mask = Flags32(1) << 18 + return (f&mask != 0) +} + +// Set18 will set the flag bit value at index 18. +func (f Flags32) Set18() Flags32 { + const mask = Flags32(1) << 18 + return f | mask +} + +// Unset18 will unset the flag bit value at index 18. +func (f Flags32) Unset18() Flags32 { + const mask = Flags32(1) << 18 + return f & ^mask +} + +// Get19 will fetch the flag bit value at index 19. +func (f Flags32) Get19() bool { + const mask = Flags32(1) << 19 + return (f&mask != 0) +} + +// Set19 will set the flag bit value at index 19. +func (f Flags32) Set19() Flags32 { + const mask = Flags32(1) << 19 + return f | mask +} + +// Unset19 will unset the flag bit value at index 19. +func (f Flags32) Unset19() Flags32 { + const mask = Flags32(1) << 19 + return f & ^mask +} + +// Get20 will fetch the flag bit value at index 20. +func (f Flags32) Get20() bool { + const mask = Flags32(1) << 20 + return (f&mask != 0) +} + +// Set20 will set the flag bit value at index 20. +func (f Flags32) Set20() Flags32 { + const mask = Flags32(1) << 20 + return f | mask +} + +// Unset20 will unset the flag bit value at index 20. +func (f Flags32) Unset20() Flags32 { + const mask = Flags32(1) << 20 + return f & ^mask +} + +// Get21 will fetch the flag bit value at index 21. +func (f Flags32) Get21() bool { + const mask = Flags32(1) << 21 + return (f&mask != 0) +} + +// Set21 will set the flag bit value at index 21. +func (f Flags32) Set21() Flags32 { + const mask = Flags32(1) << 21 + return f | mask +} + +// Unset21 will unset the flag bit value at index 21. +func (f Flags32) Unset21() Flags32 { + const mask = Flags32(1) << 21 + return f & ^mask +} + +// Get22 will fetch the flag bit value at index 22. +func (f Flags32) Get22() bool { + const mask = Flags32(1) << 22 + return (f&mask != 0) +} + +// Set22 will set the flag bit value at index 22. +func (f Flags32) Set22() Flags32 { + const mask = Flags32(1) << 22 + return f | mask +} + +// Unset22 will unset the flag bit value at index 22. 
+func (f Flags32) Unset22() Flags32 { + const mask = Flags32(1) << 22 + return f & ^mask +} + +// Get23 will fetch the flag bit value at index 23. +func (f Flags32) Get23() bool { + const mask = Flags32(1) << 23 + return (f&mask != 0) +} + +// Set23 will set the flag bit value at index 23. +func (f Flags32) Set23() Flags32 { + const mask = Flags32(1) << 23 + return f | mask +} + +// Unset23 will unset the flag bit value at index 23. +func (f Flags32) Unset23() Flags32 { + const mask = Flags32(1) << 23 + return f & ^mask +} + +// Get24 will fetch the flag bit value at index 24. +func (f Flags32) Get24() bool { + const mask = Flags32(1) << 24 + return (f&mask != 0) +} + +// Set24 will set the flag bit value at index 24. +func (f Flags32) Set24() Flags32 { + const mask = Flags32(1) << 24 + return f | mask +} + +// Unset24 will unset the flag bit value at index 24. +func (f Flags32) Unset24() Flags32 { + const mask = Flags32(1) << 24 + return f & ^mask +} + +// Get25 will fetch the flag bit value at index 25. +func (f Flags32) Get25() bool { + const mask = Flags32(1) << 25 + return (f&mask != 0) +} + +// Set25 will set the flag bit value at index 25. +func (f Flags32) Set25() Flags32 { + const mask = Flags32(1) << 25 + return f | mask +} + +// Unset25 will unset the flag bit value at index 25. +func (f Flags32) Unset25() Flags32 { + const mask = Flags32(1) << 25 + return f & ^mask +} + +// Get26 will fetch the flag bit value at index 26. +func (f Flags32) Get26() bool { + const mask = Flags32(1) << 26 + return (f&mask != 0) +} + +// Set26 will set the flag bit value at index 26. +func (f Flags32) Set26() Flags32 { + const mask = Flags32(1) << 26 + return f | mask +} + +// Unset26 will unset the flag bit value at index 26. +func (f Flags32) Unset26() Flags32 { + const mask = Flags32(1) << 26 + return f & ^mask +} + +// Get27 will fetch the flag bit value at index 27. +func (f Flags32) Get27() bool { + const mask = Flags32(1) << 27 + return (f&mask != 0) +} + +// Set27 will set the flag bit value at index 27. +func (f Flags32) Set27() Flags32 { + const mask = Flags32(1) << 27 + return f | mask +} + +// Unset27 will unset the flag bit value at index 27. +func (f Flags32) Unset27() Flags32 { + const mask = Flags32(1) << 27 + return f & ^mask +} + +// Get28 will fetch the flag bit value at index 28. +func (f Flags32) Get28() bool { + const mask = Flags32(1) << 28 + return (f&mask != 0) +} + +// Set28 will set the flag bit value at index 28. +func (f Flags32) Set28() Flags32 { + const mask = Flags32(1) << 28 + return f | mask +} + +// Unset28 will unset the flag bit value at index 28. +func (f Flags32) Unset28() Flags32 { + const mask = Flags32(1) << 28 + return f & ^mask +} + +// Get29 will fetch the flag bit value at index 29. +func (f Flags32) Get29() bool { + const mask = Flags32(1) << 29 + return (f&mask != 0) +} + +// Set29 will set the flag bit value at index 29. +func (f Flags32) Set29() Flags32 { + const mask = Flags32(1) << 29 + return f | mask +} + +// Unset29 will unset the flag bit value at index 29. +func (f Flags32) Unset29() Flags32 { + const mask = Flags32(1) << 29 + return f & ^mask +} + +// Get30 will fetch the flag bit value at index 30. +func (f Flags32) Get30() bool { + const mask = Flags32(1) << 30 + return (f&mask != 0) +} + +// Set30 will set the flag bit value at index 30. +func (f Flags32) Set30() Flags32 { + const mask = Flags32(1) << 30 + return f | mask +} + +// Unset30 will unset the flag bit value at index 30. 
+func (f Flags32) Unset30() Flags32 { + const mask = Flags32(1) << 30 + return f & ^mask +} + +// Get31 will fetch the flag bit value at index 31. +func (f Flags32) Get31() bool { + const mask = Flags32(1) << 31 + return (f&mask != 0) +} + +// Set31 will set the flag bit value at index 31. +func (f Flags32) Set31() Flags32 { + const mask = Flags32(1) << 31 + return f | mask +} + +// Unset31 will unset the flag bit value at index 31. +func (f Flags32) Unset31() Flags32 { + const mask = Flags32(1) << 31 + return f & ^mask +} + +// String returns a human readable representation of Flags32. +func (f Flags32) String() string { + var ( + i int + val bool + buf []byte + ) + + // Make a prealloc est. based on longest-possible value + const prealloc = 1 + (len("false ") * 32) - 1 + 1 + buf = make([]byte, prealloc) + + buf[i] = '{' + i++ + + val = f.Get0() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get1() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get2() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get3() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get4() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get5() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get6() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get7() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get8() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get9() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get10() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get11() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get12() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get13() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get14() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get15() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get16() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get17() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get18() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get19() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get20() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get21() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get22() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get23() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get24() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get25() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get26() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get27() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get28() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get29() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get30() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get31() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + buf[i-1] = '}' + buf = buf[:i] + + return *(*string)(unsafe.Pointer(&buf)) +} + +// GoString returns a more verbose human readable representation of Flags32. 
+func (f Flags32) GoString() string { + var ( + i int + val bool + buf []byte + ) + + // Make a prealloc est. based on longest-possible value + const prealloc = len("bitutil.Flags32{") + (len("31=false ") * 32) - 1 + 1 + buf = make([]byte, prealloc) + + i += copy(buf[i:], "bitutil.Flags32{") + + val = f.Get0() + i += copy(buf[i:], "0=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get1() + i += copy(buf[i:], "1=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get2() + i += copy(buf[i:], "2=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get3() + i += copy(buf[i:], "3=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get4() + i += copy(buf[i:], "4=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get5() + i += copy(buf[i:], "5=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get6() + i += copy(buf[i:], "6=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get7() + i += copy(buf[i:], "7=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get8() + i += copy(buf[i:], "8=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get9() + i += copy(buf[i:], "9=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get10() + i += copy(buf[i:], "10=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get11() + i += copy(buf[i:], "11=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get12() + i += copy(buf[i:], "12=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get13() + i += copy(buf[i:], "13=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get14() + i += copy(buf[i:], "14=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get15() + i += copy(buf[i:], "15=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get16() + i += copy(buf[i:], "16=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get17() + i += copy(buf[i:], "17=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get18() + i += copy(buf[i:], "18=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get19() + i += copy(buf[i:], "19=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get20() + i += copy(buf[i:], "20=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get21() + i += copy(buf[i:], "21=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get22() + i += copy(buf[i:], "22=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get23() + i += copy(buf[i:], "23=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get24() + i += copy(buf[i:], "24=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get25() + i += copy(buf[i:], "25=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get26() + i += copy(buf[i:], "26=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get27() + i += copy(buf[i:], "27=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get28() + i += copy(buf[i:], "28=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get29() + i += copy(buf[i:], "29=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get30() + i += copy(buf[i:], "30=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get31() + i += 
copy(buf[i:], "31=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + buf[i-1] = '}' + buf = buf[:i] + + return *(*string)(unsafe.Pointer(&buf)) +} + +// Flags64 is a type-casted unsigned integer with helper +// methods for easily managing up to 64 bit-flags. +type Flags64 uint64 + +// Get will fetch the flag bit value at index 'bit'. +func (f Flags64) Get(bit uint8) bool { + mask := Flags64(1) << bit + return (f&mask != 0) +} + +// Set will set the flag bit value at index 'bit'. +func (f Flags64) Set(bit uint8) Flags64 { + mask := Flags64(1) << bit + return f | mask +} + +// Unset will unset the flag bit value at index 'bit'. +func (f Flags64) Unset(bit uint8) Flags64 { + mask := Flags64(1) << bit + return f & ^mask +} + +// Get0 will fetch the flag bit value at index 0. +func (f Flags64) Get0() bool { + const mask = Flags64(1) << 0 + return (f&mask != 0) +} + +// Set0 will set the flag bit value at index 0. +func (f Flags64) Set0() Flags64 { + const mask = Flags64(1) << 0 + return f | mask +} + +// Unset0 will unset the flag bit value at index 0. +func (f Flags64) Unset0() Flags64 { + const mask = Flags64(1) << 0 + return f & ^mask +} + +// Get1 will fetch the flag bit value at index 1. +func (f Flags64) Get1() bool { + const mask = Flags64(1) << 1 + return (f&mask != 0) +} + +// Set1 will set the flag bit value at index 1. +func (f Flags64) Set1() Flags64 { + const mask = Flags64(1) << 1 + return f | mask +} + +// Unset1 will unset the flag bit value at index 1. +func (f Flags64) Unset1() Flags64 { + const mask = Flags64(1) << 1 + return f & ^mask +} + +// Get2 will fetch the flag bit value at index 2. +func (f Flags64) Get2() bool { + const mask = Flags64(1) << 2 + return (f&mask != 0) +} + +// Set2 will set the flag bit value at index 2. +func (f Flags64) Set2() Flags64 { + const mask = Flags64(1) << 2 + return f | mask +} + +// Unset2 will unset the flag bit value at index 2. +func (f Flags64) Unset2() Flags64 { + const mask = Flags64(1) << 2 + return f & ^mask +} + +// Get3 will fetch the flag bit value at index 3. +func (f Flags64) Get3() bool { + const mask = Flags64(1) << 3 + return (f&mask != 0) +} + +// Set3 will set the flag bit value at index 3. +func (f Flags64) Set3() Flags64 { + const mask = Flags64(1) << 3 + return f | mask +} + +// Unset3 will unset the flag bit value at index 3. +func (f Flags64) Unset3() Flags64 { + const mask = Flags64(1) << 3 + return f & ^mask +} + +// Get4 will fetch the flag bit value at index 4. +func (f Flags64) Get4() bool { + const mask = Flags64(1) << 4 + return (f&mask != 0) +} + +// Set4 will set the flag bit value at index 4. +func (f Flags64) Set4() Flags64 { + const mask = Flags64(1) << 4 + return f | mask +} + +// Unset4 will unset the flag bit value at index 4. +func (f Flags64) Unset4() Flags64 { + const mask = Flags64(1) << 4 + return f & ^mask +} + +// Get5 will fetch the flag bit value at index 5. +func (f Flags64) Get5() bool { + const mask = Flags64(1) << 5 + return (f&mask != 0) +} + +// Set5 will set the flag bit value at index 5. +func (f Flags64) Set5() Flags64 { + const mask = Flags64(1) << 5 + return f | mask +} + +// Unset5 will unset the flag bit value at index 5. +func (f Flags64) Unset5() Flags64 { + const mask = Flags64(1) << 5 + return f & ^mask +} + +// Get6 will fetch the flag bit value at index 6. +func (f Flags64) Get6() bool { + const mask = Flags64(1) << 6 + return (f&mask != 0) +} + +// Set6 will set the flag bit value at index 6. 
+func (f Flags64) Set6() Flags64 { + const mask = Flags64(1) << 6 + return f | mask +} + +// Unset6 will unset the flag bit value at index 6. +func (f Flags64) Unset6() Flags64 { + const mask = Flags64(1) << 6 + return f & ^mask +} + +// Get7 will fetch the flag bit value at index 7. +func (f Flags64) Get7() bool { + const mask = Flags64(1) << 7 + return (f&mask != 0) +} + +// Set7 will set the flag bit value at index 7. +func (f Flags64) Set7() Flags64 { + const mask = Flags64(1) << 7 + return f | mask +} + +// Unset7 will unset the flag bit value at index 7. +func (f Flags64) Unset7() Flags64 { + const mask = Flags64(1) << 7 + return f & ^mask +} + +// Get8 will fetch the flag bit value at index 8. +func (f Flags64) Get8() bool { + const mask = Flags64(1) << 8 + return (f&mask != 0) +} + +// Set8 will set the flag bit value at index 8. +func (f Flags64) Set8() Flags64 { + const mask = Flags64(1) << 8 + return f | mask +} + +// Unset8 will unset the flag bit value at index 8. +func (f Flags64) Unset8() Flags64 { + const mask = Flags64(1) << 8 + return f & ^mask +} + +// Get9 will fetch the flag bit value at index 9. +func (f Flags64) Get9() bool { + const mask = Flags64(1) << 9 + return (f&mask != 0) +} + +// Set9 will set the flag bit value at index 9. +func (f Flags64) Set9() Flags64 { + const mask = Flags64(1) << 9 + return f | mask +} + +// Unset9 will unset the flag bit value at index 9. +func (f Flags64) Unset9() Flags64 { + const mask = Flags64(1) << 9 + return f & ^mask +} + +// Get10 will fetch the flag bit value at index 10. +func (f Flags64) Get10() bool { + const mask = Flags64(1) << 10 + return (f&mask != 0) +} + +// Set10 will set the flag bit value at index 10. +func (f Flags64) Set10() Flags64 { + const mask = Flags64(1) << 10 + return f | mask +} + +// Unset10 will unset the flag bit value at index 10. +func (f Flags64) Unset10() Flags64 { + const mask = Flags64(1) << 10 + return f & ^mask +} + +// Get11 will fetch the flag bit value at index 11. +func (f Flags64) Get11() bool { + const mask = Flags64(1) << 11 + return (f&mask != 0) +} + +// Set11 will set the flag bit value at index 11. +func (f Flags64) Set11() Flags64 { + const mask = Flags64(1) << 11 + return f | mask +} + +// Unset11 will unset the flag bit value at index 11. +func (f Flags64) Unset11() Flags64 { + const mask = Flags64(1) << 11 + return f & ^mask +} + +// Get12 will fetch the flag bit value at index 12. +func (f Flags64) Get12() bool { + const mask = Flags64(1) << 12 + return (f&mask != 0) +} + +// Set12 will set the flag bit value at index 12. +func (f Flags64) Set12() Flags64 { + const mask = Flags64(1) << 12 + return f | mask +} + +// Unset12 will unset the flag bit value at index 12. +func (f Flags64) Unset12() Flags64 { + const mask = Flags64(1) << 12 + return f & ^mask +} + +// Get13 will fetch the flag bit value at index 13. +func (f Flags64) Get13() bool { + const mask = Flags64(1) << 13 + return (f&mask != 0) +} + +// Set13 will set the flag bit value at index 13. +func (f Flags64) Set13() Flags64 { + const mask = Flags64(1) << 13 + return f | mask +} + +// Unset13 will unset the flag bit value at index 13. +func (f Flags64) Unset13() Flags64 { + const mask = Flags64(1) << 13 + return f & ^mask +} + +// Get14 will fetch the flag bit value at index 14. +func (f Flags64) Get14() bool { + const mask = Flags64(1) << 14 + return (f&mask != 0) +} + +// Set14 will set the flag bit value at index 14. 
+func (f Flags64) Set14() Flags64 { + const mask = Flags64(1) << 14 + return f | mask +} + +// Unset14 will unset the flag bit value at index 14. +func (f Flags64) Unset14() Flags64 { + const mask = Flags64(1) << 14 + return f & ^mask +} + +// Get15 will fetch the flag bit value at index 15. +func (f Flags64) Get15() bool { + const mask = Flags64(1) << 15 + return (f&mask != 0) +} + +// Set15 will set the flag bit value at index 15. +func (f Flags64) Set15() Flags64 { + const mask = Flags64(1) << 15 + return f | mask +} + +// Unset15 will unset the flag bit value at index 15. +func (f Flags64) Unset15() Flags64 { + const mask = Flags64(1) << 15 + return f & ^mask +} + +// Get16 will fetch the flag bit value at index 16. +func (f Flags64) Get16() bool { + const mask = Flags64(1) << 16 + return (f&mask != 0) +} + +// Set16 will set the flag bit value at index 16. +func (f Flags64) Set16() Flags64 { + const mask = Flags64(1) << 16 + return f | mask +} + +// Unset16 will unset the flag bit value at index 16. +func (f Flags64) Unset16() Flags64 { + const mask = Flags64(1) << 16 + return f & ^mask +} + +// Get17 will fetch the flag bit value at index 17. +func (f Flags64) Get17() bool { + const mask = Flags64(1) << 17 + return (f&mask != 0) +} + +// Set17 will set the flag bit value at index 17. +func (f Flags64) Set17() Flags64 { + const mask = Flags64(1) << 17 + return f | mask +} + +// Unset17 will unset the flag bit value at index 17. +func (f Flags64) Unset17() Flags64 { + const mask = Flags64(1) << 17 + return f & ^mask +} + +// Get18 will fetch the flag bit value at index 18. +func (f Flags64) Get18() bool { + const mask = Flags64(1) << 18 + return (f&mask != 0) +} + +// Set18 will set the flag bit value at index 18. +func (f Flags64) Set18() Flags64 { + const mask = Flags64(1) << 18 + return f | mask +} + +// Unset18 will unset the flag bit value at index 18. +func (f Flags64) Unset18() Flags64 { + const mask = Flags64(1) << 18 + return f & ^mask +} + +// Get19 will fetch the flag bit value at index 19. +func (f Flags64) Get19() bool { + const mask = Flags64(1) << 19 + return (f&mask != 0) +} + +// Set19 will set the flag bit value at index 19. +func (f Flags64) Set19() Flags64 { + const mask = Flags64(1) << 19 + return f | mask +} + +// Unset19 will unset the flag bit value at index 19. +func (f Flags64) Unset19() Flags64 { + const mask = Flags64(1) << 19 + return f & ^mask +} + +// Get20 will fetch the flag bit value at index 20. +func (f Flags64) Get20() bool { + const mask = Flags64(1) << 20 + return (f&mask != 0) +} + +// Set20 will set the flag bit value at index 20. +func (f Flags64) Set20() Flags64 { + const mask = Flags64(1) << 20 + return f | mask +} + +// Unset20 will unset the flag bit value at index 20. +func (f Flags64) Unset20() Flags64 { + const mask = Flags64(1) << 20 + return f & ^mask +} + +// Get21 will fetch the flag bit value at index 21. +func (f Flags64) Get21() bool { + const mask = Flags64(1) << 21 + return (f&mask != 0) +} + +// Set21 will set the flag bit value at index 21. +func (f Flags64) Set21() Flags64 { + const mask = Flags64(1) << 21 + return f | mask +} + +// Unset21 will unset the flag bit value at index 21. +func (f Flags64) Unset21() Flags64 { + const mask = Flags64(1) << 21 + return f & ^mask +} + +// Get22 will fetch the flag bit value at index 22. +func (f Flags64) Get22() bool { + const mask = Flags64(1) << 22 + return (f&mask != 0) +} + +// Set22 will set the flag bit value at index 22. 
+func (f Flags64) Set22() Flags64 { + const mask = Flags64(1) << 22 + return f | mask +} + +// Unset22 will unset the flag bit value at index 22. +func (f Flags64) Unset22() Flags64 { + const mask = Flags64(1) << 22 + return f & ^mask +} + +// Get23 will fetch the flag bit value at index 23. +func (f Flags64) Get23() bool { + const mask = Flags64(1) << 23 + return (f&mask != 0) +} + +// Set23 will set the flag bit value at index 23. +func (f Flags64) Set23() Flags64 { + const mask = Flags64(1) << 23 + return f | mask +} + +// Unset23 will unset the flag bit value at index 23. +func (f Flags64) Unset23() Flags64 { + const mask = Flags64(1) << 23 + return f & ^mask +} + +// Get24 will fetch the flag bit value at index 24. +func (f Flags64) Get24() bool { + const mask = Flags64(1) << 24 + return (f&mask != 0) +} + +// Set24 will set the flag bit value at index 24. +func (f Flags64) Set24() Flags64 { + const mask = Flags64(1) << 24 + return f | mask +} + +// Unset24 will unset the flag bit value at index 24. +func (f Flags64) Unset24() Flags64 { + const mask = Flags64(1) << 24 + return f & ^mask +} + +// Get25 will fetch the flag bit value at index 25. +func (f Flags64) Get25() bool { + const mask = Flags64(1) << 25 + return (f&mask != 0) +} + +// Set25 will set the flag bit value at index 25. +func (f Flags64) Set25() Flags64 { + const mask = Flags64(1) << 25 + return f | mask +} + +// Unset25 will unset the flag bit value at index 25. +func (f Flags64) Unset25() Flags64 { + const mask = Flags64(1) << 25 + return f & ^mask +} + +// Get26 will fetch the flag bit value at index 26. +func (f Flags64) Get26() bool { + const mask = Flags64(1) << 26 + return (f&mask != 0) +} + +// Set26 will set the flag bit value at index 26. +func (f Flags64) Set26() Flags64 { + const mask = Flags64(1) << 26 + return f | mask +} + +// Unset26 will unset the flag bit value at index 26. +func (f Flags64) Unset26() Flags64 { + const mask = Flags64(1) << 26 + return f & ^mask +} + +// Get27 will fetch the flag bit value at index 27. +func (f Flags64) Get27() bool { + const mask = Flags64(1) << 27 + return (f&mask != 0) +} + +// Set27 will set the flag bit value at index 27. +func (f Flags64) Set27() Flags64 { + const mask = Flags64(1) << 27 + return f | mask +} + +// Unset27 will unset the flag bit value at index 27. +func (f Flags64) Unset27() Flags64 { + const mask = Flags64(1) << 27 + return f & ^mask +} + +// Get28 will fetch the flag bit value at index 28. +func (f Flags64) Get28() bool { + const mask = Flags64(1) << 28 + return (f&mask != 0) +} + +// Set28 will set the flag bit value at index 28. +func (f Flags64) Set28() Flags64 { + const mask = Flags64(1) << 28 + return f | mask +} + +// Unset28 will unset the flag bit value at index 28. +func (f Flags64) Unset28() Flags64 { + const mask = Flags64(1) << 28 + return f & ^mask +} + +// Get29 will fetch the flag bit value at index 29. +func (f Flags64) Get29() bool { + const mask = Flags64(1) << 29 + return (f&mask != 0) +} + +// Set29 will set the flag bit value at index 29. +func (f Flags64) Set29() Flags64 { + const mask = Flags64(1) << 29 + return f | mask +} + +// Unset29 will unset the flag bit value at index 29. +func (f Flags64) Unset29() Flags64 { + const mask = Flags64(1) << 29 + return f & ^mask +} + +// Get30 will fetch the flag bit value at index 30. +func (f Flags64) Get30() bool { + const mask = Flags64(1) << 30 + return (f&mask != 0) +} + +// Set30 will set the flag bit value at index 30. 
+func (f Flags64) Set30() Flags64 { + const mask = Flags64(1) << 30 + return f | mask +} + +// Unset30 will unset the flag bit value at index 30. +func (f Flags64) Unset30() Flags64 { + const mask = Flags64(1) << 30 + return f & ^mask +} + +// Get31 will fetch the flag bit value at index 31. +func (f Flags64) Get31() bool { + const mask = Flags64(1) << 31 + return (f&mask != 0) +} + +// Set31 will set the flag bit value at index 31. +func (f Flags64) Set31() Flags64 { + const mask = Flags64(1) << 31 + return f | mask +} + +// Unset31 will unset the flag bit value at index 31. +func (f Flags64) Unset31() Flags64 { + const mask = Flags64(1) << 31 + return f & ^mask +} + +// Get32 will fetch the flag bit value at index 32. +func (f Flags64) Get32() bool { + const mask = Flags64(1) << 32 + return (f&mask != 0) +} + +// Set32 will set the flag bit value at index 32. +func (f Flags64) Set32() Flags64 { + const mask = Flags64(1) << 32 + return f | mask +} + +// Unset32 will unset the flag bit value at index 32. +func (f Flags64) Unset32() Flags64 { + const mask = Flags64(1) << 32 + return f & ^mask +} + +// Get33 will fetch the flag bit value at index 33. +func (f Flags64) Get33() bool { + const mask = Flags64(1) << 33 + return (f&mask != 0) +} + +// Set33 will set the flag bit value at index 33. +func (f Flags64) Set33() Flags64 { + const mask = Flags64(1) << 33 + return f | mask +} + +// Unset33 will unset the flag bit value at index 33. +func (f Flags64) Unset33() Flags64 { + const mask = Flags64(1) << 33 + return f & ^mask +} + +// Get34 will fetch the flag bit value at index 34. +func (f Flags64) Get34() bool { + const mask = Flags64(1) << 34 + return (f&mask != 0) +} + +// Set34 will set the flag bit value at index 34. +func (f Flags64) Set34() Flags64 { + const mask = Flags64(1) << 34 + return f | mask +} + +// Unset34 will unset the flag bit value at index 34. +func (f Flags64) Unset34() Flags64 { + const mask = Flags64(1) << 34 + return f & ^mask +} + +// Get35 will fetch the flag bit value at index 35. +func (f Flags64) Get35() bool { + const mask = Flags64(1) << 35 + return (f&mask != 0) +} + +// Set35 will set the flag bit value at index 35. +func (f Flags64) Set35() Flags64 { + const mask = Flags64(1) << 35 + return f | mask +} + +// Unset35 will unset the flag bit value at index 35. +func (f Flags64) Unset35() Flags64 { + const mask = Flags64(1) << 35 + return f & ^mask +} + +// Get36 will fetch the flag bit value at index 36. +func (f Flags64) Get36() bool { + const mask = Flags64(1) << 36 + return (f&mask != 0) +} + +// Set36 will set the flag bit value at index 36. +func (f Flags64) Set36() Flags64 { + const mask = Flags64(1) << 36 + return f | mask +} + +// Unset36 will unset the flag bit value at index 36. +func (f Flags64) Unset36() Flags64 { + const mask = Flags64(1) << 36 + return f & ^mask +} + +// Get37 will fetch the flag bit value at index 37. +func (f Flags64) Get37() bool { + const mask = Flags64(1) << 37 + return (f&mask != 0) +} + +// Set37 will set the flag bit value at index 37. +func (f Flags64) Set37() Flags64 { + const mask = Flags64(1) << 37 + return f | mask +} + +// Unset37 will unset the flag bit value at index 37. +func (f Flags64) Unset37() Flags64 { + const mask = Flags64(1) << 37 + return f & ^mask +} + +// Get38 will fetch the flag bit value at index 38. +func (f Flags64) Get38() bool { + const mask = Flags64(1) << 38 + return (f&mask != 0) +} + +// Set38 will set the flag bit value at index 38. 
+func (f Flags64) Set38() Flags64 { + const mask = Flags64(1) << 38 + return f | mask +} + +// Unset38 will unset the flag bit value at index 38. +func (f Flags64) Unset38() Flags64 { + const mask = Flags64(1) << 38 + return f & ^mask +} + +// Get39 will fetch the flag bit value at index 39. +func (f Flags64) Get39() bool { + const mask = Flags64(1) << 39 + return (f&mask != 0) +} + +// Set39 will set the flag bit value at index 39. +func (f Flags64) Set39() Flags64 { + const mask = Flags64(1) << 39 + return f | mask +} + +// Unset39 will unset the flag bit value at index 39. +func (f Flags64) Unset39() Flags64 { + const mask = Flags64(1) << 39 + return f & ^mask +} + +// Get40 will fetch the flag bit value at index 40. +func (f Flags64) Get40() bool { + const mask = Flags64(1) << 40 + return (f&mask != 0) +} + +// Set40 will set the flag bit value at index 40. +func (f Flags64) Set40() Flags64 { + const mask = Flags64(1) << 40 + return f | mask +} + +// Unset40 will unset the flag bit value at index 40. +func (f Flags64) Unset40() Flags64 { + const mask = Flags64(1) << 40 + return f & ^mask +} + +// Get41 will fetch the flag bit value at index 41. +func (f Flags64) Get41() bool { + const mask = Flags64(1) << 41 + return (f&mask != 0) +} + +// Set41 will set the flag bit value at index 41. +func (f Flags64) Set41() Flags64 { + const mask = Flags64(1) << 41 + return f | mask +} + +// Unset41 will unset the flag bit value at index 41. +func (f Flags64) Unset41() Flags64 { + const mask = Flags64(1) << 41 + return f & ^mask +} + +// Get42 will fetch the flag bit value at index 42. +func (f Flags64) Get42() bool { + const mask = Flags64(1) << 42 + return (f&mask != 0) +} + +// Set42 will set the flag bit value at index 42. +func (f Flags64) Set42() Flags64 { + const mask = Flags64(1) << 42 + return f | mask +} + +// Unset42 will unset the flag bit value at index 42. +func (f Flags64) Unset42() Flags64 { + const mask = Flags64(1) << 42 + return f & ^mask +} + +// Get43 will fetch the flag bit value at index 43. +func (f Flags64) Get43() bool { + const mask = Flags64(1) << 43 + return (f&mask != 0) +} + +// Set43 will set the flag bit value at index 43. +func (f Flags64) Set43() Flags64 { + const mask = Flags64(1) << 43 + return f | mask +} + +// Unset43 will unset the flag bit value at index 43. +func (f Flags64) Unset43() Flags64 { + const mask = Flags64(1) << 43 + return f & ^mask +} + +// Get44 will fetch the flag bit value at index 44. +func (f Flags64) Get44() bool { + const mask = Flags64(1) << 44 + return (f&mask != 0) +} + +// Set44 will set the flag bit value at index 44. +func (f Flags64) Set44() Flags64 { + const mask = Flags64(1) << 44 + return f | mask +} + +// Unset44 will unset the flag bit value at index 44. +func (f Flags64) Unset44() Flags64 { + const mask = Flags64(1) << 44 + return f & ^mask +} + +// Get45 will fetch the flag bit value at index 45. +func (f Flags64) Get45() bool { + const mask = Flags64(1) << 45 + return (f&mask != 0) +} + +// Set45 will set the flag bit value at index 45. +func (f Flags64) Set45() Flags64 { + const mask = Flags64(1) << 45 + return f | mask +} + +// Unset45 will unset the flag bit value at index 45. +func (f Flags64) Unset45() Flags64 { + const mask = Flags64(1) << 45 + return f & ^mask +} + +// Get46 will fetch the flag bit value at index 46. +func (f Flags64) Get46() bool { + const mask = Flags64(1) << 46 + return (f&mask != 0) +} + +// Set46 will set the flag bit value at index 46. 
+func (f Flags64) Set46() Flags64 { + const mask = Flags64(1) << 46 + return f | mask +} + +// Unset46 will unset the flag bit value at index 46. +func (f Flags64) Unset46() Flags64 { + const mask = Flags64(1) << 46 + return f & ^mask +} + +// Get47 will fetch the flag bit value at index 47. +func (f Flags64) Get47() bool { + const mask = Flags64(1) << 47 + return (f&mask != 0) +} + +// Set47 will set the flag bit value at index 47. +func (f Flags64) Set47() Flags64 { + const mask = Flags64(1) << 47 + return f | mask +} + +// Unset47 will unset the flag bit value at index 47. +func (f Flags64) Unset47() Flags64 { + const mask = Flags64(1) << 47 + return f & ^mask +} + +// Get48 will fetch the flag bit value at index 48. +func (f Flags64) Get48() bool { + const mask = Flags64(1) << 48 + return (f&mask != 0) +} + +// Set48 will set the flag bit value at index 48. +func (f Flags64) Set48() Flags64 { + const mask = Flags64(1) << 48 + return f | mask +} + +// Unset48 will unset the flag bit value at index 48. +func (f Flags64) Unset48() Flags64 { + const mask = Flags64(1) << 48 + return f & ^mask +} + +// Get49 will fetch the flag bit value at index 49. +func (f Flags64) Get49() bool { + const mask = Flags64(1) << 49 + return (f&mask != 0) +} + +// Set49 will set the flag bit value at index 49. +func (f Flags64) Set49() Flags64 { + const mask = Flags64(1) << 49 + return f | mask +} + +// Unset49 will unset the flag bit value at index 49. +func (f Flags64) Unset49() Flags64 { + const mask = Flags64(1) << 49 + return f & ^mask +} + +// Get50 will fetch the flag bit value at index 50. +func (f Flags64) Get50() bool { + const mask = Flags64(1) << 50 + return (f&mask != 0) +} + +// Set50 will set the flag bit value at index 50. +func (f Flags64) Set50() Flags64 { + const mask = Flags64(1) << 50 + return f | mask +} + +// Unset50 will unset the flag bit value at index 50. +func (f Flags64) Unset50() Flags64 { + const mask = Flags64(1) << 50 + return f & ^mask +} + +// Get51 will fetch the flag bit value at index 51. +func (f Flags64) Get51() bool { + const mask = Flags64(1) << 51 + return (f&mask != 0) +} + +// Set51 will set the flag bit value at index 51. +func (f Flags64) Set51() Flags64 { + const mask = Flags64(1) << 51 + return f | mask +} + +// Unset51 will unset the flag bit value at index 51. +func (f Flags64) Unset51() Flags64 { + const mask = Flags64(1) << 51 + return f & ^mask +} + +// Get52 will fetch the flag bit value at index 52. +func (f Flags64) Get52() bool { + const mask = Flags64(1) << 52 + return (f&mask != 0) +} + +// Set52 will set the flag bit value at index 52. +func (f Flags64) Set52() Flags64 { + const mask = Flags64(1) << 52 + return f | mask +} + +// Unset52 will unset the flag bit value at index 52. +func (f Flags64) Unset52() Flags64 { + const mask = Flags64(1) << 52 + return f & ^mask +} + +// Get53 will fetch the flag bit value at index 53. +func (f Flags64) Get53() bool { + const mask = Flags64(1) << 53 + return (f&mask != 0) +} + +// Set53 will set the flag bit value at index 53. +func (f Flags64) Set53() Flags64 { + const mask = Flags64(1) << 53 + return f | mask +} + +// Unset53 will unset the flag bit value at index 53. +func (f Flags64) Unset53() Flags64 { + const mask = Flags64(1) << 53 + return f & ^mask +} + +// Get54 will fetch the flag bit value at index 54. +func (f Flags64) Get54() bool { + const mask = Flags64(1) << 54 + return (f&mask != 0) +} + +// Set54 will set the flag bit value at index 54. 
+func (f Flags64) Set54() Flags64 { + const mask = Flags64(1) << 54 + return f | mask +} + +// Unset54 will unset the flag bit value at index 54. +func (f Flags64) Unset54() Flags64 { + const mask = Flags64(1) << 54 + return f & ^mask +} + +// Get55 will fetch the flag bit value at index 55. +func (f Flags64) Get55() bool { + const mask = Flags64(1) << 55 + return (f&mask != 0) +} + +// Set55 will set the flag bit value at index 55. +func (f Flags64) Set55() Flags64 { + const mask = Flags64(1) << 55 + return f | mask +} + +// Unset55 will unset the flag bit value at index 55. +func (f Flags64) Unset55() Flags64 { + const mask = Flags64(1) << 55 + return f & ^mask +} + +// Get56 will fetch the flag bit value at index 56. +func (f Flags64) Get56() bool { + const mask = Flags64(1) << 56 + return (f&mask != 0) +} + +// Set56 will set the flag bit value at index 56. +func (f Flags64) Set56() Flags64 { + const mask = Flags64(1) << 56 + return f | mask +} + +// Unset56 will unset the flag bit value at index 56. +func (f Flags64) Unset56() Flags64 { + const mask = Flags64(1) << 56 + return f & ^mask +} + +// Get57 will fetch the flag bit value at index 57. +func (f Flags64) Get57() bool { + const mask = Flags64(1) << 57 + return (f&mask != 0) +} + +// Set57 will set the flag bit value at index 57. +func (f Flags64) Set57() Flags64 { + const mask = Flags64(1) << 57 + return f | mask +} + +// Unset57 will unset the flag bit value at index 57. +func (f Flags64) Unset57() Flags64 { + const mask = Flags64(1) << 57 + return f & ^mask +} + +// Get58 will fetch the flag bit value at index 58. +func (f Flags64) Get58() bool { + const mask = Flags64(1) << 58 + return (f&mask != 0) +} + +// Set58 will set the flag bit value at index 58. +func (f Flags64) Set58() Flags64 { + const mask = Flags64(1) << 58 + return f | mask +} + +// Unset58 will unset the flag bit value at index 58. +func (f Flags64) Unset58() Flags64 { + const mask = Flags64(1) << 58 + return f & ^mask +} + +// Get59 will fetch the flag bit value at index 59. +func (f Flags64) Get59() bool { + const mask = Flags64(1) << 59 + return (f&mask != 0) +} + +// Set59 will set the flag bit value at index 59. +func (f Flags64) Set59() Flags64 { + const mask = Flags64(1) << 59 + return f | mask +} + +// Unset59 will unset the flag bit value at index 59. +func (f Flags64) Unset59() Flags64 { + const mask = Flags64(1) << 59 + return f & ^mask +} + +// Get60 will fetch the flag bit value at index 60. +func (f Flags64) Get60() bool { + const mask = Flags64(1) << 60 + return (f&mask != 0) +} + +// Set60 will set the flag bit value at index 60. +func (f Flags64) Set60() Flags64 { + const mask = Flags64(1) << 60 + return f | mask +} + +// Unset60 will unset the flag bit value at index 60. +func (f Flags64) Unset60() Flags64 { + const mask = Flags64(1) << 60 + return f & ^mask +} + +// Get61 will fetch the flag bit value at index 61. +func (f Flags64) Get61() bool { + const mask = Flags64(1) << 61 + return (f&mask != 0) +} + +// Set61 will set the flag bit value at index 61. +func (f Flags64) Set61() Flags64 { + const mask = Flags64(1) << 61 + return f | mask +} + +// Unset61 will unset the flag bit value at index 61. +func (f Flags64) Unset61() Flags64 { + const mask = Flags64(1) << 61 + return f & ^mask +} + +// Get62 will fetch the flag bit value at index 62. +func (f Flags64) Get62() bool { + const mask = Flags64(1) << 62 + return (f&mask != 0) +} + +// Set62 will set the flag bit value at index 62. 
+func (f Flags64) Set62() Flags64 { + const mask = Flags64(1) << 62 + return f | mask +} + +// Unset62 will unset the flag bit value at index 62. +func (f Flags64) Unset62() Flags64 { + const mask = Flags64(1) << 62 + return f & ^mask +} + +// Get63 will fetch the flag bit value at index 63. +func (f Flags64) Get63() bool { + const mask = Flags64(1) << 63 + return (f&mask != 0) +} + +// Set63 will set the flag bit value at index 63. +func (f Flags64) Set63() Flags64 { + const mask = Flags64(1) << 63 + return f | mask +} + +// Unset63 will unset the flag bit value at index 63. +func (f Flags64) Unset63() Flags64 { + const mask = Flags64(1) << 63 + return f & ^mask +} + +// String returns a human readable representation of Flags64. +func (f Flags64) String() string { + var ( + i int + val bool + buf []byte + ) + + // Make a prealloc est. based on longest-possible value + const prealloc = 1 + (len("false ") * 64) - 1 + 1 + buf = make([]byte, prealloc) + + buf[i] = '{' + i++ + + val = f.Get0() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get1() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get2() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get3() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get4() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get5() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get6() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get7() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get8() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get9() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get10() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get11() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get12() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get13() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get14() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get15() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get16() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get17() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get18() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get19() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get20() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get21() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get22() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get23() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get24() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get25() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get26() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get27() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get28() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get29() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get30() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get31() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get32() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get33() + i += 
copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get34() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get35() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get36() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get37() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get38() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get39() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get40() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get41() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get42() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get43() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get44() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get45() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get46() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get47() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get48() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get49() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get50() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get51() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get52() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get53() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get54() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get55() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get56() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get57() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get58() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get59() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get60() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get61() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get62() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get63() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + buf[i-1] = '}' + buf = buf[:i] + + return *(*string)(unsafe.Pointer(&buf)) +} + +// GoString returns a more verbose human readable representation of Flags64. +func (f Flags64) GoString() string { + var ( + i int + val bool + buf []byte + ) + + // Make a prealloc est. 
based on longest-possible value + const prealloc = len("bitutil.Flags64{") + (len("63=false ") * 64) - 1 + 1 + buf = make([]byte, prealloc) + + i += copy(buf[i:], "bitutil.Flags64{") + + val = f.Get0() + i += copy(buf[i:], "0=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get1() + i += copy(buf[i:], "1=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get2() + i += copy(buf[i:], "2=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get3() + i += copy(buf[i:], "3=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get4() + i += copy(buf[i:], "4=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get5() + i += copy(buf[i:], "5=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get6() + i += copy(buf[i:], "6=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get7() + i += copy(buf[i:], "7=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get8() + i += copy(buf[i:], "8=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get9() + i += copy(buf[i:], "9=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get10() + i += copy(buf[i:], "10=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get11() + i += copy(buf[i:], "11=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get12() + i += copy(buf[i:], "12=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get13() + i += copy(buf[i:], "13=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get14() + i += copy(buf[i:], "14=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get15() + i += copy(buf[i:], "15=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get16() + i += copy(buf[i:], "16=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get17() + i += copy(buf[i:], "17=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get18() + i += copy(buf[i:], "18=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get19() + i += copy(buf[i:], "19=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get20() + i += copy(buf[i:], "20=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get21() + i += copy(buf[i:], "21=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get22() + i += copy(buf[i:], "22=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get23() + i += copy(buf[i:], "23=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get24() + i += copy(buf[i:], "24=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get25() + i += copy(buf[i:], "25=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get26() + i += copy(buf[i:], "26=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get27() + i += copy(buf[i:], "27=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get28() + i += copy(buf[i:], "28=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get29() + i += copy(buf[i:], "29=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get30() + i += copy(buf[i:], "30=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get31() + i += copy(buf[i:], "31=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get32() + i += 
copy(buf[i:], "32=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get33() + i += copy(buf[i:], "33=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get34() + i += copy(buf[i:], "34=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get35() + i += copy(buf[i:], "35=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get36() + i += copy(buf[i:], "36=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get37() + i += copy(buf[i:], "37=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get38() + i += copy(buf[i:], "38=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get39() + i += copy(buf[i:], "39=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get40() + i += copy(buf[i:], "40=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get41() + i += copy(buf[i:], "41=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get42() + i += copy(buf[i:], "42=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get43() + i += copy(buf[i:], "43=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get44() + i += copy(buf[i:], "44=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get45() + i += copy(buf[i:], "45=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get46() + i += copy(buf[i:], "46=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get47() + i += copy(buf[i:], "47=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get48() + i += copy(buf[i:], "48=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get49() + i += copy(buf[i:], "49=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get50() + i += copy(buf[i:], "50=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get51() + i += copy(buf[i:], "51=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get52() + i += copy(buf[i:], "52=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get53() + i += copy(buf[i:], "53=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get54() + i += copy(buf[i:], "54=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get55() + i += copy(buf[i:], "55=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get56() + i += copy(buf[i:], "56=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get57() + i += copy(buf[i:], "57=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get58() + i += copy(buf[i:], "58=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get59() + i += copy(buf[i:], "59=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get60() + i += copy(buf[i:], "60=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get61() + i += copy(buf[i:], "61=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get62() + i += copy(buf[i:], "62=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + val = f.Get63() + i += copy(buf[i:], "63=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + + buf[i-1] = '}' + buf = buf[:i] + + return *(*string)(unsafe.Pointer(&buf)) +} + +func bool2str(b bool) string { + if b { + return "true" + } + return "false" +} diff --git 
a/vendor/codeberg.org/gruf/go-bitutil/flag.tpl b/vendor/codeberg.org/gruf/go-bitutil/flag.tpl new file mode 100644 index 000000000..ac00bfa97 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-bitutil/flag.tpl @@ -0,0 +1,117 @@ +package bitutil + +import ( + "strings" + "unsafe" +) + +{{ range $idx, $size := . }} + +// Flags{{ $size.Size }} is a type-casted unsigned integer with helper +// methods for easily managing up to {{ $size.Size }} bit-flags. +type Flags{{ $size.Size }} uint{{ $size.Size }} + +// Get will fetch the flag bit value at index 'bit'. +func (f Flags{{ $size.Size }}) Get(bit uint8) bool { + mask := Flags{{ $size.Size }}(1) << bit + return (f & mask != 0) +} + +// Set will set the flag bit value at index 'bit'. +func (f Flags{{ $size.Size }}) Set(bit uint8) Flags{{ $size.Size }} { + mask := Flags{{ $size.Size }}(1) << bit + return f | mask +} + +// Unset will unset the flag bit value at index 'bit'. +func (f Flags{{ $size.Size }}) Unset(bit uint8) Flags{{ $size.Size }} { + mask := Flags{{ $size.Size }}(1) << bit + return f & ^mask +} + +{{ range $idx := $size.Bits }} + +// Get{{ $idx }} will fetch the flag bit value at index {{ $idx }}. +func (f Flags{{ $size.Size }}) Get{{ $idx }}() bool { + const mask = Flags{{ $size.Size }}(1) << {{ $idx }} + return (f & mask != 0) +} + +// Set{{ $idx }} will set the flag bit value at index {{ $idx }}. +func (f Flags{{ $size.Size }}) Set{{ $idx }}() Flags{{ $size.Size }} { + const mask = Flags{{ $size.Size }}(1) << {{ $idx }} + return f | mask +} + +// Unset{{ $idx }} will unset the flag bit value at index {{ $idx }}. +func (f Flags{{ $size.Size }}) Unset{{ $idx }}() Flags{{ $size.Size }} { + const mask = Flags{{ $size.Size }}(1) << {{ $idx }} + return f & ^mask +} + +{{ end }} + +// String returns a human readable representation of Flags{{ $size.Size }}. +func (f Flags{{ $size.Size }}) String() string { + var ( + i int + val bool + buf []byte + ) + + // Make a prealloc est. based on longest-possible value + const prealloc = 1+(len("false ")*{{ $size.Size }})-1+1 + buf = make([]byte, prealloc) + + buf[i] = '{' + i++ + + {{ range $idx := .Bits }} + val = f.Get{{ $idx }}() + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + {{ end }} + + buf[i-1] = '}' + buf = buf[:i] + + return *(*string)(unsafe.Pointer(&buf)) +} + +// GoString returns a more verbose human readable representation of Flags{{ $size.Size }}. +func (f Flags{{ $size.Size }})GoString() string { + var ( + i int + val bool + buf []byte + ) + + // Make a prealloc est. based on longest-possible value + const prealloc = len("bitutil.Flags{{ $size.Size }}{")+(len("{{ sub $size.Size 1 }}=false ")*{{ $size.Size }})-1+1 + buf = make([]byte, prealloc) + + i += copy(buf[i:], "bitutil.Flags{{ $size.Size }}{") + + {{ range $idx := .Bits }} + val = f.Get{{ $idx }}() + i += copy(buf[i:], "{{ $idx }}=") + i += copy(buf[i:], bool2str(val)) + buf[i] = ' ' + i++ + {{ end }} + + buf[i-1] = '}' + buf = buf[:i] + + return *(*string)(unsafe.Pointer(&buf)) +} + +{{ end }} + +func bool2str(b bool) string { + if b { + return "true" + } + return "false" +} \ No newline at end of file diff --git a/vendor/codeberg.org/gruf/go-bitutil/flag_test.tpl b/vendor/codeberg.org/gruf/go-bitutil/flag_test.tpl new file mode 100644 index 000000000..e85cc2dff --- /dev/null +++ b/vendor/codeberg.org/gruf/go-bitutil/flag_test.tpl @@ -0,0 +1,98 @@ +package bitutil_test + +import ( + "strings" + "testing" + + "codeberg.org/gruf/go-bytes" +) + +{{ range $idx, $size := . 
}} + +func TestFlags{{ $size.Size }}Get(t *testing.T) { + var mask, flags bitutil.Flags{{ $size.Size }} + + {{ range $idx := $size.Bits }} + + mask = bitutil.Flags{{ $size.Size }}(1) << {{ $idx }} + + flags = 0 + + flags |= mask + if !flags.Get({{ $idx }}) { + t.Error("failed .Get() set Flags{{ $size.Size }} bit at index {{ $idx }}") + } + + flags = ^bitutil.Flags{{ $size.Size }}(0) + + flags &= ^mask + if flags.Get({{ $idx }}) { + t.Error("failed .Get() unset Flags{{ $size.Size }} bit at index {{ $idx }}") + } + + flags = 0 + + flags |= mask + if !flags.Get{{ $idx }}() { + t.Error("failed .Get{{ $idx }}() set Flags{{ $size.Size }} bit at index {{ $idx }}") + } + + flags = ^bitutil.Flags{{ $size.Size }}(0) + + flags &= ^mask + if flags.Get{{ $idx }}() { + t.Error("failed .Get{{ $idx }}() unset Flags{{ $size.Size }} bit at index {{ $idx }}") + } + + {{ end }} +} + +func TestFlags{{ $size.Size }}Set(t *testing.T) { + var mask, flags bitutil.Flags{{ $size.Size }} + + {{ range $idx := $size.Bits }} + + mask = bitutil.Flags{{ $size.Size }}(1) << {{ $idx }} + + flags = 0 + + flags = flags.Set({{ $idx }}) + if flags & mask == 0 { + t.Error("failed .Set() Flags{{ $size.Size }} bit at index {{ $idx }}") + } + + flags = 0 + + flags = flags.Set{{ $idx }}() + if flags & mask == 0 { + t.Error("failed .Set{{ $idx }}() Flags{{ $size.Size }} bit at index {{ $idx }}") + } + + {{ end }} +} + +func TestFlags{{ $size.Size }}Unset(t *testing.T) { + var mask, flags bitutil.Flags{{ $size.Size }} + + {{ range $idx := $size.Bits }} + + mask = bitutil.Flags{{ $size.Size }}(1) << {{ $idx }} + + flags = ^bitutil.Flags{{ $size.Size }}(0) + + flags = flags.Unset({{ $idx }}) + if flags & mask != 0 { + t.Error("failed .Unset() Flags{{ $size.Size }} bit at index {{ $idx }}") + } + + flags = ^bitutil.Flags{{ $size.Size }}(0) + + flags = flags.Unset{{ $idx }}() + if flags & mask != 0 { + t.Error("failed .Unset{{ $idx }}() Flags{{ $size.Size }} bit at index {{ $idx }}") + } + + {{ end }} +} + +{{ end }} \ No newline at end of file diff --git a/vendor/codeberg.org/gruf/go-bitutil/pack.go b/vendor/codeberg.org/gruf/go-bitutil/pack.go new file mode 100644 index 000000000..2a57d3294 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-bitutil/pack.go @@ -0,0 +1,85 @@ +package bitutil + +// PackInt8s will pack two signed 8bit integers into an unsigned 16bit integer. +func PackInt8s(i1, i2 int8) uint16 { + const bits = 8 + const mask = (1 << bits) - 1 + return uint16(i1)<<bits | uint16(i2)&mask +} + +// UnpackInt8s will unpack two signed 8bit integers from an unsigned 16bit integer. +func UnpackInt8s(i uint16) (int8, int8) { + const bits = 8 + const mask = (1 << bits) - 1 + return int8(i >> bits), int8(i & mask) +} + +// PackInt16s will pack two signed 16bit integers into an unsigned 32bit integer. +func PackInt16s(i1, i2 int16) uint32 { + const bits = 16 + const mask = (1 << bits) - 1 + return uint32(i1)<<bits | uint32(i2)&mask +} + +// UnpackInt16s will unpack two signed 16bit integers from an unsigned 32bit integer. +func UnpackInt16s(i uint32) (int16, int16) { + const bits = 16 + const mask = (1 << bits) - 1 + return int16(i >> bits), int16(i & mask) +} + +// PackInt32s will pack two signed 32bit integers into an unsigned 64bit integer. +func PackInt32s(i1, i2 int32) uint64 { + const bits = 32 + const mask = (1 << bits) - 1 + return uint64(i1)<<bits | uint64(i2)&mask +} + +// UnpackInt32s will unpack two signed 32bit integers from an unsigned 64bit integer. +func UnpackInt32s(i uint64) (int32, int32) { + const bits = 32 + const mask = (1 << bits) - 1 + return int32(i >> bits), int32(i & mask) +} + +// PackUint8s will pack two unsigned 8bit integers into an unsigned 16bit integer. +func PackUint8s(u1, u2 uint8) uint16 { + const bits = 8 + const mask = (1 << bits) - 1 + return uint16(u1)<<bits | uint16(u2)&mask +} + +// UnpackUint8s will unpack two unsigned 8bit integers from an unsigned 16bit integer. +func UnpackUint8s(u uint16) (uint8, uint8) { + const bits = 8 + const mask = (1 << bits) - 1 + return uint8(u >> bits), uint8(u & mask) +} + +// PackUint16s will pack two unsigned 16bit integers into an unsigned 32bit integer. +func PackUint16s(u1, u2 uint16) uint32 { + const bits = 16 + const mask = (1 << bits) - 1 + return uint32(u1)<<bits | uint32(u2)&mask +} + +// UnpackUint16s will unpack two unsigned 16bit integers from an unsigned 32bit integer. +func UnpackUint16s(u uint32) (uint16, uint16) { + const bits = 16 + const mask = (1 << bits) - 1 + return uint16(u >> bits), uint16(u & mask) +} + +// PackUint32s will pack two unsigned 32bit integers into an unsigned 64bit integer. +func PackUint32s(u1, u2 uint32) uint64 { + const bits = 32 + const mask = (1 << bits) - 1 + return uint64(u1)<<bits | uint64(u2)&mask +} + +// UnpackUint32s will unpack two unsigned 32bit integers from an unsigned 64bit integer. +func UnpackUint32s(u uint64) (uint32, uint32) { + const bits = 32 + const mask = (1 << bits) - 1 + return uint32(u >> bits), uint32(u & mask) +}
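For reference, here is a minimal, illustrative sketch of how the pack/unpack helpers above might be exercised. It is not taken from GoToSocial itself; the values and the (offset, length) naming are invented for the example.

package main

import (
	"fmt"

	"codeberg.org/gruf/go-bitutil"
)

func main() {
	// Pack a signed (offset, length) pair into a single uint64 word.
	packed := bitutil.PackInt32s(-42, 1337)

	// Unpack recovers both halves; the sign of the first value survives
	// the round trip because unpacking truncates back to int32.
	offset, length := bitutil.UnpackInt32s(packed)
	fmt.Println(offset, length) // -42 1337
}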
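Similarly, a small hedged sketch of the Flags32/Flags64 helpers introduced earlier in this diff; the flag indices used here carry no particular meaning and are chosen only for illustration.

package main

import (
	"fmt"

	"codeberg.org/gruf/go-bitutil"
)

func main() {
	var flags bitutil.Flags64

	// The generic accessors take the bit index at runtime...
	flags = flags.Set(3)
	fmt.Println(flags.Get(3)) // true

	// ...while the generated Get63/Set63/Unset63 style variants bake the
	// index in as a compile-time constant mask.
	flags = flags.Set63()
	flags = flags.Unset(3)
	fmt.Println(flags.Get63(), flags.Get3()) // true false

	// String() renders every bit in order, e.g. "{false false ... true}".
	fmt.Println(flags)
}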
diff --git a/vendor/codeberg.org/gruf/go-bitutil/test.tpl b/vendor/codeberg.org/gruf/go-bitutil/test.tpl new file mode 100644 index 000000000..4e659d81f --- /dev/null +++ b/vendor/codeberg.org/gruf/go-bitutil/test.tpl @@ -0,0 +1,60 @@ +package atomics_test + +import ( + "atomic" + "unsafe" + "testing" + + "codeberg.org/gruf/go-atomics" +) + +func Test{{ .Name }}StoreLoad(t *testing.T) { + for _, test := range {{ .Name }}Tests { + val := atomics.New{{ .Name }}() + + val.Store(test.V1) + + if !({{ call .Compare "val.Load()" "test.V1" }}) { + t.Fatalf("failed testing .Store and .Load: expect=%v actual=%v", val.Load(), test.V1) + } + + val.Store(test.V2) + + if !({{ call .Compare "val.Load()" "test.V2" }}) { + t.Fatalf("failed testing .Store and .Load: expect=%v actual=%v", val.Load(), test.V2) + } + } +} + +func Test{{ .Name }}CAS(t *testing.T) { + for _, test := range {{ .Name }}Tests { + val := atomics.New{{ .Name }}() + + val.Store(test.V1) + + if val.CAS(test.V2, test.V1) { + t.Fatalf("failed testing negative .CAS: test=%+v state=%v", test, val.Load()) + } + + if !val.CAS(test.V1, test.V2) { + t.Fatalf("failed testing positive .CAS: test=%+v state=%v", test, val.Load()) + } + } +} + +func Test{{ .Name }}Swap(t *testing.T) { + for _, test := range {{ .Name }}Tests { + val := atomics.New{{ .Name }}() + + val.Store(test.V1) + + if !({{ call .Compare "val.Swap(test.V2)" "test.V1" }}) { + t.Fatal("failed testing .Swap") + } + + if !({{ call .Compare "val.Swap(test.V1)" "test.V2" }}) { + t.Fatal("failed testing .Swap") + } + } +} + diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go deleted file mode 100644 index 48482330e..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/context.go +++ /dev/null @@ -1,62 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled.
-type BackOffContext interface { // nolint: golint - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func getContext(b BackOff) context.Context { - if cb, ok := b.(BackOffContext); ok { - return cb.Context() - } - if tb, ok := b.(*backOffTries); ok { - return getContext(tb.delegate) - } - return context.Background() -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - return b.BackOff.NextBackOff() - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go deleted file mode 100644 index aac99f196..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/exponential.go +++ /dev/null @@ -1,216 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. - -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. -*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff returns Stop. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Stop time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options. 
-type ExponentialBackOffOpts func(*ExponentialBackOff) - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Stop: Stop, - Clock: SystemClock, - } - for _, fn := range opts { - fn(b) - } - b.Reset() - return b -} - -// WithInitialInterval sets the initial interval between retries. -func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.InitialInterval = duration - } -} - -// WithRandomizationFactor sets the randomization factor to add jitter to intervals. -func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.RandomizationFactor = randomizationFactor - } -} - -// WithMultiplier sets the multiplier for increasing the interval after each retry. -func WithMultiplier(multiplier float64) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.Multiplier = multiplier - } -} - -// WithMaxInterval sets the maximum interval between retries. -func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.MaxInterval = duration - } -} - -// WithMaxElapsedTime sets the maximum total time for retries. -func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.MaxElapsedTime = duration - } -} - -// WithRetryStopDuration sets the duration after which retries should stop. -func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.Stop = duration - } -} - -// WithClockProvider sets the clock used to measure time. -func WithClockProvider(clock Clock) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.Clock = clock - } -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -// Reset must be called before using b. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval * (1 ± RandomizationFactor) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - elapsed := b.GetElapsedTime() - next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) - b.incrementCurrentInterval() - if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { - return b.Stop - } - return next -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). 
It is -// safe to call even while the backoff policy is used by a running -// ticker. -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. - if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - if randomizationFactor == 0 { - return currentInterval // make sure no randomness is used when randomizationFactor is 0. - } - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. - return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go deleted file mode 100644 index b9c0c51cd..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/retry.go +++ /dev/null @@ -1,146 +0,0 @@ -package backoff - -import ( - "errors" - "time" -) - -// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). -// The operation will be retried using a backoff policy if it returns an error. -type OperationWithData[T any] func() (T, error) - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -func (o Operation) withEmptyData() OperationWithData[struct{}] { - return func() (struct{}, error) { - return struct{}{}, o() - } -} - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { - return RetryNotify(o, b, nil) -} - -// RetryWithData is like Retry but returns data in the response too. -func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { - return RetryNotifyWithData(o, b, nil) -} - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. 
-func RetryNotify(operation Operation, b BackOff, notify Notify) error { - return RetryNotifyWithTimer(operation, b, notify, nil) -} - -// RetryNotifyWithData is like RetryNotify but returns data in the response too. -func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { - return doRetryNotify(operation, b, notify, nil) -} - -// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer -// for each failed attempt before sleep. -// A default timer that uses system timer is used when nil is passed. -func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { - _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) - return err -} - -// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. -func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { - return doRetryNotify(operation, b, notify, t) -} - -func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { - var ( - err error - next time.Duration - res T - ) - if t == nil { - t = &defaultTimer{} - } - - defer func() { - t.Stop() - }() - - ctx := getContext(b) - - b.Reset() - for { - res, err = operation() - if err == nil { - return res, nil - } - - var permanent *PermanentError - if errors.As(err, &permanent) { - return res, permanent.Err - } - - if next = b.NextBackOff(); next == Stop { - if cerr := ctx.Err(); cerr != nil { - return res, cerr - } - - return res, err - } - - if notify != nil { - notify(err, next) - } - - t.Start(next) - - select { - case <-ctx.Done(): - return res, ctx.Err() - case <-t.C(): - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -func (e *PermanentError) Unwrap() error { - return e.Err -} - -func (e *PermanentError) Is(target error) bool { - _, ok := target.(*PermanentError) - return ok -} - -// Permanent wraps the given err in a *PermanentError. -func Permanent(err error) error { - if err == nil { - return nil - } - return &PermanentError{ - Err: err, - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go deleted file mode 100644 index 28d58ca37..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/tries.go +++ /dev/null @@ -1,38 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. 
-*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries == 0 { - return Stop - } - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v5/.gitignore similarity index 100% rename from vendor/github.com/cenkalti/backoff/v4/.gitignore rename to vendor/github.com/cenkalti/backoff/v5/.gitignore diff --git a/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md new file mode 100644 index 000000000..658c37436 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md @@ -0,0 +1,29 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [5.0.0] - 2024-12-19 + +### Added + +- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry. + +### Changed + +- Retry function now accepts additional options for specifying max number of tries and max elapsed time. +- Retry function now accepts a context.Context. +- Operation function signature changed to return result (any type) and error. + +### Removed + +- RetryNotify* and RetryWithData functions. Only single Retry function remains. +- Optional arguments from ExponentialBackoff constructor. +- Clock and Timer interfaces. + +### Fixed + +- The original error is returned from Retry if there's a PermanentError. (#144) +- The Retry function respects the wrapped PermanentError. (#140) diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v5/LICENSE similarity index 100% rename from vendor/github.com/cenkalti/backoff/v4/LICENSE rename to vendor/github.com/cenkalti/backoff/v5/LICENSE diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v5/README.md similarity index 64% rename from vendor/github.com/cenkalti/backoff/v4/README.md rename to vendor/github.com/cenkalti/backoff/v5/README.md index 9433004a2..4611b1d17 100644 --- a/vendor/github.com/cenkalti/backoff/v4/README.md +++ b/vendor/github.com/cenkalti/backoff/v5/README.md @@ -1,4 +1,4 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls] +# Exponential Backoff [![GoDoc][godoc image]][godoc] This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. @@ -9,9 +9,11 @@ The retries exponentially increase and stop increasing when a certain threshold ## Usage -Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. +Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end. -Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. +For most cases, use `Retry` function. See [example_test.go][example] for an example. + +If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed. 
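As a rough sketch of that common case (the target URL and the not-found handling are placeholder assumptions, not part of the library):

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	// The operation returns a value and an error; it is retried until it
	// succeeds, returns a permanent error, or the retry options stop it.
	op := func() (*http.Response, error) {
		resp, err := http.Get("https://example.com/")
		if err != nil {
			return nil, err // transient: retried with exponential backoff
		}
		if resp.StatusCode == http.StatusNotFound {
			resp.Body.Close()
			return nil, backoff.Permanent(fmt.Errorf("not found")) // never retried
		}
		return resp, nil
	}

	resp, err := backoff.Retry(context.Background(), op,
		backoff.WithBackOff(backoff.NewExponentialBackOff()),
		backoff.WithMaxTries(5),
	)
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```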
## Contributing @@ -19,12 +21,11 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. * Please don't send a PR without opening an issue and discussing it first. * If proposed change is not a common use case, I will probably not accept it. -[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5 [godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master [google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java [exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff -[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples +[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go +[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v5/backoff.go similarity index 87% rename from vendor/github.com/cenkalti/backoff/v4/backoff.go rename to vendor/github.com/cenkalti/backoff/v5/backoff.go index 3676ee405..dd2b24ca7 100644 --- a/vendor/github.com/cenkalti/backoff/v4/backoff.go +++ b/vendor/github.com/cenkalti/backoff/v5/backoff.go @@ -15,16 +15,16 @@ import "time" // BackOff is a backoff policy for retrying an operation. type BackOff interface { // NextBackOff returns the duration to wait before retrying the operation, - // or backoff. Stop to indicate that no more retries should be made. + // backoff.Stop to indicate that no more retries should be made. // // Example usage: // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. - // } + // duration := backoff.NextBackOff() + // if duration == backoff.Stop { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } // NextBackOff() time.Duration diff --git a/vendor/github.com/cenkalti/backoff/v5/error.go b/vendor/github.com/cenkalti/backoff/v5/error.go new file mode 100644 index 000000000..beb2b38a2 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/error.go @@ -0,0 +1,46 @@ +package backoff + +import ( + "fmt" + "time" +) + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} + +// Error returns a string representation of the Permanent error. +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Unwrap returns the wrapped error. +func (e *PermanentError) Unwrap() error { + return e.Err +} + +// RetryAfterError signals that the operation should be retried after the given duration. +type RetryAfterError struct { + Duration time.Duration +} + +// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying. +func RetryAfter(seconds int) error { + return &RetryAfterError{Duration: time.Duration(seconds) * time.Second} +} + +// Error returns a string representation of the RetryAfter error. 
+func (e *RetryAfterError) Error() string { + return fmt.Sprintf("retry after %s", e.Duration) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go new file mode 100644 index 000000000..c1f3e442d --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go @@ -0,0 +1,125 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + + currentInterval time.Duration +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + return &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + } +} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval +} + +// NextBackOff calculates the next backoff interval using the formula: +// +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + if b.currentInterval == 0 { + b.currentInterval = b.InitialInterval + } + + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + return next +} + +// Increments the current interval by multiplying it with the multiplier. 
+func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go new file mode 100644 index 000000000..e43f47fb8 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/retry.go @@ -0,0 +1,139 @@ +package backoff + +import ( + "context" + "errors" + "time" +) + +// DefaultMaxElapsedTime sets a default limit for the total retry duration. +const DefaultMaxElapsedTime = 15 * time.Minute + +// Operation is a function that attempts an operation and may be retried. +type Operation[T any] func() (T, error) + +// Notify is a function called on operation error with the error and backoff duration. +type Notify func(error, time.Duration) + +// retryOptions holds configuration settings for the retry mechanism. +type retryOptions struct { + BackOff BackOff // Strategy for calculating backoff periods. + Timer timer // Timer to manage retry delays. + Notify Notify // Optional function to notify on each retry error. + MaxTries uint // Maximum number of retry attempts. + MaxElapsedTime time.Duration // Maximum total time for all retries. +} + +type RetryOption func(*retryOptions) + +// WithBackOff configures a custom backoff strategy. +func WithBackOff(b BackOff) RetryOption { + return func(args *retryOptions) { + args.BackOff = b + } +} + +// withTimer sets a custom timer for managing delays between retries. +func withTimer(t timer) RetryOption { + return func(args *retryOptions) { + args.Timer = t + } +} + +// WithNotify sets a notification function to handle retry errors. +func WithNotify(n Notify) RetryOption { + return func(args *retryOptions) { + args.Notify = n + } +} + +// WithMaxTries limits the number of retry attempts. +func WithMaxTries(n uint) RetryOption { + return func(args *retryOptions) { + args.MaxTries = n + } +} + +// WithMaxElapsedTime limits the total duration for retry attempts. +func WithMaxElapsedTime(d time.Duration) RetryOption { + return func(args *retryOptions) { + args.MaxElapsedTime = d + } +} + +// Retry attempts the operation until success, a permanent error, or backoff completion. +// It ensures the operation is executed at least once. +// +// Returns the operation result or error if retries are exhausted or context is cancelled. 
+func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) { + // Initialize default retry options. + args := &retryOptions{ + BackOff: NewExponentialBackOff(), + Timer: &defaultTimer{}, + MaxElapsedTime: DefaultMaxElapsedTime, + } + + // Apply user-provided options to the default settings. + for _, opt := range opts { + opt(args) + } + + defer args.Timer.Stop() + + startedAt := time.Now() + args.BackOff.Reset() + for numTries := uint(1); ; numTries++ { + // Execute the operation. + res, err := operation() + if err == nil { + return res, nil + } + + // Stop retrying if maximum tries exceeded. + if args.MaxTries > 0 && numTries >= args.MaxTries { + return res, err + } + + // Handle permanent errors without retrying. + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, err + } + + // Stop retrying if context is cancelled. + if cerr := context.Cause(ctx); cerr != nil { + return res, cerr + } + + // Calculate next backoff duration. + next := args.BackOff.NextBackOff() + if next == Stop { + return res, err + } + + // Reset backoff if RetryAfterError is encountered. + var retryAfter *RetryAfterError + if errors.As(err, &retryAfter) { + next = retryAfter.Duration + args.BackOff.Reset() + } + + // Stop retrying if maximum elapsed time exceeded. + if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime { + return res, err + } + + // Notify on error if a notifier function is provided. + if args.Notify != nil { + args.Notify(err, next) + } + + // Wait for the next backoff period or context cancellation. + args.Timer.Start(next) + select { + case <-args.Timer.C(): + case <-ctx.Done(): + return res, context.Cause(ctx) + } + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v5/ticker.go similarity index 80% rename from vendor/github.com/cenkalti/backoff/v4/ticker.go rename to vendor/github.com/cenkalti/backoff/v5/ticker.go index df9d68bce..f0d4b2ae7 100644 --- a/vendor/github.com/cenkalti/backoff/v4/ticker.go +++ b/vendor/github.com/cenkalti/backoff/v5/ticker.go @@ -1,7 +1,6 @@ package backoff import ( - "context" "sync" "time" ) @@ -14,8 +13,7 @@ type Ticker struct { C <-chan time.Time c chan time.Time b BackOff - ctx context.Context - timer Timer + timer timer stop chan struct{} stopOnce sync.Once } @@ -27,22 +25,12 @@ type Ticker struct { // provided backoff policy (notably calling NextBackOff or Reset) // while the ticker is running. func NewTicker(b BackOff) *Ticker { - return NewTickerWithTimer(b, &defaultTimer{}) -} - -// NewTickerWithTimer returns a new Ticker with a custom timer. -// A default timer that uses system timer is used when nil is passed. -func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { - if timer == nil { - timer = &defaultTimer{} - } c := make(chan time.Time) t := &Ticker{ C: c, c: c, b: b, - ctx: getContext(b), - timer: timer, + timer: &defaultTimer{}, stop: make(chan struct{}), } t.b.Reset() @@ -73,8 +61,6 @@ func (t *Ticker) run() { case <-t.stop: t.c = nil // Prevent future ticks from being sent to the channel. 
return - case <-t.ctx.Done(): - return } } } diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v5/timer.go similarity index 96% rename from vendor/github.com/cenkalti/backoff/v4/timer.go rename to vendor/github.com/cenkalti/backoff/v5/timer.go index 8120d0213..a89530974 100644 --- a/vendor/github.com/cenkalti/backoff/v4/timer.go +++ b/vendor/github.com/cenkalti/backoff/v5/timer.go @@ -2,7 +2,7 @@ package backoff import "time" -type Timer interface { +type timer interface { Start(duration time.Duration) Stop() C() <-chan time.Time diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 41cd4f503..bbe7decf0 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -148,22 +148,20 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh } md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Error("Failed to extract ServerMetadata from context") - } + if ok { + handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseServerMetadata(w, mux, md) + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. + doForwardTrailers := requestAcceptsTrailers(r) - // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 - // Unless the request includes a TE header field indicating "trailers" - // is acceptable, as described in Section 4.3, a server SHOULD NOT - // generate trailer fields that it believes are necessary for the user - // agent to receive. - doForwardTrailers := requestAcceptsTrailers(r) - - if doForwardTrailers { - handleForwardResponseTrailerHeader(w, mux, md) - w.Header().Set("Transfer-Encoding", "chunked") + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, mux, md) + w.Header().Set("Transfer-Encoding", "chunked") + } } st := HTTPStatusFromCode(s.Code()) @@ -176,7 +174,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh grpclog.Errorf("Failed to write response: %v", err) } - if doForwardTrailers { + if ok && requestAcceptsTrailers(r) { handleForwardResponseTrailer(w, mux, md) } } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index f0727cf7c..2f0b9e9e0 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -153,12 +153,10 @@ type responseBody interface { // ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. 
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Error("Failed to extract ServerMetadata from context") + if ok { + handleForwardResponseServerMetadata(w, mux, md) } - handleForwardResponseServerMetadata(w, mux, md) - // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 // Unless the request includes a TE header field indicating "trailers" // is acceptable, as described in Section 4.3, a server SHOULD NOT @@ -166,7 +164,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha // agent to receive. doForwardTrailers := requestAcceptsTrailers(req) - if doForwardTrailers { + if ok && doForwardTrailers { handleForwardResponseTrailerHeader(w, mux, md) w.Header().Set("Transfer-Encoding", "chunked") } @@ -204,7 +202,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha grpclog.Errorf("Failed to write response: %v", err) } - if doForwardTrailers { + if ok && doForwardTrailers { handleForwardResponseTrailer(w, mux, md) } } diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index b4607fe4d..4067978a1 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -345,8 +345,8 @@ func (p *TextParser) startLabelName() stateFn { } // Special summary/histogram treatment. Don't add 'quantile' and 'le' // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && + (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index bd3a39e3e..460f554f2 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool { return a.ResolvedAt(time.Now()) } -// ResolvedAt returns true off the activity interval ended before +// ResolvedAt returns true iff the activity interval ended before // the given timestamp. func (a *Alert) ResolvedAt(ts time.Time) bool { if a.EndsAt.IsZero() { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 73b7aa3e6..de83afe93 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -22,7 +22,7 @@ import ( ) const ( - // AlertNameLabel is the name of the label containing the an alert's name. + // AlertNameLabel is the name of the label containing the alert's name. 
AlertNameLabel = "alertname" // ExportedLabelPrefix is the prefix to prepend to the label names present in @@ -122,7 +122,8 @@ func (ln LabelName) IsValidLegacy() bool { return false } for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck return false } } diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 5766107cf..a6b01755b 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -27,13 +27,25 @@ import ( ) var ( - // NameValidationScheme determines the method of name validation to be used by - // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 - // mode in isolation from other components that don't support UTF-8 may result - // in bugs or other undefined behavior. This value can be set to - // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To - // avoid need for locking, this value should be set once, ideally in an - // init(), before multiple goroutines are started. + // NameValidationScheme determines the global default method of the name + // validation to be used by all calls to IsValidMetricName() and LabelName + // IsValid(). + // + // Deprecated: This variable should not be used and might be removed in the + // far future. If you wish to stick to the legacy name validation use + // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods + // instead. This variable is here as an escape hatch for emergency cases, + // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., + // to delay UTF-8 migrations in time or aid in debugging unforeseen results of + // the change. In such a case, a temporary assignment to `LegacyValidation` + // value in the `init()` function in your main.go or so, could be considered. + // + // Historically we opted for a global variable for feature gating different + // validation schemes in operations that were not otherwise easily adjustable + // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate + // Labels structure or package might have been a better choice. Given the + // change was made and many upgraded the common already, we live this as-is + // with this warning and learning for the future. NameValidationScheme = UTF8Validation // NameEscapingScheme defines the default way that names will be escaped when @@ -50,7 +62,7 @@ var ( type ValidationScheme int const ( - // LegacyValidation is a setting that requirets that metric and label names + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. 
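A minimal sketch of the escape hatch described in the comment above, pinning the legacy scheme from a main package's init() before any goroutines start:

```go
package main

import "github.com/prometheus/common/model"

func init() {
	// Temporarily opt back into the pre-UTF-8 name validation rules.
	// Deliberate use of the deprecated global, as the comment above suggests.
	model.NameValidationScheme = model.LegacyValidation
}

func main() {}
```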
LegacyValidation ValidationScheme = iota diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 126df9e67..3c3bf910f 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,22 +1,45 @@ ---- +version: "2" linters: enable: - - errcheck - - godot - - gosimple - - govet - - ineffassign - - misspell - - revive - - staticcheck - - testifylint - - unused - -linter-settings: - godot: - capital: true - exclude: - # Ignore "See: URL" - - 'See:' - misspell: - locale: US + - forbidigo + - godot + - misspell + - revive + - testifylint + settings: + forbidigo: + forbid: + - pattern: ^fmt\.Print.*$ + msg: Do not commit print statements. + godot: + exclude: + # Ignore "See: URL". + - 'See:' + capital: true + misspell: + locale: US + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + settings: + goimports: + local-prefixes: + - github.com/prometheus/procfs + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 161729235..0ed55c2ba 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))Error Parsing File PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') PROMU := $(FIRST_GOPATH)/bin/promu @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.59.0 +GOLANGCI_LINT_VERSION ?= v2.0.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -275,3 +275,9 @@ $(1)_precheck: exit 1; \ fi endef + +govulncheck: install-govulncheck + govulncheck ./... + +install-govulncheck: + command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 1224816c2..0718239cf 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`. The procfs library includes a set of test fixtures which include many example files from the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make fixtures/.unpacked` or just `make test`. +ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`. 
```bash rm -rf testdata/fixtures make test ``` -Next, make the required changes to the extracted files in the `fixtures` directory. When +Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using `git diff testdata/fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index cdcc8a7cc..2e5334415 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -23,9 +23,9 @@ import ( // Learned from include/uapi/linux/if_arp.h. const ( - // completed entry (ha valid). + // Completed entry (ha valid). ATFComplete = 0x02 - // permanent entry. + // Permanent entry. ATFPermanent = 0x04 // Publish entry. ATFPublish = 0x08 diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 4980c875b..9bdaccc7c 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -24,8 +24,14 @@ type FS struct { isReal bool } -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = fs.DefaultProcMountPoint +const ( + // DefaultMountPoint is the common mount point of the proc filesystem. + DefaultMountPoint = fs.DefaultProcMountPoint + + // SectorSize represents the size of a sector in bytes. + // It is specific to Linux block I/O operations. + SectorSize = 512 +) // NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. // It will error if the mount point directory can't be read or is a file. diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 134767d69..1b5bdbdf8 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -17,7 +17,7 @@ package procfs // isRealProc returns true on architectures that don't have a Type argument -// in their Statfs_t struct -func isRealProc(mountPoint string) (bool, error) { +// in their Statfs_t struct. 
+func isRealProc(_ string) (bool, error) { return true, nil } diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index cf2e3eaa0..7db863307 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -162,7 +162,7 @@ type Fscacheinfo struct { ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 // Number of release reqs ignored due to in-progress store ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores cancelled due to release req + // Number of page stores canceled due to release req PageStoresCancelledByReleaseRequests uint64 VmscanWaiting uint64 // Number of times async ops added to pending queues @@ -171,11 +171,11 @@ type Fscacheinfo struct { OpsRunning uint64 // Number of times async ops queued for processing OpsEnqueued uint64 - // Number of async ops cancelled + // Number of async ops canceled OpsCancelled uint64 // Number of async ops rejected due to object lookup/create failure OpsRejected uint64 - // Number of async ops initialised + // Number of async ops initialized OpsInitialised uint64 // Number of async ops queued for deferred release OpsDeferred uint64 diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 3c18c7610..3a43e8391 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -28,6 +28,9 @@ const ( // DefaultConfigfsMountPoint is the common mount point of the configfs. DefaultConfigfsMountPoint = "/sys/kernel/config" + + // DefaultSelinuxMountPoint is the common mount point of the selinuxfs. + DefaultSelinuxMountPoint = "/sys/fs/selinux" ) // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 14272dc78..5a7d2df06 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -14,6 +14,7 @@ package util import ( + "errors" "os" "strconv" "strings" @@ -110,3 +111,16 @@ func ParseBool(b string) *bool { } return &truth } + +// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX. +func ReadHexFromFile(path string) (uint64, error) { + data, err := os.ReadFile(path) + if err != nil { + return 0, err + } + hexString := strings.TrimSpace(string(data)) + if !strings.HasPrefix(hexString, "0x") { + return 0, errors.New("invalid format: hex string does not start with '0x'") + } + return strconv.ParseUint(hexString[2:], 16, 64) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index 1ab875cee..d5404a6d7 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -20,6 +20,8 @@ package util import ( "bytes" "os" + "strconv" + "strings" "syscall" ) @@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) { return string(bytes.TrimSpace(b[:n])), nil } + +// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it. 
+func SysReadUintFromFile(path string) (uint64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it. +func SysReadIntFromFile(path string) (int64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 75a3b6c81..50caa7327 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -45,11 +45,11 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 - // kernel version >= 4.14 MaxLen + // Kernel version >= 4.14 MaxLen // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 fieldTransport11RDMAMaxLen = 28 - // kernel version <= 4.2 MinLen + // Kernel version <= 4.2 MinLen // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 fieldTransport11RDMAMinLen = 20 ) @@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats switch statVersion { case statVersion10: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport10UDPLen - } else { + default: return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { @@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } case statVersion11: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport11UDPLen - } else if protocol == "rdma" { + case "rdma": expectedLength = fieldTransport11RDMAMinLen - } else { + default: return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || @@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // For the udp RPC transport there is no connection count, connect idle time, // or idle time (fields #3, #4, and #5); all other fields are the same. So // we set them to 0 here. - if protocol == "udp" { + switch protocol { + case "udp": ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } else if protocol == "tcp" { + case "tcp": ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) - } else if protocol == "rdma" { + case "rdma": ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) } diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go new file mode 100644 index 000000000..f50b38e35 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" +) + +// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc//net/dev_snmp6/. +// The outer map's keys are interface names and the inner map's keys are stat names. +// +// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type. +type NetDevSNMP6 map[string]map[string]uint64 + +// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/ +// directory. +func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6")) +} + +// Returns kernel/system statistics read from interface files within the /proc//net/dev_snmp6/ +// directory. +func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(p.path("net/dev_snmp6")) +} + +// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory. +func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { + netDevSNMP6 := make(NetDevSNMP6) + + // The net/dev_snmp6 folders contain one file per interface + ifaceFiles, err := os.ReadDir(dir) + if err != nil { + // On systems with IPv6 disabled, this directory won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return netDevSNMP6, err + } + return netDevSNMP6, err + } + + for _, iFaceFile := range ifaceFiles { + f, err := os.Open(dir + "/" + iFaceFile.Name()) + if err != nil { + return netDevSNMP6, err + } + defer f.Close() + + netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f) + if err != nil { + return netDevSNMP6, err + } + } + + return netDevSNMP6, nil +} + +func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) { + m := make(map[string]uint64) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + key, val := stat[0], stat[1] + + // Expect stat name to contain "6" or be "ifIndex" + if strings.Contains(key, "6") || key == "ifIndex" { + v, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return m, err + } + + m[key] = v + } + } + return m, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index b70f1fc7a..19e3378f7 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -25,7 +25,7 @@ import ( ) const ( - // readLimit is used by io.LimitReader while reading the content of the + // Maximum size limit used by io.LimitReader while reading the content of the // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic // as each line represents a single used socket. // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. @@ -50,12 +50,12 @@ type ( // UsedSockets shows the total number of parsed lines representing the // number of used sockets. UsedSockets uint64 - // Drops shows the total number of dropped packets of all UPD sockets. + // Drops shows the total number of dropped packets of all UDP sockets. 
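A hedged usage sketch for the new per-interface IPv6 counters added above (the stat name `Ip6InReceives` is just an example key from /proc/net/dev_snmp6):

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}

	// Outer key: interface name; inner key: per-interface IPv6 stat name.
	stats, err := fs.NetDevSNMP6()
	if err != nil {
		panic(err)
	}
	for iface, counters := range stats {
		fmt.Println(iface, "Ip6InReceives =", counters["Ip6InReceives"])
	}
}
```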
Drops *uint64 } - // netIPSocketLine represents the fields parsed from a single line - // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // A single line parser for fields from /proc/net/{t,u}dp{,6}. + // Fields which are not used by IPSocket are skipped. // Drops is non-nil for udp{,6}, but nil for tcp{,6}. // For the proc file format details, see https://linux.die.net/man/5/proc. netIPSocketLine struct { diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index b6c77b709..8d4b1ac05 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro if err != nil { return nil, err } - if fields[4] == enabled { + switch fields[4] { + case enabled: line.Pressure = 1 - } else if fields[4] == disabled { + case disabled: line.Pressure = 0 - } else { + default: line.Pressure = -1 } line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) if err != nil { return nil, err } - if fields[6] == enabled { + switch fields[6] { + case enabled: line.Slab = true - } else if fields[6] == disabled { + case disabled: line.Slab = false - } else { + default: return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } for i := 0; i < len(capabilities); i++ { - if capabilities[i] == "y" { + switch capabilities[i] { + case "y": *capabilityFields[i] = true - } else if capabilities[i] == "n" { + case "n": *capabilityFields[i] = false - } else { + default: return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go index 527762955..0396d7201 100644 --- a/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -25,24 +25,28 @@ type ( // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCP() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp")) } // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp6")) } // NetTCPSummary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp")) } // NetTCP6Summary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. 
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp6")) } diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index d868cebda..d7e0cacb4 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { return &nu, nil } -func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { +func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) { fields := strings.Fields(line) l := len(fields) - if l < min { - return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) + if l < minFields { + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l) } // Field offsets are as follows: @@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, } // Path field is optional. - if l > min { + if l > minFields { // Path occurs at either index 6 or 7 depending on whether inode is // already present. pathIdx := 7 diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 142796368..368187fa8 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -37,9 +37,9 @@ type Proc struct { type Procs []Proc var ( - ErrFileParse = errors.New("Error Parsing File") - ErrFileRead = errors.New("Error Reading File") - ErrMountPoint = errors.New("Error Accessing Mount point") + ErrFileParse = errors.New("error parsing file") + ErrFileRead = errors.New("error reading file") + ErrMountPoint = errors.New("error accessing mount point") ) func (p Procs) Len() int { return len(p) } @@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) { if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), "")) if err != nil { return Proc{}, err } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index daeed7f57..4a64347c0 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -24,7 +24,7 @@ import ( ) // Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in // this hierarchy' (where==what path on the specific cgroupfs). 
By prefixing this path with the mount point of diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index 776f34971..d15b66ddb 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) { ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" + "cancelled_write_bytes: %d\n" //nolint:misspell _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 8e3ff4d79..4248c1716 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = &value + procNetstat.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = &value + procNetstat.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = &value + procNetstat.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = &value + procNetstat.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = &value + procNetstat.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = &value + procNetstat.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = &value + procNetstat.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = &value + procNetstat.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = &value + procNetstat.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = &value + procNetstat.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = &value + procNetstat.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = &value + procNetstat.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = &value + procNetstat.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = &value + procNetstat.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = &value + procNetstat.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = &value + procNetstat.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = &value + procNetstat.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = &value + procNetstat.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = &value + procNetstat.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = &value + procNetstat.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = &value + procNetstat.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = &value + procNetstat.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = &value + procNetstat.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = &value + procNetstat.TCPRenoRecovery = &value case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = &value + procNetstat.TCPSackRecovery = &value 
case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = &value + procNetstat.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = &value + procNetstat.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = &value + procNetstat.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = &value + procNetstat.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = &value + procNetstat.TCPFullUndo = &value case "TCPPartialUndo": - procNetstat.TcpExt.TCPPartialUndo = &value + procNetstat.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = &value + procNetstat.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = &value + procNetstat.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = &value + procNetstat.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = &value + procNetstat.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = &value + procNetstat.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = &value + procNetstat.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = &value + procNetstat.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = &value + procNetstat.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = &value + procNetstat.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = &value + procNetstat.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = &value + procNetstat.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = &value + procNetstat.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = &value + procNetstat.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = &value + procNetstat.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = &value + procNetstat.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = &value + procNetstat.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = &value + procNetstat.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = &value + procNetstat.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = &value + procNetstat.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = &value + procNetstat.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = &value + procNetstat.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = &value + procNetstat.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = &value + procNetstat.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = &value + procNetstat.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = &value + procNetstat.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = &value + procNetstat.TCPRetransFail = &value case "TCPRcvCoalesce": - 
procNetstat.TcpExt.TCPRcvCoalesce = &value + procNetstat.TCPRcvCoalesce = &value case "TCPRcvQDrop": - procNetstat.TcpExt.TCPRcvQDrop = &value + procNetstat.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = &value + procNetstat.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = &value + procNetstat.TCPOFODrop = &value case "TCPOFOMerge": - procNetstat.TcpExt.TCPOFOMerge = &value + procNetstat.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = &value + procNetstat.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = &value + procNetstat.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = &value + procNetstat.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = &value + procNetstat.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = &value + procNetstat.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = &value + procNetstat.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = &value + procNetstat.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = &value + procNetstat.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = &value + procNetstat.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value + procNetstat.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = &value + procNetstat.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = &value + procNetstat.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = &value + procNetstat.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = &value + procNetstat.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = &value + procNetstat.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = &value + procNetstat.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = &value + procNetstat.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = &value + procNetstat.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = &value + procNetstat.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = &value + procNetstat.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = &value + procNetstat.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = &value + procNetstat.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = &value + procNetstat.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = &value + procNetstat.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value + procNetstat.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = &value 
+ procNetstat.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = &value + procNetstat.TCPACKSkippedChallenge = &value case "TCPWinProbe": - procNetstat.TcpExt.TCPWinProbe = &value + procNetstat.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = &value + procNetstat.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = &value + procNetstat.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = &value + procNetstat.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = &value + procNetstat.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = &value + procNetstat.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = &value + procNetstat.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = &value + procNetstat.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = &value + procNetstat.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = &value + procNetstat.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = &value + procNetstat.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = &value + procNetstat.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = &value + procNetstat.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = &value + procNetstat.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = &value + procNetstat.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = &value + procNetstat.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = &value + procNetstat.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = &value + procNetstat.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = &value + procNetstat.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = &value + procNetstat.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = &value + procNetstat.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = &value + procNetstat.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = &value + procNetstat.ReasmOverlaps = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index 09060e820..9a297afcf 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -19,7 +19,6 @@ package procfs import ( "bufio" "errors" - "fmt" "os" "regexp" "strconv" @@ -29,7 +28,7 @@ import ( ) var ( - // match the header line before each mapped zone in `/proc/pid/smaps`. + // Match the header line before each mapped zone in `/proc/pid/smaps`. 
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) @@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { func (s *ProcSMapsRollup) parseLine(line string) error { kv := strings.SplitN(line, ":", 2) if len(kv) != 2 { - fmt.Println(line) return errors.New("invalid net/dev line, missing colon") } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index b9d2cf642..4bdc90b07 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = &value + procSnmp.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = &value + procSnmp.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = &value + procSnmp.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = &value + procSnmp.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = &value + procSnmp.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = &value + procSnmp.ForwDatagrams = &value case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = &value + procSnmp.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = &value + procSnmp.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = &value + procSnmp.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = &value + procSnmp.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = &value + procSnmp.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = &value + procSnmp.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = &value + procSnmp.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = &value + procSnmp.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = &value + procSnmp.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = &value + procSnmp.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = &value + procSnmp.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = &value + procSnmp.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = &value + procSnmp.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = &value + procSnmp.InMsgs = &value case "InErrors": procSnmp.Icmp.InErrors = &value case "InCsumErrors": procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = &value + procSnmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = &value + procSnmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = &value + procSnmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = &value + procSnmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = &value + procSnmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = &value + procSnmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = &value + procSnmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = &value + procSnmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = &value + procSnmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = &value + procSnmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = &value + procSnmp.InAddrMaskReps = &value case 
"OutMsgs": - procSnmp.Icmp.OutMsgs = &value + procSnmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = &value + procSnmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = &value + procSnmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = &value + procSnmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = &value + procSnmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = &value + procSnmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = &value + procSnmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = &value + procSnmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = &value + procSnmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = &value + procSnmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = &value + procSnmp.OutTimestampReps = &value case "OutAddrMasks": - procSnmp.Icmp.OutAddrMasks = &value + procSnmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = &value + procSnmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = &value + procSnmp.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = &value + procSnmp.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = &value + procSnmp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = &value + procSnmp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = &value + procSnmp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = &value + procSnmp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = &value + procSnmp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = &value + procSnmp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = &value + procSnmp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = &value + procSnmp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = &value + procSnmp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = &value + procSnmp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = &value + procSnmp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = &value + procSnmp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = &value + procSnmp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = &value + procSnmp.OutRsts = &value case "InCsumErrors": procSnmp.Tcp.InCsumErrors = &value } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index 3059cc6a1..fb7fd3995 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = &value + procSnmp6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = &value + procSnmp6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = &value + procSnmp6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = &value + procSnmp6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = &value + procSnmp6.InAddrErrors = &value case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = &value + procSnmp6.InUnknownProtos = &value case "InTruncatedPkts": - 
procSnmp6.Ip6.InTruncatedPkts = &value + procSnmp6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = &value + procSnmp6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = &value + procSnmp6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = &value + procSnmp6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = &value + procSnmp6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = &value + procSnmp6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = &value + procSnmp6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = &value + procSnmp6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = &value + procSnmp6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = &value + procSnmp6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = &value + procSnmp6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = &value + procSnmp6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = &value + procSnmp6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = &value + procSnmp6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = &value + procSnmp6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = &value + procSnmp6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = &value + procSnmp6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = &value + procSnmp6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = &value + procSnmp6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = &value + procSnmp6.OutMcastOctets = &value case "InBcastOctets": - procSnmp6.Ip6.InBcastOctets = &value + procSnmp6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = &value + procSnmp6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = &value + procSnmp6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = &value + procSnmp6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = &value + procSnmp6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = &value + procSnmp6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = &value + procSnmp6.InMsgs = &value case "InErrors": procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = &value + procSnmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = &value + procSnmp6.OutErrors = &value case "InCsumErrors": procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = &value + procSnmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = &value + procSnmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = &value + procSnmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = &value + procSnmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = &value + procSnmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = &value + procSnmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = &value + procSnmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = &value + procSnmp6.InGroupMembResponses = &value case "InGroupMembReductions": - 
procSnmp6.Icmp6.InGroupMembReductions = &value + procSnmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = &value + procSnmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = &value + procSnmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = &value + procSnmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = &value + procSnmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = &value + procSnmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = &value + procSnmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = &value + procSnmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = &value + procSnmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = &value + procSnmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = &value + procSnmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = &value + procSnmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = &value + procSnmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = &value + procSnmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = &value + procSnmp6.OutGroupMembResponses = &value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = &value + procSnmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = &value + procSnmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = &value + procSnmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = &value + procSnmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = &value + procSnmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = &value + procSnmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = &value + procSnmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = &value + procSnmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = &value + procSnmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = &value + procSnmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = &value + procSnmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = &value + procSnmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = &value + procSnmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = &value + procSnmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = &value + procSnmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = &value + procSnmp6.OutType143 = &value } case "Udp6": switch key { @@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "InCsumErrors": procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = &value + procSnmp6.IgnoredMulti = &value } case "UdpLite6": switch key { diff --git a/vendor/github.com/prometheus/procfs/proc_status.go 
b/vendor/github.com/prometheus/procfs/proc_status.go index a055197c6..dd8aa5688 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt } } case "NSpid": - s.NSpids = calcNSPidsList(vString) + nspids, err := calcNSPidsList(vString) + if err != nil { + return err + } + s.NSpids = nspids case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 { return g } -func calcNSPidsList(nspidsString string) []uint64 { - s := strings.Split(nspidsString, " ") +func calcNSPidsList(nspidsString string) ([]uint64, error) { + s := strings.Split(nspidsString, "\t") var nspids []uint64 for _, nspid := range s { - nspid, _ := strconv.ParseUint(nspid, 10, 64) - if nspid == 0 { - continue + nspid, err := strconv.ParseUint(nspid, 10, 64) + if err != nil { + return nil, err } nspids = append(nspids, nspid) } - return nspids + return nspids, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index 5eefbe2ef..3810d1ac9 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -21,7 +21,7 @@ import ( ) func sysctlToPath(sysctl string) string { - return strings.Replace(sysctl, ".", "/", -1) + return strings.ReplaceAll(sysctl, ".", "/") } func (fs FS) SysctlStrings(sysctl string) ([]string, error) { diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index 28708e074..403e6ae70 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { if len(parts) < 2 { continue } - switch { - case parts[0] == "HI:": + switch parts[0] { + case "HI:": perCPU := parts[1:] softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TIMER:": + case "TIMER:": perCPU := parts[1:] softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_TX:": + case "NET_TX:": perCPU := parts[1:] softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_RX:": + case "NET_RX:": perCPU := parts[1:] softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "BLOCK:": + case "BLOCK:": perCPU := parts[1:] softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "IRQ_POLL:": + case 
"IRQ_POLL:": perCPU := parts[1:] softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TASKLET:": + case "TASKLET:": perCPU := parts[1:] softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "SCHED:": + case "SCHED:": perCPU := parts[1:] softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "HRTIMER:": + case "HRTIMER:": perCPU := parts[1:] softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "RCU:": + case "RCU:": perCPU := parts[1:] softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md index 7360d188c..df0af73cc 100644 --- a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md @@ -1,4 +1,4 @@ -## Summary +# Prometheus Benchmarks Using the Prometheus bridge and the OTLP exporter adds roughly ~50% to the CPU and memory overhead of an application compared to serving a Prometheus HTTP endpoint for metrics. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go index 6df851609..4161ec624 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go @@ -5,13 +5,6 @@ package runtime // import "go.opentelemetry.io/contrib/instrumentation/runtime" // Version is the current release version of the runtime instrumentation. func Version() string { - return "0.60.0" + return "0.61.0" // This string is updated by the pre_release.sh script during release } - -// SemVersion is the semantic version to be supplied to tracer/meter creation. -// -// Deprecated: Use [Version] instead. -func SemVersion() string { - return Version() -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go index cd33a1682..d0cc79d54 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go @@ -13,6 +13,7 @@ import ( "strconv" "strings" "time" + "unicode" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -359,8 +360,9 @@ func WithTimeout(duration time.Duration) Option { // explicitly returns a backoff time in the response, that time will take // precedence over these settings. // -// These settings do not define any network retry strategy. 
That is entirely -// handled by the gRPC ClientConn. +// These settings define the retry strategy implemented by the exporter. +// These settings do not define any network retry strategy. +// That is handled by the gRPC ClientConn. // // If unset, the default retry policy will be used. It will retry the export // 5 seconds after receiving a retryable error and increase exponentially @@ -442,13 +444,15 @@ func convHeaders(s string) (map[string]string, error) { continue } - escKey, e := url.PathUnescape(rawKey) - if e != nil { + key := strings.TrimSpace(rawKey) + + // Validate the key. + if !isValidHeaderKey(key) { err = errors.Join(err, fmt.Errorf("invalid header key: %s", rawKey)) continue } - key := strings.TrimSpace(escKey) + // Only decode the value. escVal, e := url.PathUnescape(rawVal) if e != nil { err = errors.Join(err, fmt.Errorf("invalid header value: %s", rawVal)) @@ -651,3 +655,22 @@ func fallback[T any](val T) resolver[T] { return s } } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go index f2da12382..896c3a303 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors @@ -14,7 +14,7 @@ import ( "fmt" "time" - "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/backoff/v5" ) // DefaultConfig are the recommended defaults to use. @@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, - MaxElapsedTime: c.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, } b.Reset() + maxElapsedTime := c.MaxElapsedTime + startTime := time.Now() + for { err := fn(ctx) if err == nil { @@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } - bOff := b.NextBackOff() - if bOff == backoff.Stop { + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. 
- var delay time.Duration - if bOff > throttle { - delay = bOff - } else { - elapsed := b.GetElapsedTime() - if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { - return fmt.Errorf("max retry time would elapse: %w", err) - } - delay = throttle + bOff := b.NextBackOff() + delay := max(throttle, bOff) + + elapsed := time.Since(startTime) + if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go index dfeecf596..7bb3967f7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlplog/transform/log.go.tmpl // Copyright The OpenTelemetry Authors @@ -257,7 +257,7 @@ func stringSliceValues(vals []string) []*cpb.AnyValue { return converted } -// Attrs transforms a slice of [api.KeyValue] into OTLP key-values. +// LogAttrs transforms a slice of [api.KeyValue] into OTLP key-values. func LogAttrs(attrs []api.KeyValue) []*cpb.KeyValue { if len(attrs) == 0 { return nil diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go index a68ed0591..954597340 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go @@ -5,5 +5,5 @@ package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o // Version is the current release version of the OpenTelemetry OTLP over gRPC logs exporter in use. func Version() string { - return "0.11.0" + return "0.12.2" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go index 279b4be4f..3af60258a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go @@ -44,20 +44,23 @@ func newNoopClient() *client { // newHTTPClient creates a new HTTP log client. 
func newHTTPClient(cfg config) (*client, error) { - hc := &http.Client{ - Transport: ourTransport, - Timeout: cfg.timeout.Value, - } - - if cfg.tlsCfg.Value != nil || cfg.proxy.Value != nil { - clonedTransport := ourTransport.Clone() - hc.Transport = clonedTransport - - if cfg.tlsCfg.Value != nil { - clonedTransport.TLSClientConfig = cfg.tlsCfg.Value + hc := cfg.httpClient + if hc == nil { + hc = &http.Client{ + Transport: ourTransport, + Timeout: cfg.timeout.Value, } - if cfg.proxy.Value != nil { - clonedTransport.Proxy = cfg.proxy.Value + + if cfg.tlsCfg.Value != nil || cfg.proxy.Value != nil { + clonedTransport := ourTransport.Clone() + hc.Transport = clonedTransport + + if cfg.tlsCfg.Value != nil { + clonedTransport.TLSClientConfig = cfg.tlsCfg.Value + } + if cfg.proxy.Value != nil { + clonedTransport.Proxy = cfg.proxy.Value + } } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go index bfe768091..66140f3fe 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go @@ -14,6 +14,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry" @@ -94,6 +95,7 @@ type config struct { timeout setting[time.Duration] proxy setting[HTTPTransportProxyFunc] retryCfg setting[retry.Config] + httpClient *http.Client } func newConfig(options []Option) config { @@ -343,6 +345,25 @@ func WithProxy(pf HTTPTransportProxyFunc) Option { }) } +// WithHTTPClient sets the HTTP client to used by the exporter. +// +// This option will take precedence over [WithProxy], [WithTimeout], +// [WithTLSClientConfig] options as well as OTEL_EXPORTER_OTLP_CERTIFICATE, +// OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE, OTEL_EXPORTER_OTLP_TIMEOUT, +// OTEL_EXPORTER_OTLP_LOGS_TIMEOUT environment variables. +// +// Timeout and all other fields of the passed [http.Client] are left intact. +// +// Be aware that passing an HTTP client with transport like +// [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.NewTransport] can +// cause the client to be instrumented twice and cause infinite recursion. +func WithHTTPClient(c *http.Client) Option { + return fnOpt(func(cfg config) config { + cfg.httpClient = c + return cfg + }) +} + // setting is a configuration setting value. type setting[T any] struct { Value T @@ -544,13 +565,15 @@ func convHeaders(s string) (map[string]string, error) { continue } - escKey, e := url.PathUnescape(rawKey) - if e != nil { + key := strings.TrimSpace(rawKey) + + // Validate the key. + if !isValidHeaderKey(key) { err = errors.Join(err, fmt.Errorf("invalid header key: %s", rawKey)) continue } - key := strings.TrimSpace(escKey) + // Only decode the value. escVal, e := url.PathUnescape(rawVal) if e != nil { err = errors.Join(err, fmt.Errorf("invalid header value: %s", rawVal)) @@ -600,3 +623,22 @@ func fallback[T any](val T) resolver[T] { return s } } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' 
|| c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go index 661576ce2..bd9a750a1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors @@ -14,7 +14,7 @@ import ( "fmt" "time" - "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/backoff/v5" ) // DefaultConfig are the recommended defaults to use. @@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, - MaxElapsedTime: c.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, } b.Reset() + maxElapsedTime := c.MaxElapsedTime + startTime := time.Now() + for { err := fn(ctx) if err == nil { @@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } - bOff := b.NextBackOff() - if bOff == backoff.Stop { + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. - var delay time.Duration - if bOff > throttle { - delay = bOff - } else { - elapsed := b.GetElapsedTime() - if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { - return fmt.Errorf("max retry time would elapse: %w", err) - } - delay = throttle + bOff := b.NextBackOff() + delay := max(throttle, bOff) + + elapsed := time.Since(startTime) + if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go index adf407800..c3d9710c2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlplog/transform/log.go.tmpl // Copyright The OpenTelemetry Authors @@ -257,7 +257,7 @@ func stringSliceValues(vals []string) []*cpb.AnyValue { return converted } -// Attrs transforms a slice of [api.KeyValue] into OTLP key-values. +// LogAttrs transforms a slice of [api.KeyValue] into OTLP key-values. 
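As a usage sketch (not part of this patch): the WithHTTPClient option added in the otlploghttp config.go hunk above lets callers supply their own *http.Client, which then takes precedence over the proxy, timeout, and TLS client options. The endpoint and client settings below are illustrative only.

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
)

func main() {
	ctx := context.Background()

	// Caller-owned client: per the new option's doc comment, its Timeout and
	// Transport are used as-is by the exporter.
	hc := &http.Client{Timeout: 10 * time.Second}

	exp, err := otlploghttp.New(ctx,
		otlploghttp.WithEndpoint("collector.example.com:4318"), // illustrative endpoint
		otlploghttp.WithHTTPClient(hc),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}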
func LogAttrs(attrs []api.KeyValue) []*cpb.KeyValue { if len(attrs) == 0 { return nil diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go index 8315200fa..9702a4c0b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go @@ -5,5 +5,5 @@ package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use. func Version() string { - return "0.11.0" + return "0.12.2" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go index db6e3714b..c831bb60b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go @@ -238,8 +238,9 @@ func WithTimeout(duration time.Duration) Option { // explicitly returns a backoff time in the response, that time will take // precedence over these settings. // -// These settings do not define any network retry strategy. That is entirely -// handled by the gRPC ClientConn. +// These settings define the retry strategy implemented by the exporter. +// These settings do not define any network retry strategy. +// That is handled by the gRPC ClientConn. // // If unset, the default retry policy will be used. It will retry the export // 5 seconds after receiving a retryable error and increase exponentially diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go index 261f55026..2cd98b929 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go @@ -1,9 +1,11 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package envconfig provides functionality to parse configuration from +// environment variables. package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig" import ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go index 95e2f4ba3..b29cd11a6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package internal provides internal functionally for the otlpmetricgrpc package. 
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go index 7ae53f2d1..b54a173b6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl // Copyright The OpenTelemetry Authors @@ -80,8 +80,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "METRICS_CLIENT_CERTIFICATE", + "METRICS_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), @@ -91,8 +99,14 @@ func getOptionsFromEnv() []GenericOption { WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), - withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }), - withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }), + withEnvTemporalityPreference( + "METRICS_TEMPORALITY_PREFERENCE", + func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }, + ), + withEnvAggPreference( + "METRICS_DEFAULT_HISTOGRAM_AGGREGATION", + func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }, + ), ) return opts @@ -157,7 +171,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) case "lowmemory": fn(lowMemory) default: - global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s) + global.Warn( + "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", + "value", + s, + ) } } } @@ -203,7 +221,11 @@ func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e return 
metric.DefaultAggregationSelector(kind) }) default: - global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s) + global.Warn( + "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", + "value", + s, + ) } } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go index 2ac8db5a8..cb77ae6a9 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go @@ -1,9 +1,10 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package oconf provides configuration for the otlpmetric exporters. package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" import ( @@ -56,13 +57,15 @@ type ( Timeout time.Duration URLPath string - // gRPC configurations - GRPCCredentials credentials.TransportCredentials - TemporalitySelector metric.TemporalitySelector AggregationSelector metric.AggregationSelector - Proxy HTTPTransportProxyFunc + // gRPC configurations + GRPCCredentials credentials.TransportCredentials + + // HTTP configurations + Proxy HTTPTransportProxyFunc + HTTPClient *http.Client } Config struct { @@ -372,3 +375,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption { return cfg }) } + +func WithHTTPClient(c *http.Client) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.HTTPClient = c + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go index 83f6d7fd1..c18a6b1f2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go index 03e7fbcdf..e4547b3a6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go index 50e25fdbc..6af5591ea 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go index cc3a77055..37cc6c519 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors @@ -14,7 +14,7 @@ import ( "fmt" "time" - "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/backoff/v5" ) // DefaultConfig are the recommended defaults to use. @@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, - MaxElapsedTime: c.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, } b.Reset() + maxElapsedTime := c.MaxElapsedTime + startTime := time.Now() + for { err := fn(ctx) if err == nil { @@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } - bOff := b.NextBackOff() - if bOff == backoff.Stop { + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. - var delay time.Duration - if bOff > throttle { - delay = bOff - } else { - elapsed := b.GetElapsedTime() - if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { - return fmt.Errorf("max retry time would elapse: %w", err) - } - delay = throttle + bOff := b.NextBackOff() + delay := max(throttle, bOff) + + elapsed := time.Since(startTime) + if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go index 2605c74d0..cb70a9c41 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go index d31652b4d..f03bfec41 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go index abf7f0219..9c156e91b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl // Copyright The OpenTelemetry Authors @@ -203,7 +203,9 @@ func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint // ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is // returned if the temporality of h is unknown. -func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) { +func ExponentialHistogram[N int64 | float64]( + h metricdata.ExponentialHistogram[N], +) (*mpb.Metric_ExponentialHistogram, error) { t, err := Temporality(h.Temporality) if err != nil { return nil, err @@ -218,7 +220,9 @@ func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N // ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated // from dPts. -func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint { +func ExponentialHistogramDataPoints[N int64 | float64]( + dPts []metricdata.ExponentialHistogramDataPoint[N], +) []*mpb.ExponentialHistogramDataPoint { out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts)) for _, dPt := range dPts { sum := float64(dPt.Sum) @@ -250,7 +254,9 @@ func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.Exponen // ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated // from bucket. 
-func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets { +func ExponentialHistogramDataPointBuckets( + bucket metricdata.ExponentialBucket, +) *mpb.ExponentialHistogramDataPoint_Buckets { return &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: bucket.Offset, BucketCounts: bucket.Counts, diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index 0b5dec3ac..58859f2c2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. func Version() string { - return "1.35.0" + return "1.36.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go index 86da30e37..23f1f0031 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go @@ -55,20 +55,23 @@ var ourTransport = &http.Transport{ // newClient creates a new HTTP metric client. func newClient(cfg oconf.Config) (*client, error) { - httpClient := &http.Client{ - Transport: ourTransport, - Timeout: cfg.Metrics.Timeout, - } - - if cfg.Metrics.TLSCfg != nil || cfg.Metrics.Proxy != nil { - clonedTransport := ourTransport.Clone() - httpClient.Transport = clonedTransport - - if cfg.Metrics.TLSCfg != nil { - clonedTransport.TLSClientConfig = cfg.Metrics.TLSCfg + httpClient := cfg.Metrics.HTTPClient + if httpClient == nil { + httpClient = &http.Client{ + Transport: ourTransport, + Timeout: cfg.Metrics.Timeout, } - if cfg.Metrics.Proxy != nil { - clonedTransport.Proxy = cfg.Metrics.Proxy + + if cfg.Metrics.TLSCfg != nil || cfg.Metrics.Proxy != nil { + clonedTransport := ourTransport.Clone() + httpClient.Transport = clonedTransport + + if cfg.Metrics.TLSCfg != nil { + clonedTransport.TLSClientConfig = cfg.Metrics.TLSCfg + } + if cfg.Metrics.Proxy != nil { + clonedTransport.Proxy = cfg.Metrics.Proxy + } } } @@ -277,7 +280,7 @@ type request struct { // reset reinitializes the request Body and uses ctx for the request. func (r *request) reset(ctx context.Context) { r.Body = r.bodyReader() - r.Request = r.Request.WithContext(ctx) + r.Request = r.WithContext(ctx) } // retryableError represents a request failure that can be retried. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go index bf05adcf1..2b144f7eb 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go @@ -222,3 +222,19 @@ func WithAggregationSelector(selector metric.AggregationSelector) Option { func WithProxy(pf HTTPTransportProxyFunc) Option { return wrappedOption{oconf.WithProxy(oconf.HTTPTransportProxyFunc(pf))} } + +// WithHTTPClient sets the HTTP client to used by the exporter. 
+// +// This option will take precedence over [WithProxy], [WithTimeout], +// [WithTLSClientConfig] options as well as OTEL_EXPORTER_OTLP_CERTIFICATE, +// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE, OTEL_EXPORTER_OTLP_TIMEOUT, +// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT environment variables. +// +// Timeout and all other fields of the passed [http.Client] are left intact. +// +// Be aware that passing an HTTP client with transport like +// [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.NewTransport] can +// cause the client to be instrumented twice and cause infinite recursion. +func WithHTTPClient(c *http.Client) Option { + return wrappedOption{oconf.WithHTTPClient(c)} +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go index 7ac42759f..8be035fca 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go @@ -1,9 +1,11 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package envconfig provides functionality to parse configuration from +// environment variables. package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig" import ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go index 1b379f10c..8849f341a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package internal provides internal functionally for the otlpmetrichttp package. package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal" //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go index 89b134a39..ef318ac67 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl // Copyright The OpenTelemetry Authors @@ -80,8 +80,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "METRICS_CLIENT_CERTIFICATE", + "METRICS_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), @@ -91,8 +99,14 @@ func getOptionsFromEnv() []GenericOption { WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), - withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }), - withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }), + withEnvTemporalityPreference( + "METRICS_TEMPORALITY_PREFERENCE", + func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }, + ), + withEnvAggPreference( + "METRICS_DEFAULT_HISTOGRAM_AGGREGATION", + func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }, + ), ) return opts @@ -157,7 +171,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) case "lowmemory": fn(lowMemory) default: - global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s) + global.Warn( + "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", + "value", + s, + ) } } } @@ -203,7 +221,11 @@ func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e return metric.DefaultAggregationSelector(kind) }) default: - global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s) + global.Warn( + "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", + "value", + s, + ) } } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go index db595e49e..cfe629a97 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go @@ -1,9 +1,10 @@ -// Code 
created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package oconf provides configuration for the otlpmetric exporters. package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" import ( @@ -56,13 +57,15 @@ type ( Timeout time.Duration URLPath string - // gRPC configurations - GRPCCredentials credentials.TransportCredentials - TemporalitySelector metric.TemporalitySelector AggregationSelector metric.AggregationSelector - Proxy HTTPTransportProxyFunc + // gRPC configurations + GRPCCredentials credentials.TransportCredentials + + // HTTP configurations + Proxy HTTPTransportProxyFunc + HTTPClient *http.Client } Config struct { @@ -372,3 +375,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption { return cfg }) } + +func WithHTTPClient(c *http.Client) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.HTTPClient = c + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go index ae3d09787..d7b005c97 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go index f603dc605..e335cbd09 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go index ed93844a4..c3b57c57c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go index a9a08ffe6..c855bdc93 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors @@ -14,7 +14,7 @@ import ( "fmt" "time" - "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/backoff/v5" ) // DefaultConfig are the recommended defaults to use. @@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, - MaxElapsedTime: c.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, } b.Reset() + maxElapsedTime := c.MaxElapsedTime + startTime := time.Now() + for { err := fn(ctx) if err == nil { @@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } - bOff := b.NextBackOff() - if bOff == backoff.Stop { + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. - var delay time.Duration - if bOff > throttle { - delay = bOff - } else { - elapsed := b.GetElapsedTime() - if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { - return fmt.Errorf("max retry time would elapse: %w", err) - } - delay = throttle + bOff := b.NextBackOff() + delay := max(throttle, bOff) + + elapsed := time.Since(startTime) + if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go index d607da78e..6c9787189 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go index bb6d21f0b..f65c87cbf 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go index 8207b15a4..5e5f26aa4 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl // Copyright The OpenTelemetry Authors @@ -203,7 +203,9 @@ func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint // ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is // returned if the temporality of h is unknown. -func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) { +func ExponentialHistogram[N int64 | float64]( + h metricdata.ExponentialHistogram[N], +) (*mpb.Metric_ExponentialHistogram, error) { t, err := Temporality(h.Temporality) if err != nil { return nil, err @@ -218,7 +220,9 @@ func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N // ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated // from dPts. -func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint { +func ExponentialHistogramDataPoints[N int64 | float64]( + dPts []metricdata.ExponentialHistogramDataPoint[N], +) []*mpb.ExponentialHistogramDataPoint { out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts)) for _, dPt := range dPts { sum := float64(dPt.Sum) @@ -250,7 +254,9 @@ func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.Exponen // ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated // from bucket. -func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets { +func ExponentialHistogramDataPointBuckets( + bucket metricdata.ExponentialBucket, +) *mpb.ExponentialHistogramDataPoint_Buckets { return &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: bucket.Offset, BucketCounts: bucket.Counts, diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go index 2a67f5800..528533321 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go @@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. 
func Version() string { - return "1.35.0" + return "1.36.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index 4571a5ca3..ca4544f0d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package tracetransform provides conversion functionality for the otlptrace +// exporters. package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index 4abf48d1f..6eacdf311 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -1,9 +1,11 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package envconfig provides functionality to parse configuration from +// environment variables. package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" import ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go index 97cd6c54f..b6e6b10fb 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package internal provides internal functionally for the otlptracegrpc package. package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go index 7bb189a94..1d840be20 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors @@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "TRACES_CLIENT_CERTIFICATE", + "TRACES_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 0a317d926..506ca00b6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -1,9 +1,10 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package otlpconfig provides configuration for the otlptrace exporters. package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" import ( @@ -52,7 +53,9 @@ type ( // gRPC configurations GRPCCredentials credentials.TransportCredentials - Proxy HTTPTransportProxyFunc + // HTTP configurations + Proxy HTTPTransportProxyFunc + HTTPClient *http.Client } Config struct { @@ -349,3 +352,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption { return cfg }) } + +func WithHTTPClient(c *http.Client) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.HTTPClient = c + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go index 3d4f699d4..918490387 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go index 38b97a013..ba6e41183 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go index a12ea4c48..1c4659423 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go index 1c5450ab6..777e68a7b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors @@ -14,7 +14,7 @@ import ( "fmt" "time" - "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/backoff/v5" ) // DefaultConfig are the recommended defaults to use. @@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, - MaxElapsedTime: c.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, } b.Reset() + maxElapsedTime := c.MaxElapsedTime + startTime := time.Now() + for { err := fn(ctx) if err == nil { @@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } - bOff := b.NextBackOff() - if bOff == backoff.Stop { + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. 
- var delay time.Duration - if bOff > throttle { - delay = bOff - } else { - elapsed := b.GetElapsedTime() - if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { - return fmt.Errorf("max retry time would elapse: %w", err) - } - delay = throttle + bOff := b.NextBackOff() + delay := max(throttle, bOff) + + elapsed := time.Since(startTime) + if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index 00ab1f20c..2da229870 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -199,8 +199,9 @@ func WithTimeout(duration time.Duration) Option { // explicitly returns a backoff time in the response. That time will take // precedence over these settings. // -// These settings do not define any network retry strategy. That is entirely -// handled by the gRPC ClientConn. +// These settings define the retry strategy implemented by the exporter. +// These settings do not define any network retry strategy. +// That is handled by the gRPC ClientConn. // // If unset, the default retry policy will be used. It will retry the export // 5 seconds after receiving a retryable error and increase exponentially diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go index 16c006b2c..583a8f867 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -71,20 +71,24 @@ var _ otlptrace.Client = (*client)(nil) func NewClient(opts ...Option) otlptrace.Client { cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...) - httpClient := &http.Client{ - Transport: ourTransport, - Timeout: cfg.Traces.Timeout, - } + httpClient := cfg.Traces.HTTPClient - if cfg.Traces.TLSCfg != nil || cfg.Traces.Proxy != nil { - clonedTransport := ourTransport.Clone() - httpClient.Transport = clonedTransport - - if cfg.Traces.TLSCfg != nil { - clonedTransport.TLSClientConfig = cfg.Traces.TLSCfg + if httpClient == nil { + httpClient = &http.Client{ + Transport: ourTransport, + Timeout: cfg.Traces.Timeout, } - if cfg.Traces.Proxy != nil { - clonedTransport.Proxy = cfg.Traces.Proxy + + if cfg.Traces.TLSCfg != nil || cfg.Traces.Proxy != nil { + clonedTransport := ourTransport.Clone() + httpClient.Transport = clonedTransport + + if cfg.Traces.TLSCfg != nil { + clonedTransport.TLSClientConfig = cfg.Traces.TLSCfg + } + if cfg.Traces.Proxy != nil { + clonedTransport.Proxy = cfg.Traces.Proxy + } } } @@ -300,7 +304,7 @@ type request struct { // reset reinitializes the request Body and uses ctx for the request. func (r *request) reset(ctx context.Context) { r.Body = r.bodyReader() - r.Request = r.Request.WithContext(ctx) + r.Request = r.WithContext(ctx) } // retryableError represents a request failure that can be retried. 
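Reviewer note (not part of the vendored patch): the client.go hunk above makes the OTLP HTTP trace exporter honor a caller-supplied *http.Client via the new WithHTTPClient option introduced elsewhere in this dependency bump. A minimal, illustrative sketch of how downstream code could use it follows; the endpoint and timeout values are assumptions for the example, not anything configured by this repository.

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	// Caller-owned client; per the new option's documentation it takes
	// precedence over WithTimeout/WithTLSClientConfig/WithProxy and the
	// related OTEL_EXPORTER_OTLP_* environment variables.
	httpClient := &http.Client{Timeout: 10 * time.Second} // assumed timeout

	exp, err := otlptracehttp.New(
		context.Background(),
		otlptracehttp.WithEndpoint("localhost:4318"), // assumed collector endpoint
		otlptracehttp.WithInsecure(),
		otlptracehttp.WithHTTPClient(httpClient), // option added in this update
	)
	if err != nil {
		log.Fatalf("creating OTLP trace exporter: %v", err)
	}
	defer func() { _ = exp.Shutdown(context.Background()) }()
}

When no client is supplied, the exporter falls back to the previous behavior shown in the hunk: a default http.Client built around the shared transport, with TLS and proxy settings applied to a cloned transport only when configured.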
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go index f30bb66ae..f4385fb4e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go @@ -1,9 +1,11 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package envconfig provides functionality to parse configuration from +// environment variables. package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig" import ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go index e4142b9d7..3d344dc85 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package internal provides internal functionally for the otlptracehttp package. package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal" //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go index ff4141b6d..121b02f5c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors @@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "TRACES_CLIENT_CERTIFICATE", + "TRACES_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go index 6a9c4d3a6..c857db056 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go @@ -1,9 +1,10 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package otlpconfig provides configuration for the otlptrace exporters. package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" import ( @@ -52,7 +53,9 @@ type ( // gRPC configurations GRPCCredentials credentials.TransportCredentials - Proxy HTTPTransportProxyFunc + // HTTP configurations + Proxy HTTPTransportProxyFunc + HTTPClient *http.Client } Config struct { @@ -349,3 +352,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption { return cfg }) } + +func WithHTTPClient(c *http.Client) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.HTTPClient = c + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go index bc4db0595..6a52b58cc 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go index dd6f12b22..5b389cb03 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go index 9e04a9bc1..418e66428 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go index 86c4819f4..e9d35c7fa 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors @@ -14,7 +14,7 @@ import ( "fmt" "time" - "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/backoff/v5" ) // DefaultConfig are the recommended defaults to use. @@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, - MaxElapsedTime: c.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, } b.Reset() + maxElapsedTime := c.MaxElapsedTime + startTime := time.Now() + for { err := fn(ctx) if err == nil { @@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } - bOff := b.NextBackOff() - if bOff == backoff.Stop { + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. 
- var delay time.Duration - if bOff > throttle { - delay = bOff - } else { - elapsed := b.GetElapsedTime() - if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { - return fmt.Errorf("max retry time would elapse: %w", err) - } - delay = throttle + bOff := b.NextBackOff() + delay := max(throttle, bOff) + + elapsed := time.Since(startTime) + if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go index 3559c5664..cfe21dbfb 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go @@ -153,3 +153,19 @@ func WithRetry(rc RetryConfig) Option { func WithProxy(pf HTTPTransportProxyFunc) Option { return wrappedOption{otlpconfig.WithProxy(otlpconfig.HTTPTransportProxyFunc(pf))} } + +// WithHTTPClient sets the HTTP client to used by the exporter. +// +// This option will take precedence over [WithProxy], [WithTimeout], +// [WithTLSClientConfig] options as well as OTEL_EXPORTER_OTLP_CERTIFICATE, +// OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE, OTEL_EXPORTER_OTLP_TIMEOUT, +// OTEL_EXPORTER_OTLP_TRACES_TIMEOUT environment variables. +// +// Timeout and all other fields of the passed [http.Client] are left intact. +// +// Be aware that passing an HTTP client with transport like +// [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.NewTransport] can +// cause the client to be instrumented twice and cause infinite recursion. +func WithHTTPClient(c *http.Client) Option { + return wrappedOption{otlpconfig.WithHTTPClient(c)} +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index f5cad46b7..5f78bfdfb 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.35.0" + return "1.36.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go index 660675dd6..ceb2d63e2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go @@ -5,11 +5,13 @@ package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" import ( "strings" + "sync" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" ) @@ -25,6 +27,12 @@ type config struct { resourceAttributesFilter attribute.Filter } +var logDeprecatedLegacyScheme = sync.OnceFunc(func() { + global.Warn( + "prometheus exporter legacy scheme deprecated: support for the legacy NameValidationScheme will be removed in a future release", + ) +}) + // newConfig creates a validated config configured with options. 
func newConfig(opts ...Option) config { cfg := config{} @@ -132,7 +140,8 @@ func WithoutScopeInfo() Option { // have special behavior based on their name. func WithNamespace(ns string) Option { return optionFunc(func(cfg config) config { - if model.NameValidationScheme != model.UTF8Validation { + if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. + logDeprecatedLegacyScheme() // Only sanitize if prometheus does not support UTF-8. ns = model.EscapeName(ns, model.NameEscapingScheme) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go index a8677e93a..e0959641c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "errors" "fmt" + "math" "slices" "strings" "sync" @@ -40,7 +41,15 @@ const ( spanIDExemplarKey = "span_id" ) -var errScopeInvalid = errors.New("invalid scope") +var ( + errScopeInvalid = errors.New("invalid scope") + + metricsPool = sync.Pool{ + New: func() interface{} { + return &metricdata.ResourceMetrics{} + }, + } +) // Exporter is a Prometheus Exporter that embeds the OTel metric.Reader // interface for easy instantiation with a MeterProvider. @@ -144,9 +153,9 @@ func (c *collector) Describe(ch chan<- *prometheus.Desc) { // // This method is safe to call concurrently. func (c *collector) Collect(ch chan<- prometheus.Metric) { - // TODO (#3047): Use a sync.Pool instead of allocating metrics every Collect. - metrics := metricdata.ResourceMetrics{} - err := c.reader.Collect(context.TODO(), &metrics) + metrics := metricsPool.Get().(*metricdata.ResourceMetrics) + defer metricsPool.Put(metrics) + err := c.reader.Collect(context.TODO(), metrics) if err != nil { if errors.Is(err, metric.ErrReaderShutdown) { return @@ -233,6 +242,10 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { addHistogramMetric(ch, v, m, name, kv) case metricdata.Histogram[float64]: addHistogramMetric(ch, v, m, name, kv) + case metricdata.ExponentialHistogram[int64]: + addExponentialHistogramMetric(ch, v, m, name, kv) + case metricdata.ExponentialHistogram[float64]: + addExponentialHistogramMetric(ch, v, m, name, kv) case metricdata.Sum[int64]: addSumMetric(ch, v, m, name, kv) case metricdata.Sum[float64]: @@ -246,7 +259,67 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } } -func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, name string, kv keyVals) { +func addExponentialHistogramMetric[N int64 | float64]( + ch chan<- prometheus.Metric, + histogram metricdata.ExponentialHistogram[N], + m metricdata.Metrics, + name string, + kv keyVals, +) { + for _, dp := range histogram.DataPoints { + keys, values := getAttrs(dp.Attributes) + keys = append(keys, kv.keys...) + values = append(values, kv.vals...) + + desc := prometheus.NewDesc(name, m.Description, keys, nil) + + // From spec: note that Prometheus Native Histograms buckets are indexed by upper boundary while Exponential Histograms are indexed by lower boundary, the result being that the Offset fields are different-by-one. 
+ positiveBuckets := make(map[int]int64) + for i, c := range dp.PositiveBucket.Counts { + if c > math.MaxInt64 { + otel.Handle(fmt.Errorf("positive count %d is too large to be represented as int64", c)) + continue + } + positiveBuckets[int(dp.PositiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. + } + + negativeBuckets := make(map[int]int64) + for i, c := range dp.NegativeBucket.Counts { + if c > math.MaxInt64 { + otel.Handle(fmt.Errorf("negative count %d is too large to be represented as int64", c)) + continue + } + negativeBuckets[int(dp.NegativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above. + } + + m, err := prometheus.NewConstNativeHistogram( + desc, + dp.Count, + float64(dp.Sum), + positiveBuckets, + negativeBuckets, + dp.ZeroCount, + dp.Scale, + dp.ZeroThreshold, + dp.StartTime, + values...) + if err != nil { + otel.Handle(err) + continue + } + + // TODO(GiedriusS): add exemplars here after https://github.com/prometheus/client_golang/pull/1654#pullrequestreview-2434669425 is done. + ch <- m + } +} + +func addHistogramMetric[N int64 | float64]( + ch chan<- prometheus.Metric, + histogram metricdata.Histogram[N], + m metricdata.Metrics, + name string, + kv keyVals, +) { for _, dp := range histogram.DataPoints { keys, values := getAttrs(dp.Attributes) keys = append(keys, kv.keys...) @@ -270,7 +343,13 @@ func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogra } } -func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, name string, kv keyVals) { +func addSumMetric[N int64 | float64]( + ch chan<- prometheus.Metric, + sum metricdata.Sum[N], + m metricdata.Metrics, + name string, + kv keyVals, +) { valueType := prometheus.CounterValue if !sum.IsMonotonic { valueType = prometheus.GaugeValue @@ -296,7 +375,13 @@ func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata } } -func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, name string, kv keyVals) { +func addGaugeMetric[N int64 | float64]( + ch chan<- prometheus.Metric, + gauge metricdata.Gauge[N], + m metricdata.Metrics, + name string, + kv keyVals, +) { for _, dp := range gauge.DataPoints { keys, values := getAttrs(dp.Attributes) keys = append(keys, kv.keys...) @@ -319,7 +404,7 @@ func getAttrs(attrs attribute.Set) ([]string, []string) { values := make([]string, 0, attrs.Len()) itr := attrs.Iter() - if model.NameValidationScheme == model.UTF8Validation { + if model.NameValidationScheme == model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. // Do not perform sanitization if prometheus supports UTF-8. for itr.Next() { kv := itr.Attribute() @@ -405,8 +490,9 @@ var unitSuffixes = map[string]string{ // getName returns the sanitized name, prefixed with the namespace and suffixed with unit. func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { name := m.Name - if model.NameValidationScheme != model.UTF8Validation { + if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. // Only sanitize if prometheus does not support UTF-8. 
+ logDeprecatedLegacyScheme() name = model.EscapeName(name, model.NameEscapingScheme) } addCounterSuffix := !c.withoutCounterSuffixes && *typ == dto.MetricType_COUNTER @@ -436,11 +522,13 @@ func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { // underscore when the escaping scheme is underscore escaping. This is meant to // capture any character that should be considered a "delimiter". func convertsToUnderscore(b rune) bool { - return !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == ':' || (b >= '0' && b <= '9')) + return (b < 'a' || b > 'z') && (b < 'A' || b > 'Z') && b != ':' && (b < '0' || b > '9') } func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType { switch v := m.Data.(type) { + case metricdata.ExponentialHistogram[int64], metricdata.ExponentialHistogram[float64]: + return dto.MetricType_HISTOGRAM.Enum() case metricdata.Histogram[int64], metricdata.Histogram[float64]: return dto.MetricType_HISTOGRAM.Enum() case metricdata.Sum[float64]: diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go index fc155d79f..76f15b96b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go @@ -131,7 +131,9 @@ func redactAggregationTimestamps(orig metricdata.Aggregation) metricdata.Aggrega } } -func redactHistogramTimestamps[T int64 | float64](hdp []metricdata.HistogramDataPoint[T]) []metricdata.HistogramDataPoint[T] { +func redactHistogramTimestamps[T int64 | float64]( + hdp []metricdata.HistogramDataPoint[T], +) []metricdata.HistogramDataPoint[T] { out := make([]metricdata.HistogramDataPoint[T], len(hdp)) for i, dp := range hdp { out[i] = metricdata.HistogramDataPoint[T]{ diff --git a/vendor/go.opentelemetry.io/otel/log/doc.go b/vendor/go.opentelemetry.io/otel/log/doc.go index 18cbd1cb2..b7a085c63 100644 --- a/vendor/go.opentelemetry.io/otel/log/doc.go +++ b/vendor/go.opentelemetry.io/otel/log/doc.go @@ -4,10 +4,19 @@ /* Package log provides the OpenTelemetry Logs API. -This package is intended to be used by bridges between existing logging -libraries and OpenTelemetry. Users should not directly use this package as a -logging library. Instead, install one of the bridges listed in the -[registry], and use the associated logging library. +This API is separate from its implementation so the instrumentation built from +it is reusable. See [go.opentelemetry.io/otel/sdk/log] for the official +OpenTelemetry implementation of this API. + +The log package provides the OpenTelemetry Logs API, which serves as a standard +interface for generating and managing log records within the OpenTelemetry ecosystem. +This package allows users to emit LogRecords, enabling structured, context-rich logging +that can be easily integrated with observability tools. It ensures that log data is captured +in a way that is consistent with OpenTelemetry's data model. + +This package can be used to create bridges between existing logging libraries and OpenTelemetry. +Log bridges allow integrating the existing logging setups with OpenTelemetry. +Log bridges can be found in the [registry]. 
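As a hedged illustration of the workflow this paragraph describes (an editorial sketch, not text from the upstream package comment), emitting one record through the API might look roughly as follows, assuming the go.opentelemetry.io/otel/log, go.opentelemetry.io/otel/log/global, and time packages are imported and ctx is a context.Context:

	logger := global.GetLoggerProvider().Logger("example-bridge")

	var rec log.Record
	rec.SetTimestamp(time.Now())
	rec.SetSeverity(log.SeverityInfo)
	rec.SetBody(log.StringValue("user signed in"))
	rec.AddAttributes(log.String("user.id", "42"))

	logger.Emit(ctx, rec)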
# API Implementations diff --git a/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go index a3714c4c6..9b401b2b1 100644 --- a/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go @@ -4,33 +4,33 @@ // Package embedded provides interfaces embedded within the [OpenTelemetry Logs // Bridge API]. // -// Implementers of the [OpenTelemetry Logs Bridge API] can embed the relevant +// Implementers of the [OpenTelemetry Logs API] can embed the relevant // type from this package into their implementation directly. Doing so will // result in a compilation error for users when the [OpenTelemetry Logs Bridge // API] is extended (which is something that can happen without a major version // bump of the API package). // -// [OpenTelemetry Logs Bridge API]: https://pkg.go.dev/go.opentelemetry.io/otel/log +// [OpenTelemetry Logs API]: https://pkg.go.dev/go.opentelemetry.io/otel/log package embedded // import "go.opentelemetry.io/otel/log/embedded" -// LoggerProvider is embedded in the [Logs Bridge API LoggerProvider]. +// LoggerProvider is embedded in the [Logs API LoggerProvider]. // -// Embed this interface in your implementation of the [Logs Bridge API +// Embed this interface in your implementation of the [Logs API // LoggerProvider] if you want users to experience a compilation error, // signaling they need to update to your latest implementation, when the [Logs // Bridge API LoggerProvider] interface is extended (which is something that // can happen without a major version bump of the API package). // -// [Logs Bridge API LoggerProvider]: https://pkg.go.dev/go.opentelemetry.io/otel/log#LoggerProvider +// [Logs API LoggerProvider]: https://pkg.go.dev/go.opentelemetry.io/otel/log#LoggerProvider type LoggerProvider interface{ loggerProvider() } -// Logger is embedded in [Logs Bridge API Logger]. +// Logger is embedded in [Logs API Logger]. // -// Embed this interface in your implementation of the [Logs Bridge API Logger] +// Embed this interface in your implementation of the [Logs API Logger] // if you want users to experience a compilation error, signaling they need to -// update to your latest implementation, when the [Logs Bridge API Logger] +// update to your latest implementation, when the [Logs API Logger] // interface is extended (which is something that can happen without a major // version bump of the API package). // -// [Logs Bridge API Logger]: https://pkg.go.dev/go.opentelemetry.io/otel/log#Logger +// [Logs API Logger]: https://pkg.go.dev/go.opentelemetry.io/otel/log#Logger type Logger interface{ logger() } diff --git a/vendor/go.opentelemetry.io/otel/log/keyvalue.go b/vendor/go.opentelemetry.io/otel/log/keyvalue.go index 73e4e7dca..87d1a8275 100644 --- a/vendor/go.opentelemetry.io/otel/log/keyvalue.go +++ b/vendor/go.opentelemetry.io/otel/log/keyvalue.go @@ -301,7 +301,7 @@ func (v Value) String() string { case KindBool: return strconv.FormatBool(v.asBool()) case KindBytes: - return fmt.Sprint(v.asBytes()) + return fmt.Sprint(v.asBytes()) // nolint:staticcheck // Use fmt.Sprint to encode as slice. 
case KindMap: return fmt.Sprint(v.asMap()) case KindSlice: diff --git a/vendor/go.opentelemetry.io/otel/log/noop/noop.go b/vendor/go.opentelemetry.io/otel/log/noop/noop.go index f45a7c7e0..d779e5d80 100644 --- a/vendor/go.opentelemetry.io/otel/log/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/log/noop/noop.go @@ -4,14 +4,14 @@ // Package noop provides an implementation of the [OpenTelemetry Logs Bridge // API] that produces no telemetry and minimizes used computation resources. // -// Using this package to implement the [OpenTelemetry Logs Bridge API] will +// Using this package to implement the [OpenTelemetry Logs API] will // effectively disable OpenTelemetry. // // This implementation can be embedded in other implementations of the -// [OpenTelemetry Logs Bridge API]. Doing so will mean the implementation +// [OpenTelemetry Logs API]. Doing so will mean the implementation // defaults to no operation for methods it does not implement. // -// [OpenTelemetry Logs Bridge API]: https://pkg.go.dev/go.opentelemetry.io/otel/log +// [OpenTelemetry Logs API]: https://pkg.go.dev/go.opentelemetry.io/otel/log package noop // import "go.opentelemetry.io/otel/log/noop" import ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go index 28c969262..b91741d58 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go @@ -156,13 +156,20 @@ func (b *BatchProcessor) poll(interval time.Duration) (done chan struct{}) { global.Warn("dropped log records", "dropped", d) } - qLen := b.q.TryDequeue(buf, func(r []Record) bool { - ok := b.exporter.EnqueueExport(r) - if ok { - buf = slices.Clone(buf) - } - return ok - }) + var qLen int + // Don't copy data from queue unless exporter can accept more, it is very expensive. + if b.exporter.Ready() { + qLen = b.q.TryDequeue(buf, func(r []Record) bool { + ok := b.exporter.EnqueueExport(r) + if ok { + buf = slices.Clone(buf) + } + return ok + }) + } else { + qLen = b.q.Len() + } + if qLen >= b.batchSize { // There is another full batch ready. Immediately trigger // another export attempt. @@ -272,6 +279,13 @@ func newQueue(size int) *queue { } } +func (q *queue) Len() int { + q.Lock() + defer q.Unlock() + + return q.len +} + // Dropped returns the number of Records dropped during enqueueing since the // last time Dropped was called. func (q *queue) Dropped() uint64 { diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go index 6a1f1b0e9..78935de63 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go @@ -31,6 +31,6 @@ is being run on. That way when multiple instances of the code are collected at a single endpoint their origin is decipherable. See [go.opentelemetry.io/otel/log] for more information about -the OpenTelemetry Logs Bridge API. +the OpenTelemetry Logs API. */ package log // import "go.opentelemetry.io/otel/sdk/log" diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go index e4e3c5402..8cef5dde6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go @@ -186,11 +186,10 @@ type bufferExporter struct { // newBufferExporter returns a new bufferExporter that wraps exporter. The // returned bufferExporter will buffer at most size number of export requests. 
-// If size is less than zero, zero will be used (i.e. only synchronous -// exporting will be supported). +// If size is less than 1, 1 will be used. func newBufferExporter(exporter Exporter, size int) *bufferExporter { - if size < 0 { - size = 0 + if size < 1 { + size = 1 } input := make(chan exportData, size) return &bufferExporter{ @@ -201,6 +200,10 @@ func newBufferExporter(exporter Exporter, size int) *bufferExporter { } } +func (e *bufferExporter) Ready() bool { + return len(e.input) != cap(e.input) +} + var errStopped = errors.New("exporter stopped") func (e *bufferExporter) enqueue(ctx context.Context, records []Record, rCh chan<- error) error { diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go b/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go index 5b99a4a99..a39cad9e0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go @@ -8,7 +8,6 @@ import ( "go.opentelemetry.io/otel/log" "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/resource" ) // FilterProcessor is a [Processor] that knows, and can identify, what [Record] @@ -56,7 +55,6 @@ type FilterProcessor interface { // EnabledParameters represents payload for [FilterProcessor]'s Enabled method. type EnabledParameters struct { - Resource resource.Resource InstrumentationScope instrumentation.Scope Severity log.Severity } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go index 6211d5d92..cd3580ec0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go @@ -50,7 +50,6 @@ func (l *logger) Emit(ctx context.Context, r log.Record) { // returned if it can be positively verified that no Processor will process. func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool { p := EnabledParameters{ - Resource: *l.provider.resource, InstrumentationScope: l.instrumentationScope, Severity: param.Severity, } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go index 096944ea1..359357b7e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go @@ -236,7 +236,7 @@ func WithAttributeCountLimit(limit int) LoggerProviderOption { }) } -// AttributeValueLengthLimit sets the maximum allowed attribute value length. +// WithAttributeValueLengthLimit sets the maximum allowed attribute value length. // // This limit only applies to string and string slice attribute values. // Any string longer than this value will be truncated to this length. diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go index e1b7c457c..d1d24fe62 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go @@ -22,8 +22,6 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type LogsServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
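Editorial aside on the bufferExporter.Ready change above (a hedged sketch with hypothetical names, not code from this diff): the exporter treats a buffered channel as its queue and reports readiness by comparing length to capacity, so the batch processor can skip the comparatively expensive dequeue-and-clone work whenever the export pipe is already full. The pattern in isolation:

	package main

	import "fmt"

	type pipe struct{ input chan []int }

	// ready reports whether the buffered channel still has headroom,
	// i.e. another batch can be enqueued without blocking.
	func (p *pipe) ready() bool { return len(p.input) != cap(p.input) }

	func main() {
		p := &pipe{input: make(chan []int, 2)}
		p.input <- []int{1}
		fmt.Println(p.ready()) // true: one of two slots used
		p.input <- []int{2}
		fmt.Println(p.ready()) // false: the buffer is full
	}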
Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) } @@ -48,8 +46,6 @@ func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceReq // All implementations must embed UnimplementedLogsServiceServer // for forward compatibility type LogsServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) mustEmbedUnimplementedLogsServiceServer() } diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go index 31d25fc15..fc668643c 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go @@ -22,8 +22,6 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type MetricsServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) } @@ -48,8 +46,6 @@ func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServ // All implementations must embed UnimplementedMetricsServiceServer // for forward compatibility type MetricsServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) mustEmbedUnimplementedMetricsServiceServer() } diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go index dd1b73f1e..892864ea6 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go @@ -22,8 +22,6 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type TraceServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) } @@ -48,8 +46,6 @@ func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceR // All implementations must embed UnimplementedTraceServiceServer // for forward compatibility type TraceServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) mustEmbedUnimplementedTraceServiceServer() } diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index 852209b09..a7c5d19bf 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -430,6 +430,101 @@ func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 { return 0 } +// A reference to an Entity. +// Entity represents an object of interest associated with produced telemetry: e.g spans, metrics, profiles, or logs. +// +// Status: [Development] +type EntityRef struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The Schema URL, if known. This is the identifier of the Schema that the entity data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // + // This schema_url applies to the data in this message and to the Resource attributes + // referenced by id_keys and description_keys. + // TODO: discuss if we are happy with this somewhat complicated definition of what + // the schema_url applies to. + // + // This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs. + SchemaUrl string `protobuf:"bytes,1,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` + // Defines the type of the entity. MUST not change during the lifetime of the entity. + // For example: "service" or "host". This field is required and MUST not be empty + // for valid entities. + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // Attribute Keys that identify the entity. + // MUST not change during the lifetime of the entity. The Id must contain at least one attribute. + // These keys MUST exist in the containing {message}.attributes. + IdKeys []string `protobuf:"bytes,3,rep,name=id_keys,json=idKeys,proto3" json:"id_keys,omitempty"` + // Descriptive (non-identifying) attribute keys of the entity. + // MAY change over the lifetime of the entity. MAY be empty. + // These attribute keys are not part of entity's identity. + // These keys MUST exist in the containing {message}.attributes. + DescriptionKeys []string `protobuf:"bytes,4,rep,name=description_keys,json=descriptionKeys,proto3" json:"description_keys,omitempty"` +} + +func (x *EntityRef) Reset() { + *x = EntityRef{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntityRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntityRef) ProtoMessage() {} + +func (x *EntityRef) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntityRef.ProtoReflect.Descriptor instead. 
+func (*EntityRef) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{5} +} + +func (x *EntityRef) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +func (x *EntityRef) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EntityRef) GetIdKeys() []string { + if x != nil { + return x.IdKeys + } + return nil +} + +func (x *EntityRef) GetDescriptionKeys() []string { + if x != nil { + return x.DescriptionKeys + } + return nil +} + var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ @@ -488,15 +583,23 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, - 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x82, 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x66, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, + 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x64, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x69, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12, + 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, + 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x56, 0x31, 0x62, 0x06, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -511,13 +614,14 @@ func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte { return file_opentelemetry_proto_common_v1_common_proto_rawDescData } -var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{ (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope + (*EntityRef)(nil), // 5: opentelemetry.proto.common.v1.EntityRef } var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue @@ -599,6 +703,18 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { return nil } } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EntityRef); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{ (*AnyValue_StringValue)(nil), @@ -615,7 +731,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc, NumEnums: 0, - NumMessages: 5, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go index 9b47481ce..eb5b8df6b 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go @@ -501,8 +501,6 @@ type LogRecord struct { // as an event. // // [Optional]. - // - // Status: [Development] EventName string `protobuf:"bytes,12,opt,name=event_name,json=eventName,proto3" json:"event_name,omitempty"` } diff --git a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go index 8799d6ba2..ec187b13d 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go @@ -526,7 +526,7 @@ type Metric struct { // description of the metric, which can be used in documentation. Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` // unit in which the metric value is reported. Follows the format - // described by http://unitsofmeasure.org/ucum.html. + // described by https://unitsofmeasure.org/ucum.html. 
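// Editorial aside (illustrative, not from the upstream proto): typical UCUM
// codes seen in the unit field below are "ms" or "s" for durations, "By" for
// bytes, and the dimensionless "1" for ratios and plain counts.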
Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` // Data determines the aggregation type (if any) of the metric, what is the // reported value type for the data points, as well as the relatationship to @@ -929,7 +929,7 @@ func (x *ExponentialHistogram) GetAggregationTemporality() AggregationTemporalit // Summary metric data are used to convey quantile summaries, // a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) -// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) +// and OpenMetrics (see: https://github.com/prometheus/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) // data type. These data points cannot always be merged in a meaningful way. // While they can be useful in some applications, histogram data points are // recommended for new applications. @@ -1175,7 +1175,9 @@ type HistogramDataPoint struct { // The sum of the bucket_counts must equal the value in the count field. // // The number of elements in bucket_counts array must be by one greater than - // the number of elements in explicit_bounds array. + // the number of elements in explicit_bounds array. The exception to this rule + // is when the length of bucket_counts is 0, then the length of explicit_bounds + // must also be 0. BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` // explicit_bounds specifies buckets with explicitly defined bounds for values. // @@ -1190,6 +1192,9 @@ type HistogramDataPoint struct { // Histogram buckets are inclusive of their upper boundary, except the last // bucket where the boundary is at infinity. This format is intentionally // compatible with the OpenMetrics histogram definition. + // + // If bucket_counts length is 0 then explicit_bounds length must also be 0, + // otherwise the data point is invalid. ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` // (Optional) List of exemplars collected from // measurements that were used to form the data point diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go index b7545b03b..eb7745d66 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -48,6 +48,12 @@ type Resource struct { // dropped_attributes_count is the number of dropped attributes. If the value is 0, then // no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Set of entities that participate in this Resource. + // + // Note: keys in the references MUST exist in attributes of this message. 
+ // + // Status: [Development] + EntityRefs []*v1.EntityRef `protobuf:"bytes,3,rep,name=entity_refs,json=entityRefs,proto3" json:"entity_refs,omitempty"` } func (x *Resource) Reset() { @@ -96,6 +102,13 @@ func (x *Resource) GetDroppedAttributesCount() uint32 { return 0 } +func (x *Resource) GetEntityRefs() []*v1.EntityRef { + if x != nil { + return x.EntityRefs + } + return nil +} + var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ @@ -106,7 +119,7 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd8, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, @@ -115,16 +128,21 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01, - 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, - 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, + 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x52, 0x0a, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x73, 0x42, 0x83, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, + 0x0d, 0x52, 0x65, 0x73, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, + 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1f, 0x4f, + 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -141,16 +159,18 @@ func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte { var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{ - (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource - (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue + (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource + (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue + (*v1.EntityRef)(nil), // 2: opentelemetry.proto.common.v1.EntityRef } var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: opentelemetry.proto.resource.v1.Resource.entity_refs:type_name -> opentelemetry.proto.common.v1.EntityRef + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() } diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index f388426b0..d083dde3e 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 3cd9a5bb8..e017ef071 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -703,6 +703,65 @@ type QuotaFailure_Violation struct { // For example: "Service disabled" or "Daily Limit for read operations // exceeded". 
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The API Service from which the `QuotaFailure.Violation` orginates. In + // some cases, Quota issues originate from an API Service other than the one + // that was called. In other words, a dependency of the called API Service + // could be the cause of the `QuotaFailure`, and this field would have the + // dependency API service name. + // + // For example, if the called API is Kubernetes Engine API + // (container.googleapis.com), and a quota violation occurs in the + // Kubernetes Engine API itself, this field would be + // "container.googleapis.com". On the other hand, if the quota violation + // occurs when the Kubernetes Engine API creates VMs in the Compute Engine + // API (compute.googleapis.com), this field would be + // "compute.googleapis.com". + ApiService string `protobuf:"bytes,3,opt,name=api_service,json=apiService,proto3" json:"api_service,omitempty"` + // The metric of the violated quota. A quota metric is a named counter to + // measure usage, such as API requests or CPUs. When an activity occurs in a + // service, such as Virtual Machine allocation, one or more quota metrics + // may be affected. + // + // For example, "compute.googleapis.com/cpus_per_vm_family", + // "storage.googleapis.com/internet_egress_bandwidth". + QuotaMetric string `protobuf:"bytes,4,opt,name=quota_metric,json=quotaMetric,proto3" json:"quota_metric,omitempty"` + // The id of the violated quota. Also know as "limit name", this is the + // unique identifier of a quota in the context of an API service. + // + // For example, "CPUS-PER-VM-FAMILY-per-project-region". + QuotaId string `protobuf:"bytes,5,opt,name=quota_id,json=quotaId,proto3" json:"quota_id,omitempty"` + // The dimensions of the violated quota. Every non-global quota is enforced + // on a set of dimensions. While quota metric defines what to count, the + // dimensions specify for what aspects the counter should be increased. + // + // For example, the quota "CPUs per region per VM family" enforces a limit + // on the metric "compute.googleapis.com/cpus_per_vm_family" on dimensions + // "region" and "vm_family". And if the violation occurred in region + // "us-central1" and for VM family "n1", the quota_dimensions would be, + // + // { + // "region": "us-central1", + // "vm_family": "n1", + // } + // + // When a quota is enforced globally, the quota_dimensions would always be + // empty. + QuotaDimensions map[string]string `protobuf:"bytes,6,rep,name=quota_dimensions,json=quotaDimensions,proto3" json:"quota_dimensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The enforced quota value at the time of the `QuotaFailure`. + // + // For example, if the enforced quota value at the time of the + // `QuotaFailure` on the number of CPUs is "10", then the value of this + // field would reflect this quantity. + QuotaValue int64 `protobuf:"varint,7,opt,name=quota_value,json=quotaValue,proto3" json:"quota_value,omitempty"` + // The new quota value being rolled out at the time of the violation. At the + // completion of the rollout, this value will be enforced in place of + // quota_value. If no rollout is in progress at the time of the violation, + // this field is not set. + // + // For example, if at the time of the violation a rollout is in progress + // changing the number of CPUs quota from 10 to 20, 20 would be the value of + // this field. 
+ FutureQuotaValue *int64 `protobuf:"varint,8,opt,name=future_quota_value,json=futureQuotaValue,proto3,oneof" json:"future_quota_value,omitempty"` } func (x *QuotaFailure_Violation) Reset() { @@ -751,6 +810,48 @@ func (x *QuotaFailure_Violation) GetDescription() string { return "" } +func (x *QuotaFailure_Violation) GetApiService() string { + if x != nil { + return x.ApiService + } + return "" +} + +func (x *QuotaFailure_Violation) GetQuotaMetric() string { + if x != nil { + return x.QuotaMetric + } + return "" +} + +func (x *QuotaFailure_Violation) GetQuotaId() string { + if x != nil { + return x.QuotaId + } + return "" +} + +func (x *QuotaFailure_Violation) GetQuotaDimensions() map[string]string { + if x != nil { + return x.QuotaDimensions + } + return nil +} + +func (x *QuotaFailure_Violation) GetQuotaValue() int64 { + if x != nil { + return x.QuotaValue + } + return 0 +} + +func (x *QuotaFailure_Violation) GetFutureQuotaValue() int64 { + if x != nil && x.FutureQuotaValue != nil { + return *x.FutureQuotaValue + } + return 0 +} + // A message type used to describe a single precondition failure. type PreconditionFailure_Violation struct { state protoimpl.MessageState @@ -775,7 +876,7 @@ type PreconditionFailure_Violation struct { func (x *PreconditionFailure_Violation) Reset() { *x = PreconditionFailure_Violation{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[12] + mi := &file_google_rpc_error_details_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -788,7 +889,7 @@ func (x *PreconditionFailure_Violation) String() string { func (*PreconditionFailure_Violation) ProtoMessage() {} func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[12] + mi := &file_google_rpc_error_details_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -886,7 +987,7 @@ type BadRequest_FieldViolation struct { func (x *BadRequest_FieldViolation) Reset() { *x = BadRequest_FieldViolation{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[13] + mi := &file_google_rpc_error_details_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -899,7 +1000,7 @@ func (x *BadRequest_FieldViolation) String() string { func (*BadRequest_FieldViolation) ProtoMessage() {} func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[13] + mi := &file_google_rpc_error_details_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -958,7 +1059,7 @@ type Help_Link struct { func (x *Help_Link) Reset() { *x = Help_Link{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[14] + mi := &file_google_rpc_error_details_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -971,7 +1072,7 @@ func (x *Help_Link) String() string { func (*Help_Link) ProtoMessage() {} func (x *Help_Link) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[14] + mi := &file_google_rpc_error_details_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ 
-1029,79 +1130,102 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x8e, 0x04, 0x0a, 0x0c, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, - 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, - 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 
0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a, - 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, - 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, - 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, - 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, - 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, - 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 
0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, - 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, - 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x1a, 0xb9, 0x03, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x61, 0x70, 0x69, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x71, + 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x19, + 0x0a, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x49, 0x64, 0x12, 0x62, 0x0a, 0x10, 0x71, 0x75, 0x6f, + 0x74, 0x61, 0x5f, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, + 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x71, 0x75, + 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, + 0x0a, 0x12, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x10, 0x66, 0x75, + 0x74, 0x75, 0x72, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, + 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, + 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbd, 0x01, 0x0a, + 0x13, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 
0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, + 0x5b, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, + 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, + 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, + 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x49, 0x0a, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, + 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 
0x77, 0x6e, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, + 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, + 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, + 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1116,7 +1240,7 @@ func file_google_rpc_error_details_proto_rawDescGZIP() []byte { return file_google_rpc_error_details_proto_rawDescData } -var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 16) var file_google_rpc_error_details_proto_goTypes = []interface{}{ (*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo (*RetryInfo)(nil), // 1: google.rpc.RetryInfo @@ -1130,24 +1254,26 @@ var file_google_rpc_error_details_proto_goTypes = []interface{}{ (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage nil, // 10: google.rpc.ErrorInfo.MetadataEntry (*QuotaFailure_Violation)(nil), // 11: google.rpc.QuotaFailure.Violation - (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation - (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation - (*Help_Link)(nil), // 14: google.rpc.Help.Link - (*durationpb.Duration)(nil), // 15: google.protobuf.Duration + nil, // 12: google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry + (*PreconditionFailure_Violation)(nil), // 13: google.rpc.PreconditionFailure.Violation + (*BadRequest_FieldViolation)(nil), // 14: google.rpc.BadRequest.FieldViolation + (*Help_Link)(nil), // 15: google.rpc.Help.Link + (*durationpb.Duration)(nil), // 16: google.protobuf.Duration } var 
file_google_rpc_error_details_proto_depIdxs = []int32{ 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry - 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 16, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation - 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation - 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation - 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link - 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 13, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation + 14, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation + 15, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link + 12, // 6: google.rpc.QuotaFailure.Violation.quota_dimensions:type_name -> google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry + 9, // 7: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_google_rpc_error_details_proto_init() } @@ -1288,7 +1414,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PreconditionFailure_Violation); i { case 0: return &v.state @@ -1300,7 +1426,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BadRequest_FieldViolation); i { case 0: return &v.state @@ -1312,7 +1438,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Help_Link); i { case 0: return &v.state @@ -1325,13 +1451,14 @@ func file_google_rpc_error_details_proto_init() { } } } + file_google_rpc_error_details_proto_msgTypes[11].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_rpc_error_details_proto_rawDesc, NumEnums: 0, - NumMessages: 15, + NumMessages: 16, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go 
index 6ad1b1c1d..06a3f7106 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index d5ed172ae..4d576876d 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -41,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) ba cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: resolver.NewAddressMap(), + subConns: resolver.NewAddressMapV2[balancer.SubConn](), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -65,7 +65,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns *resolver.AddressMap + subConns *resolver.AddressMapV2[balancer.SubConn] scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -100,7 +100,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := resolver.NewAddressMap() + addrsSet := resolver.NewAddressMapV2[any]() for _, a := range s.ResolverState.Addresses { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { @@ -122,8 +122,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } for _, a := range b.subConns.Keys() { - sci, _ := b.subConns.Get(a) - sc := sci.(balancer.SubConn) + sc, _ := b.subConns.Get(a) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { sc.Shutdown() @@ -173,8 +172,7 @@ func (b *baseBalancer) regeneratePicker() { // Filter out all ready SCs from full subConn map. for _, addr := range b.subConns.Keys() { - sci, _ := b.subConns.Get(addr) - sc := sci.(balancer.SubConn) + sc, _ := b.subConns.Get(addr) if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { readySCs[sc] = SubConnInfo{Address: addr} } diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index 421c4fecc..cc606f4da 100644 --- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -73,7 +73,7 @@ func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilde esOpts: esOpts, childBuilder: childBuilder, } - es.children.Store(resolver.NewEndpointMap()) + es.children.Store(resolver.NewEndpointMap[*balancerWrapper]()) return es } @@ -90,7 +90,7 @@ type endpointSharding struct { // calls into a child. To avoid deadlocks, do not acquire childMu while // holding mu. 
childMu sync.Mutex - children atomic.Pointer[resolver.EndpointMap] // endpoint -> *balancerWrapper + children atomic.Pointer[resolver.EndpointMap[*balancerWrapper]] // inhibitChildUpdates is set during UpdateClientConnState/ResolverError // calls (calls to children will each produce an update, only want one @@ -122,7 +122,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState var ret error children := es.children.Load() - newChildren := resolver.NewEndpointMap() + newChildren := resolver.NewEndpointMap[*balancerWrapper]() // Update/Create new children. for _, endpoint := range state.ResolverState.Endpoints { @@ -131,9 +131,8 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState // update. continue } - var childBalancer *balancerWrapper - if val, ok := children.Get(endpoint); ok { - childBalancer = val.(*balancerWrapper) + childBalancer, ok := children.Get(endpoint) + if ok { // Endpoint attributes may have changed, update the stored endpoint. es.mu.Lock() childBalancer.childState.Endpoint = endpoint @@ -166,7 +165,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState for _, e := range children.Keys() { child, _ := children.Get(e) if _, ok := newChildren.Get(e); !ok { - child.(*balancerWrapper).closeLocked() + child.closeLocked() } } es.children.Store(newChildren) @@ -189,7 +188,7 @@ func (es *endpointSharding) ResolverError(err error) { }() children := es.children.Load() for _, child := range children.Values() { - child.(*balancerWrapper).resolverErrorLocked(err) + child.resolverErrorLocked(err) } } @@ -202,7 +201,7 @@ func (es *endpointSharding) Close() { defer es.childMu.Unlock() children := es.children.Load() for _, child := range children.Values() { - child.(*balancerWrapper).closeLocked() + child.closeLocked() } } @@ -222,8 +221,7 @@ func (es *endpointSharding) updateState() { childStates := make([]ChildState, 0, children.Len()) for _, child := range children.Values() { - bw := child.(*balancerWrapper) - childState := bw.childState + childState := child.childState childStates = append(childStates, childState) childPicker := childState.State.Picker switch childState.State.ConnectivityState { diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 113181e6b..494314f23 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -122,7 +122,7 @@ func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) target: bo.Target.String(), metricsRecorder: cc.MetricsRecorder(), - subConns: resolver.NewAddressMap(), + subConns: resolver.NewAddressMapV2[*scData](), state: connectivity.Connecting, cancelConnectionTimer: func() {}, } @@ -220,7 +220,7 @@ type pickfirstBalancer struct { // updates. state connectivity.State // scData for active subonns mapped by address. 
- subConns *resolver.AddressMap + subConns *resolver.AddressMapV2[*scData] addressList addressList firstPass bool numTF int @@ -319,7 +319,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState prevAddr := b.addressList.currentAddress() prevSCData, found := b.subConns.Get(prevAddr) prevAddrsCount := b.addressList.size() - isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready + isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready b.addressList.updateAddrs(newAddrs) // If the previous ready SubConn exists in new address list, @@ -381,21 +381,21 @@ func (b *pickfirstBalancer) startFirstPassLocked() { b.numTF = 0 // Reset the connection attempt record for existing SubConns. for _, sd := range b.subConns.Values() { - sd.(*scData).connectionFailedInFirstPass = false + sd.connectionFailedInFirstPass = false } b.requestConnectionLocked() } func (b *pickfirstBalancer) closeSubConnsLocked() { for _, sd := range b.subConns.Values() { - sd.(*scData).subConn.Shutdown() + sd.subConn.Shutdown() } - b.subConns = resolver.NewAddressMap() + b.subConns = resolver.NewAddressMapV2[*scData]() } // deDupAddresses ensures that each address appears only once in the slice. func deDupAddresses(addrs []resolver.Address) []resolver.Address { - seenAddrs := resolver.NewAddressMap() + seenAddrs := resolver.NewAddressMapV2[*scData]() retAddrs := []resolver.Address{} for _, addr := range addrs { @@ -481,7 +481,7 @@ func addressFamily(address string) ipAddrFamily { // This ensures that the subchannel map accurately reflects the current set of // addresses received from the name resolver. func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { - newAddrsMap := resolver.NewAddressMap() + newAddrsMap := resolver.NewAddressMapV2[bool]() for _, addr := range newAddrs { newAddrsMap.Set(addr, true) } @@ -491,7 +491,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) continue } val, _ := b.subConns.Get(oldAddr) - val.(*scData).subConn.Shutdown() + val.subConn.Shutdown() b.subConns.Delete(oldAddr) } } @@ -500,13 +500,12 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // becomes ready, which means that all other subConn must be shutdown. func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { b.cancelConnectionTimer() - for _, v := range b.subConns.Values() { - sd := v.(*scData) + for _, sd := range b.subConns.Values() { if sd.subConn != selected.subConn { sd.subConn.Shutdown() } } - b.subConns = resolver.NewAddressMap() + b.subConns = resolver.NewAddressMapV2[*scData]() b.subConns.Set(selected.addr, selected) } @@ -539,18 +538,17 @@ func (b *pickfirstBalancer) requestConnectionLocked() { b.subConns.Set(curAddr, sd) } - scd := sd.(*scData) - switch scd.rawConnectivityState { + switch sd.rawConnectivityState { case connectivity.Idle: - scd.subConn.Connect() + sd.subConn.Connect() b.scheduleNextConnectionLocked() return case connectivity.TransientFailure: // The SubConn is being re-used and failed during a previous pass // over the addressList. It has not completed backoff yet. // Mark it as having failed and try the next address. 
- scd.connectionFailedInFirstPass = true - lastErr = scd.lastErr + sd.connectionFailedInFirstPass = true + lastErr = sd.lastErr continue case connectivity.Connecting: // Wait for the connection attempt to complete or the timer to fire @@ -558,7 +556,7 @@ func (b *pickfirstBalancer) requestConnectionLocked() { b.scheduleNextConnectionLocked() return default: - b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState) + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) return } @@ -753,8 +751,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { } // Connect() has been called on all the SubConns. The first pass can be // ended if all the SubConns have reported a failure. - for _, v := range b.subConns.Values() { - sd := v.(*scData) + for _, sd := range b.subConns.Values() { if !sd.connectionFailedInFirstPass { return } @@ -765,8 +762,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { Picker: &picker{err: lastErr}, }) // Start re-connecting all the SubConns that are already in IDLE. - for _, v := range b.subConns.Values() { - sd := v.(*scData) + for _, sd := range b.subConns.Values() { if sd.rawConnectivityState == connectivity.Idle { sd.subConn.Connect() } @@ -927,6 +923,5 @@ func (al *addressList) hasNext() bool { // fields that are meaningful to the SubConn. func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { return a.Addr == b.Addr && a.ServerName == b.ServerName && - a.Attributes.Equal(b.Attributes) && - a.Metadata == b.Metadata + a.Attributes.Equal(b.Attributes) } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index b2f8fc7f4..825c31795 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.5 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index a319ef979..4f350ca56 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -1231,8 +1231,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) // adjustParams updates parameters used to create transports upon // receiving a GoAway. func (ac *addrConn) adjustParams(r transport.GoAwayReason) { - switch r { - case transport.GoAwayTooManyPings: + if r == transport.GoAwayTooManyPings { v := 2 * ac.dopts.copts.KeepaliveParams.Time ac.cc.mu.Lock() if v > ac.cc.keepaliveParams.Time { diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 94177b05c..faa59e418 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.5 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -178,6 +178,87 @@ func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { return HealthCheckResponse_UNKNOWN } +type HealthListRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthListRequest) Reset() { + *x = HealthListRequest{} + mi := &file_grpc_health_v1_health_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthListRequest) ProtoMessage() {} + +func (x *HealthListRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthListRequest.ProtoReflect.Descriptor instead. +func (*HealthListRequest) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{2} +} + +type HealthListResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // statuses contains all the services and their respective status. + Statuses map[string]*HealthCheckResponse `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthListResponse) Reset() { + *x = HealthListResponse{} + mi := &file_grpc_health_v1_health_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthListResponse) ProtoMessage() {} + +func (x *HealthListResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthListResponse.ProtoReflect.Descriptor instead. 
+func (*HealthListResponse) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{3} +} + +func (x *HealthListResponse) GetStatuses() map[string]*HealthCheckResponse { + if x != nil { + return x.Statuses + } + return nil +} + var File_grpc_health_v1_health_proto protoreflect.FileDescriptor var file_grpc_health_v1_health_proto_rawDesc = string([]byte{ @@ -198,25 +279,44 @@ var file_grpc_health_v1_health_proto_rawDesc = string([]byte{ 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x03, 0x22, 0x13, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xc4, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, + 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x1a, 0x60, 0x0a, 0x0d, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0xfd, 0x01, + 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, + 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, - 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 
0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x30, 0x01, 0x42, 0x70, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x56, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x04, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x05, 0x57, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x70, 0x0a, + 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, + 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x76, 0x31, 0xa2, + 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x56, 0x31, 0xaa, 0x02, + 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, }) var ( @@ -232,23 +332,30 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { } var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse + (*HealthListRequest)(nil), // 3: grpc.health.v1.HealthListRequest + (*HealthListResponse)(nil), // 4: grpc.health.v1.HealthListResponse + nil, // 5: grpc.health.v1.HealthListResponse.StatusesEntry } var file_grpc_health_v1_health_proto_depIdxs = []int32{ 0, // 0: 
grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus - 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest - 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest - 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse - 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 5, // 1: grpc.health.v1.HealthListResponse.statuses:type_name -> grpc.health.v1.HealthListResponse.StatusesEntry + 2, // 2: grpc.health.v1.HealthListResponse.StatusesEntry.value:type_name -> grpc.health.v1.HealthCheckResponse + 1, // 3: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest + 3, // 4: grpc.health.v1.Health.List:input_type -> grpc.health.v1.HealthListRequest + 1, // 5: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest + 2, // 6: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse + 4, // 7: grpc.health.v1.Health.List:output_type -> grpc.health.v1.HealthListResponse + 2, // 8: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse + 6, // [6:9] is the sub-list for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_grpc_health_v1_health_proto_init() } @@ -262,7 +369,7 @@ func file_grpc_health_v1_health_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)), NumEnums: 1, - NumMessages: 2, + NumMessages: 5, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index f96b8ab49..93136610e 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -37,6 +37,7 @@ const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" + Health_List_FullMethodName = "/grpc.health.v1.Health/List" Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" ) @@ -55,9 +56,19 @@ type HealthClient interface { // // Clients should set a deadline when calling Check, and can declare the // server unhealthy if they do not receive a timely response. - // - // Check implementations should be idempotent and side effect free. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // List provides a non-atomic snapshot of the health of all the available + // services. + // + // The server may respond with a RESOURCE_EXHAUSTED error if too many services + // exist. + // + // Clients should set a deadline when calling List, and can declare the server + // unhealthy if they do not receive a timely response. 
+ // + // Clients should keep in mind that the list of health services exposed by an + // application can change over the lifetime of the process. + List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current // serving status. It will then subsequently send a new message whenever @@ -94,6 +105,16 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } +func (c *healthClient) List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HealthListResponse) + err := c.cc.Invoke(ctx, Health_List_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) @@ -128,9 +149,19 @@ type HealthServer interface { // // Clients should set a deadline when calling Check, and can declare the // server unhealthy if they do not receive a timely response. - // - // Check implementations should be idempotent and side effect free. Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // List provides a non-atomic snapshot of the health of all the available + // services. + // + // The server may respond with a RESOURCE_EXHAUSTED error if too many services + // exist. + // + // Clients should set a deadline when calling List, and can declare the server + // unhealthy if they do not receive a timely response. + // + // Clients should keep in mind that the list of health services exposed by an + // application can change over the lifetime of the process. + List(context.Context, *HealthListRequest) (*HealthListResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current // serving status. 
It will then subsequently send a new message whenever @@ -159,6 +190,9 @@ type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } +func (UnimplementedHealthServer) List(context.Context, *HealthListRequest) (*HealthListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } @@ -200,6 +234,24 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } +func _Health_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Health_List_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).List(ctx, req.(*HealthListRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(HealthCheckRequest) if err := stream.RecvMsg(m); err != nil { @@ -222,6 +274,10 @@ var Health_ServiceDesc = grpc.ServiceDesc{ MethodName: "Check", Handler: _Health_Check_Handler, }, + { + MethodName: "List", + Handler: _Health_List_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 1e42b6fdc..cc5713fd9 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -51,10 +51,24 @@ var ( // xDS server in the list of server configs will be used. XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used - // instead of the exiting pickfirst implementation. This can be enabled by + // instead of the exiting pickfirst implementation. This can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" - // to "true". - NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) + // to "false". + NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true) + + // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash + // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by + // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the + // implementation of A76 is stable, we will flip the default value to false + // in a subsequent release. A final release will remove this environment + // variable, enabling the new behavior unconditionally. + XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", true) + + // RingHashSetRequestHashKey is set if the ring hash balancer can get the + // request hash header by setting the "requestHashHeader" field, according + // to gRFC A76. 
It can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true". + RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 13e1f386b..2ce012cda 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -259,6 +259,13 @@ var ( // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for // testing purposes. SetBufferPoolingThresholdForTesting any // func(int) + + // TimeAfterFunc is used to create timers. During tests the function is + // replaced to track allocated timers and fail the test if a timer isn't + // cancelled. + TimeAfterFunc = func(d time.Duration, f func()) Timer { + return time.AfterFunc(d, f) + } ) // HealthChecker defines the signature of the client-side LB channel health @@ -300,3 +307,9 @@ type EnforceSubConnEmbedding interface { type EnforceClientConnEmbedding interface { enforceClientConnEmbedding() } + +// Timer is an interface to allow injecting different time.Timer implementations +// during tests. +type Timer interface { + Stop() bool +} diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index 900bfb716..c4055bc00 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -97,13 +97,11 @@ func hasNotPrintable(msg string) bool { return false } -// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : -// -// - key must contain one or more characters. -// - the characters in the key must be contained in [0-9 a-z _ - .]. -// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. -// - the characters in the every value must be printable (in [%x20-%x7E]). -func ValidatePair(key string, vals ...string) error { +// ValidateKey validates a key with the following rules (pseudo-headers are +// skipped): +// - the key must contain one or more characters. +// - the characters in the key must be in [0-9 a-z _ - .]. +func ValidateKey(key string) error { // key should not be empty if key == "" { return fmt.Errorf("there is an empty key in the header") @@ -119,6 +117,20 @@ func ValidatePair(key string, vals ...string) error { return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) } } + return nil +} + +// ValidatePair validates a key-value pair with the following rules +// (pseudo-header are skipped): +// - the key must contain one or more characters. +// - the characters in the key must be in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding +// value is performed. +// - the characters in every value must be printable (in [%x20-%x7E]). 
+func ValidatePair(key string, vals ...string) error { + if err := ValidateKey(key); err != nil { + return err + } if strings.HasSuffix(key, "-bin") { return nil } diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go index a6c647013..c0e227577 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go @@ -28,6 +28,8 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/proxyattributes" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -40,19 +42,26 @@ var ( // delegatingResolver manages both target URI and proxy address resolution by // delegating these tasks to separate child resolvers. Essentially, it acts as -// a intermediary between the gRPC ClientConn and the child resolvers. +// an intermediary between the gRPC ClientConn and the child resolvers. // // It implements the [resolver.Resolver] interface. type delegatingResolver struct { - target resolver.Target // parsed target URI to be resolved - cc resolver.ClientConn // gRPC ClientConn - targetResolver resolver.Resolver // resolver for the target URI, based on its scheme - proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured - proxyURL *url.URL // proxy URL, derived from proxy environment and target + target resolver.Target // parsed target URI to be resolved + cc resolver.ClientConn // gRPC ClientConn + proxyURL *url.URL // proxy URL, derived from proxy environment and target + // We do not hold both mu and childMu in the same goroutine. Avoid holding + // both locks when calling into the child, as the child resolver may + // synchronously callback into the channel. mu sync.Mutex // protects all the fields below targetResolverState *resolver.State // state of the target resolver proxyAddrs []resolver.Address // resolved proxy addresses; empty if no proxy is configured + + // childMu serializes calls into child resolvers. It also protects access to + // the following fields. + childMu sync.Mutex + targetResolver resolver.Resolver // resolver for the target URI, based on its scheme + proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured } // nopResolver is a resolver that does nothing. @@ -62,8 +71,8 @@ func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {} func (nopResolver) Close() {} -// proxyURLForTarget determines the proxy URL for the given address based on -// the environment. It can return the following: +// proxyURLForTarget determines the proxy URL for the given address based on the +// environment. It can return the following: // - nil URL, nil error: No proxy is configured or the address is excluded // using the `NO_PROXY` environment variable or if req.URL.Host is // "localhost" (with or without // a port number) @@ -82,7 +91,8 @@ func proxyURLForTarget(address string) (*url.URL, error) { // resolvers: // - one to resolve the proxy address specified using the supported // environment variables. This uses the registered resolver for the "dns" -// scheme. +// scheme. It is lazily built when a target resolver update contains at least +// one TCP address. 
// - one to resolve the target URI using the resolver specified by the scheme // in the target URI or specified by the user using the WithResolvers dial // option. As a special case, if the target URI's scheme is "dns" and a @@ -91,8 +101,10 @@ func proxyURLForTarget(address string) (*url.URL, error) { // resolution is enabled using the dial option. func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) { r := &delegatingResolver{ - target: target, - cc: cc, + target: target, + cc: cc, + proxyResolver: nopResolver{}, + targetResolver: nopResolver{}, } var err error @@ -111,41 +123,34 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti logger.Infof("Proxy URL detected : %s", r.proxyURL) } + // Resolver updates from one child may trigger calls into the other. Block + // updates until the children are initialized. + r.childMu.Lock() + defer r.childMu.Unlock() // When the scheme is 'dns' and target resolution on client is not enabled, // resolution should be handled by the proxy, not the client. Therefore, we // bypass the target resolver and store the unresolved target address. if target.URL.Scheme == "dns" && !targetResolutionEnabled { - state := resolver.State{ + r.targetResolverState = &resolver.State{ Addresses: []resolver.Address{{Addr: target.Endpoint()}}, Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}}, } - r.targetResolverState = &state - } else { - wcc := &wrappingClientConn{ - stateListener: r.updateTargetResolverState, - parent: r, - } - if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil { - return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err) - } + r.updateTargetResolverState(*r.targetResolverState) + return r, nil } - - if r.proxyResolver, err = r.proxyURIResolver(opts); err != nil { - return nil, fmt.Errorf("delegating_resolver: failed to build resolver for proxy URL %q: %v", r.proxyURL, err) + wcc := &wrappingClientConn{ + stateListener: r.updateTargetResolverState, + parent: r, } - - if r.targetResolver == nil { - r.targetResolver = nopResolver{} - } - if r.proxyResolver == nil { - r.proxyResolver = nopResolver{} + if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil { + return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err) } return r, nil } -// proxyURIResolver creates a resolver for resolving proxy URIs using the -// "dns" scheme. It adjusts the proxyURL to conform to the "dns:///" format and -// builds a resolver with a wrappingClientConn to capture resolved addresses. +// proxyURIResolver creates a resolver for resolving proxy URIs using the "dns" +// scheme. It adjusts the proxyURL to conform to the "dns:///" format and builds +// a resolver with a wrappingClientConn to capture resolved addresses. 
func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) { proxyBuilder := resolver.Get("dns") if proxyBuilder == nil { @@ -165,11 +170,15 @@ func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resol } func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) { + r.childMu.Lock() + defer r.childMu.Unlock() r.targetResolver.ResolveNow(o) r.proxyResolver.ResolveNow(o) } func (r *delegatingResolver) Close() { + r.childMu.Lock() + defer r.childMu.Unlock() r.targetResolver.Close() r.targetResolver = nil @@ -177,18 +186,43 @@ func (r *delegatingResolver) Close() { r.proxyResolver = nil } -// updateClientConnStateLocked creates a list of combined addresses by -// pairing each proxy address with every target address. For each pair, it -// generates a new [resolver.Address] using the proxy address, and adding the -// target address as the attribute along with user info. It returns nil if -// either resolver has not sent update even once and returns the error from -// ClientConn update once both resolvers have sent update atleast once. +func networkTypeFromAddr(addr resolver.Address) string { + networkType, ok := networktype.Get(addr) + if !ok { + networkType, _ = transport.ParseDialTarget(addr.Addr) + } + return networkType +} + +func isTCPAddressPresent(state *resolver.State) bool { + for _, addr := range state.Addresses { + if networkType := networkTypeFromAddr(addr); networkType == "tcp" { + return true + } + } + for _, endpoint := range state.Endpoints { + for _, addr := range endpoint.Addresses { + if networktype := networkTypeFromAddr(addr); networktype == "tcp" { + return true + } + } + } + return false +} + +// updateClientConnStateLocked constructs a combined list of addresses by +// pairing each proxy address with every target address of type TCP. For each +// pair, it creates a new [resolver.Address] using the proxy address and +// attaches the corresponding target address and user info as attributes. Target +// addresses that are not of type TCP are appended to the list as-is. The +// function returns nil if either resolver has not yet provided an update, and +// returns the result of ClientConn.UpdateState once both resolvers have +// provided at least one update. func (r *delegatingResolver) updateClientConnStateLocked() error { if r.targetResolverState == nil || r.proxyAddrs == nil { return nil } - curState := *r.targetResolverState // If multiple resolved proxy addresses are present, we send only the // unresolved proxy host and let net.Dial handle the proxy host name // resolution when creating the transport. Sending all resolved addresses @@ -206,24 +240,30 @@ func (r *delegatingResolver) updateClientConnStateLocked() error { } var addresses []resolver.Address for _, targetAddr := range (*r.targetResolverState).Addresses { + // Avoid proxy when network is not tcp. + if networkType := networkTypeFromAddr(targetAddr); networkType != "tcp" { + addresses = append(addresses, targetAddr) + continue + } addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{ User: r.proxyURL.User, ConnectAddr: targetAddr.Addr, })) } - // Create a list of combined endpoints by pairing all proxy endpoints - // with every target endpoint. Each time, it constructs a new - // [resolver.Endpoint] using the all addresses from all the proxy endpoint - // and the target addresses from one endpoint. 
The target address and user - // information from the proxy URL are added as attributes to the proxy - // address.The resulting list of addresses is then grouped into endpoints, - // covering all combinations of proxy and target endpoints. + // For each target endpoint, construct a new [resolver.Endpoint] that + // includes all addresses from all proxy endpoints and the addresses from + // that target endpoint, preserving the number of target endpoints. var endpoints []resolver.Endpoint for _, endpt := range (*r.targetResolverState).Endpoints { var addrs []resolver.Address - for _, proxyAddr := range r.proxyAddrs { - for _, targetAddr := range endpt.Addresses { + for _, targetAddr := range endpt.Addresses { + // Avoid proxy when network is not tcp. + if networkType := networkTypeFromAddr(targetAddr); networkType != "tcp" { + addrs = append(addrs, targetAddr) + continue + } + for _, proxyAddr := range r.proxyAddrs { addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{ User: r.proxyURL.User, ConnectAddr: targetAddr.Addr, @@ -234,8 +274,9 @@ func (r *delegatingResolver) updateClientConnStateLocked() error { } // Use the targetResolverState for its service config and attributes // contents. The state update is only sent after both the target and proxy - // resolvers have sent their updates, and curState has been updated with - // the combined addresses. + // resolvers have sent their updates, and curState has been updated with the + // combined addresses. + curState := *r.targetResolverState curState.Addresses = addresses curState.Endpoints = endpoints return r.cc.UpdateState(curState) @@ -245,7 +286,8 @@ func (r *delegatingResolver) updateClientConnStateLocked() error { // addresses and endpoints, marking the resolver as ready, and triggering a // state update if both proxy and target resolvers are ready. If the ClientConn // returns a non-nil error, it calls `ResolveNow()` on the target resolver. It -// is a StateListener function of wrappingClientConn passed to the proxy resolver. +// is a StateListener function of wrappingClientConn passed to the proxy +// resolver. func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error { r.mu.Lock() defer r.mu.Unlock() @@ -253,8 +295,8 @@ func (r *delegatingResolver) updateProxyResolverState(state resolver.State) erro logger.Infof("Addresses received from proxy resolver: %s", state.Addresses) } if len(state.Endpoints) > 0 { - // We expect exactly one address per endpoint because the proxy - // resolver uses "dns" resolution. + // We expect exactly one address per endpoint because the proxy resolver + // uses "dns" resolution. r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints)) for _, endpoint := range state.Endpoints { r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...) @@ -267,20 +309,29 @@ func (r *delegatingResolver) updateProxyResolverState(state resolver.State) erro err := r.updateClientConnStateLocked() // Another possible approach was to block until updates are received from // both resolvers. But this is not used because calling `New()` triggers - // `Build()` for the first resolver, which calls `UpdateState()`. And the + // `Build()` for the first resolver, which calls `UpdateState()`. And the // second resolver hasn't sent an update yet, so it would cause `New()` to // block indefinitely. 
if err != nil { - r.targetResolver.ResolveNow(resolver.ResolveNowOptions{}) + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if r.targetResolver != nil { + r.targetResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + }() } return err } -// updateTargetResolverState updates the target resolver state by storing target -// addresses, endpoints, and service config, marking the resolver as ready, and -// triggering a state update if both resolvers are ready. If the ClientConn -// returns a non-nil error, it calls `ResolveNow()` on the proxy resolver. It -// is a StateListener function of wrappingClientConn passed to the target resolver. +// updateTargetResolverState is the StateListener function provided to the +// target resolver via wrappingClientConn. It updates the resolver state and +// marks the target resolver as ready. If the update includes at least one TCP +// address and the proxy resolver has not yet been constructed, it initializes +// the proxy resolver. A combined state update is triggered once both resolvers +// are ready. If all addresses are non-TCP, it proceeds without waiting for the +// proxy resolver. If ClientConn.UpdateState returns a non-nil error, +// ResolveNow() is called on the proxy resolver. func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error { r.mu.Lock() defer r.mu.Unlock() @@ -289,9 +340,40 @@ func (r *delegatingResolver) updateTargetResolverState(state resolver.State) err logger.Infof("Addresses received from target resolver: %v", state.Addresses) } r.targetResolverState = &state + // If no addresses returned by resolver have network type as tcp , do not + // wait for proxy update. + if !isTCPAddressPresent(r.targetResolverState) { + return r.cc.UpdateState(*r.targetResolverState) + } + + // The proxy resolver may be rebuilt multiple times, specifically each time + // the target resolver sends an update, even if the target resolver is built + // successfully but building the proxy resolver fails. + if len(r.proxyAddrs) == 0 { + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if _, ok := r.proxyResolver.(nopResolver); !ok { + return + } + proxyResolver, err := r.proxyURIResolver(resolver.BuildOptions{}) + if err != nil { + r.cc.ReportError(fmt.Errorf("delegating_resolver: unable to build the proxy resolver: %v", err)) + return + } + r.proxyResolver = proxyResolver + }() + } + err := r.updateClientConnStateLocked() if err != nil { - r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{}) + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if r.proxyResolver != nil { + r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + }() } return nil } @@ -311,7 +393,8 @@ func (wcc *wrappingClientConn) UpdateState(state resolver.State) error { return wcc.stateListener(state) } -// ReportError intercepts errors from the child resolvers and passes them to ClientConn. +// ReportError intercepts errors from the child resolvers and passes them to +// ClientConn. func (wcc *wrappingClientConn) ReportError(err error) { wcc.parent.cc.ReportError(err) } @@ -322,8 +405,8 @@ func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) { wcc.UpdateState(resolver.State{Addresses: addrs}) } -// ParseServiceConfig parses the provided service config and returns an -// object that provides the parsed config. +// ParseServiceConfig parses the provided service config and returns an object +// that provides the parsed config. 
func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult { return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON) } diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go index 8ed347c54..ccc0e017e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -59,7 +59,7 @@ func (s *ClientStream) Read(n int) (mem.BufferSlice, error) { return b, err } -// Close closes the stream and popagates err to any readers. +// Close closes the stream and propagates err to any readers. func (s *ClientStream) Close(err error) { var ( rst bool diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 513dbb93d..171e690a3 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -176,7 +176,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error return fn(ctx, address) } if !ok { - networkType, address = parseDialTarget(address) + networkType, address = ParseDialTarget(address) } if opts, present := proxyattributes.Get(addr); present { return proxyDial(ctx, addr, grpcUA, opts) @@ -1242,7 +1242,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { statusCode = codes.DeadlineExceeded } } - t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) + st := status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode) + t.closeStream(s, st.Err(), false, http2.ErrCodeNo, st, nil, false) } func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { @@ -1390,8 +1391,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason - switch f.ErrCode { - case http2.ErrCodeEnhanceYourCalm: + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { if string(f.DebugData()) == "too_many_pings" { t.goAwayReason = GoAwayTooManyPings } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 997b0a59b..7e53eb173 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,6 +35,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" @@ -598,6 +599,22 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade if len(t.activeStreams) == 1 { t.idle = time.Time{} } + // Start a timer to close the stream on reaching the deadline. + if timeoutSet { + // We need to wait for s.cancel to be updated before calling + // t.closeStream to avoid data races. 
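The deadline-timer hunk above relies on a small synchronization trick: the timer callback blocks on a channel until the wrapped cancel function has been installed, so the callback can never race with the swap of `s.cancel`. A self-contained sketch of that pattern, using the standard `time.AfterFunc` in place of gRPC's internal `TimeAfterFunc` helper:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Stand-ins for the stream's cancel func and close path in the hunk above.
	oldCancel := func() { fmt.Println("context cancelled") }
	closeStream := func() { fmt.Println("stream closed on deadline") }

	cancelUpdated := make(chan struct{})
	timer := time.AfterFunc(10*time.Millisecond, func() {
		// Wait until the wrapped cancel below is installed before closing,
		// so the callback cannot observe a half-updated cancel func.
		<-cancelUpdated
		closeStream()
	})

	cancel := func() {
		oldCancel()
		timer.Stop() // stream finished first: stop the deadline timer
	}
	close(cancelUpdated) // safe for the timer callback to proceed now

	_ = cancel // in the real transport this replaces s.cancel
	time.Sleep(50 * time.Millisecond)
}
```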
+ cancelUpdated := make(chan struct{}) + timer := internal.TimeAfterFunc(timeout, func() { + <-cancelUpdated + t.closeStream(s, true, http2.ErrCodeCancel, false) + }) + oldCancel := s.cancel + s.cancel = func() { + oldCancel() + timer.Stop() + } + close(cancelUpdated) + } t.mu.Unlock() if channelz.IsOn() { t.channelz.SocketMetrics.StreamsStarted.Add(1) @@ -1274,7 +1291,6 @@ func (t *http2Server) Close(err error) { // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { - t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { delete(t.activeStreams, s.id) @@ -1324,7 +1340,10 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo // called to interrupt the potential blocking on other goroutines. s.cancel() - s.swapState(streamDone) + oldState := s.swapState(streamDone) + if oldState == streamDone { + return + } t.deleteStream(s, eosReceived) t.controlBuf.put(&cleanupStream{ diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 3613d7b64..f997f9fdb 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -439,8 +439,8 @@ func getWriteBufferPool(size int) *sync.Pool { return pool } -// parseDialTarget returns the network and address to pass to dialer. -func parseDialTarget(target string) (string, string) { +// ParseDialTarget returns the network and address to pass to dialer. +func ParseDialTarget(target string) (string, string) { net := "tcp" m1 := strings.Index(target, ":") m2 := strings.Index(target, ":/") diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go index a22a90151..cf8da0b52 100644 --- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -35,8 +35,10 @@ type ServerStream struct { *Stream // Embed for common stream functionality. st internalServerTransport - ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) - cancel context.CancelFunc // invoked at the end of stream to cancel ctx. + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + // cancel is invoked at the end of stream to cancel ctx. It also stops the + // timer for monitoring the rpc deadline if configured. + cancel func() // Holds compressor names passed in grpc-accept-encoding metadata from the // client. diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index ada5b9bb7..c3c15ac96 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -18,16 +18,28 @@ package resolver -type addressMapEntry struct { +import ( + "encoding/base64" + "sort" + "strings" +) + +type addressMapEntry[T any] struct { addr Address - value any + value T } -// AddressMap is a map of addresses to arbitrary values taking into account +// AddressMap is an AddressMapV2[any]. It will be deleted in an upcoming +// release of grpc-go. +// +// Deprecated: use the generic AddressMapV2 type instead. +type AddressMap = AddressMapV2[any] + +// AddressMapV2 is a map of addresses to arbitrary values taking into account // Attributes. BalancerAttributes are ignored, as are Metadata and Type. 
// Multiple accesses may not be performed concurrently. Must be created via // NewAddressMap; do not construct directly. -type AddressMap struct { +type AddressMapV2[T any] struct { // The underlying map is keyed by an Address with fields that we don't care // about being set to their zero values. The only fields that we care about // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to @@ -41,23 +53,30 @@ type AddressMap struct { // The value type of the map contains a slice of addresses which match the key // in their `Addr` and `ServerName` fields and contain the corresponding value // associated with them. - m map[Address]addressMapEntryList + m map[Address]addressMapEntryList[T] } func toMapKey(addr *Address) Address { return Address{Addr: addr.Addr, ServerName: addr.ServerName} } -type addressMapEntryList []*addressMapEntry +type addressMapEntryList[T any] []*addressMapEntry[T] -// NewAddressMap creates a new AddressMap. +// NewAddressMap creates a new AddressMapV2[any]. +// +// Deprecated: use the generic NewAddressMapV2 constructor instead. func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[Address]addressMapEntryList)} + return NewAddressMapV2[any]() +} + +// NewAddressMapV2 creates a new AddressMapV2. +func NewAddressMapV2[T any]() *AddressMapV2[T] { + return &AddressMapV2[T]{m: make(map[Address]addressMapEntryList[T])} } // find returns the index of addr in the addressMapEntry slice, or -1 if not // present. -func (l addressMapEntryList) find(addr Address) int { +func (l addressMapEntryList[T]) find(addr Address) int { for i, entry := range l { // Attributes are the only thing to match on here, since `Addr` and // `ServerName` are already equal. @@ -69,28 +88,28 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value any, ok bool) { +func (a *AddressMapV2[T]) Get(addr Address) (value T, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { return entryList[entry].value, true } - return nil, false + return value, false } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value any) { +func (a *AddressMapV2[T]) Set(addr Address, value T) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { entryList[entry].value = value return } - a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry[T]{addr: addr, value: value}) } // Delete removes addr from the map. -func (a *AddressMap) Delete(addr Address) { +func (a *AddressMapV2[T]) Delete(addr Address) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] entry := entryList.find(addr) @@ -107,7 +126,7 @@ func (a *AddressMap) Delete(addr Address) { } // Len returns the number of entries in the map. -func (a *AddressMap) Len() int { +func (a *AddressMapV2[T]) Len() int { ret := 0 for _, entryList := range a.m { ret += len(entryList) @@ -116,7 +135,7 @@ func (a *AddressMap) Len() int { } // Keys returns a slice of all current map keys. -func (a *AddressMap) Keys() []Address { +func (a *AddressMapV2[T]) Keys() []Address { ret := make([]Address, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { @@ -127,8 +146,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. 
-func (a *AddressMap) Values() []any { - ret := make([]any, 0, a.Len()) +func (a *AddressMapV2[T]) Values() []T { + ret := make([]T, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) @@ -137,70 +156,65 @@ func (a *AddressMap) Values() []any { return ret } -type endpointNode struct { - addrs map[string]struct{} -} - -// Equal returns whether the unordered set of addrs are the same between the -// endpoint nodes. -func (en *endpointNode) Equal(en2 *endpointNode) bool { - if len(en.addrs) != len(en2.addrs) { - return false - } - for addr := range en.addrs { - if _, ok := en2.addrs[addr]; !ok { - return false - } - } - return true -} - -func toEndpointNode(endpoint Endpoint) endpointNode { - en := make(map[string]struct{}) - for _, addr := range endpoint.Addresses { - en[addr.Addr] = struct{}{} - } - return endpointNode{ - addrs: en, - } -} +type endpointMapKey string // EndpointMap is a map of endpoints to arbitrary values keyed on only the // unordered set of address strings within an endpoint. This map is not thread // safe, thus it is unsafe to access concurrently. Must be created via // NewEndpointMap; do not construct directly. -type EndpointMap struct { - endpoints map[*endpointNode]any +type EndpointMap[T any] struct { + endpoints map[endpointMapKey]endpointData[T] +} + +type endpointData[T any] struct { + // decodedKey stores the original key to avoid decoding when iterating on + // EndpointMap keys. + decodedKey Endpoint + value T } // NewEndpointMap creates a new EndpointMap. -func NewEndpointMap() *EndpointMap { - return &EndpointMap{ - endpoints: make(map[*endpointNode]any), +func NewEndpointMap[T any]() *EndpointMap[T] { + return &EndpointMap[T]{ + endpoints: make(map[endpointMapKey]endpointData[T]), } } +// encodeEndpoint returns a string that uniquely identifies the unordered set of +// addresses within an endpoint. +func encodeEndpoint(e Endpoint) endpointMapKey { + addrs := make([]string, 0, len(e.Addresses)) + // base64 encoding the address strings restricts the characters present + // within the strings. This allows us to use a delimiter without the need of + // escape characters. + for _, addr := range e.Addresses { + addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr))) + } + sort.Strings(addrs) + // " " should not appear in base64 encoded strings. + return endpointMapKey(strings.Join(addrs, " ")) +} + // Get returns the value for the address in the map, if present. -func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - return em.endpoints[endpoint], true +func (em *EndpointMap[T]) Get(e Endpoint) (value T, ok bool) { + val, found := em.endpoints[encodeEndpoint(e)] + if found { + return val.value, true } - return nil, false + return value, false } // Set updates or adds the value to the address in the map. -func (em *EndpointMap) Set(e Endpoint, value any) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - em.endpoints[endpoint] = value - return +func (em *EndpointMap[T]) Set(e Endpoint, value T) { + en := encodeEndpoint(e) + em.endpoints[en] = endpointData[T]{ + decodedKey: Endpoint{Addresses: e.Addresses}, + value: value, } - em.endpoints[&en] = value } // Len returns the number of entries in the map. 
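The rewritten `EndpointMap` above drops the linear `endpointNode` scan in favour of a plain map keyed on an order-insensitive encoding of the endpoint's addresses (the same release also makes `AddressMapV2`/`EndpointMap` generic in their value type). A small sketch of that keying scheme, independent of the resolver types:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"sort"
	"strings"
)

// encodeKey mirrors the keying scheme above: base64 restricts the character
// set so a space can safely act as a delimiter, and sorting makes the key
// independent of address order.
func encodeKey(addrs []string) string {
	enc := make([]string, 0, len(addrs))
	for _, a := range addrs {
		enc = append(enc, base64.StdEncoding.EncodeToString([]byte(a)))
	}
	sort.Strings(enc)
	return strings.Join(enc, " ")
}

func main() {
	a := encodeKey([]string{"10.0.0.1:443", "10.0.0.2:443"})
	b := encodeKey([]string{"10.0.0.2:443", "10.0.0.1:443"})
	fmt.Println(a == b) // true: same unordered address set, same key
}
```

Because base64 output never contains a space, the delimiter cannot collide with address contents, so two endpoints with the same unordered address set always hash to the same map key.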
-func (em *EndpointMap) Len() int { +func (em *EndpointMap[T]) Len() int { return len(em.endpoints) } @@ -209,43 +223,25 @@ func (em *EndpointMap) Len() int { // the unordered set of addresses. Thus, endpoint information returned is not // the full endpoint data (drops duplicated addresses and attributes) but can be // used for EndpointMap accesses. -func (em *EndpointMap) Keys() []Endpoint { +func (em *EndpointMap[T]) Keys() []Endpoint { ret := make([]Endpoint, 0, len(em.endpoints)) - for en := range em.endpoints { - var endpoint Endpoint - for addr := range en.addrs { - endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) - } - ret = append(ret, endpoint) + for _, en := range em.endpoints { + ret = append(ret, en.decodedKey) } return ret } // Values returns a slice of all current map values. -func (em *EndpointMap) Values() []any { - ret := make([]any, 0, len(em.endpoints)) +func (em *EndpointMap[T]) Values() []T { + ret := make([]T, 0, len(em.endpoints)) for _, val := range em.endpoints { - ret = append(ret, val) + ret = append(ret, val.value) } return ret } -// find returns a pointer to the endpoint node in em if the endpoint node is -// already present. If not found, nil is returned. The comparisons are done on -// the unordered set of addresses within an endpoint. -func (em EndpointMap) find(e endpointNode) *endpointNode { - for endpoint := range em.endpoints { - if e.Equal(endpoint) { - return endpoint - } - } - return nil -} - // Delete removes the specified endpoint from the map. -func (em *EndpointMap) Delete(e Endpoint) { - en := toEndpointNode(e) - if entry := em.find(en); entry != nil { - delete(em.endpoints, entry) - } +func (em *EndpointMap[T]) Delete(e Endpoint) { + en := encodeEndpoint(e) + delete(em.endpoints, en) } diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index 945e24ff8..80e16a327 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -134,12 +134,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { return nil } if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } + s.Endpoints = addressesToEndpoints(s.Addresses) } ccr.addChannelzTraceEvent(s) ccr.curState = s @@ -172,7 +167,11 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { ccr.cc.mu.Unlock() return } - s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + s := resolver.State{ + Addresses: addrs, + ServiceConfig: ccr.curState.ServiceConfig, + Endpoints: addressesToEndpoints(addrs), + } ccr.addChannelzTraceEvent(s) ccr.curState = s ccr.mu.Unlock() @@ -210,3 +209,13 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } + +func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint { + endpoints := make([]resolver.Endpoint, 0, len(addrs)) + for _, a := range addrs { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + endpoints = append(endpoints, ep) + } + return endpoints +} diff --git 
a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index a8ddb0af5..ad20e9dff 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -870,13 +870,19 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err) } - out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)), pool) + // Read at most one byte more than the limit from the decompressor. + // Unless the limit is MaxInt64, in which case, that's impossible, so + // apply no limit. + if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 { + dcReader = io.LimitReader(dcReader, limit+1) + } + out, err := mem.ReadAll(dcReader, pool) if err != nil { out.Free() return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err) } - if out.Len() == maxReceiveMessageSize && !atEOF(dcReader) { + if out.Len() > maxReceiveMessageSize { out.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize) } @@ -885,12 +891,6 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload") } -// atEOF reads data from r and returns true if zero bytes could be read and r.Read returns EOF. -func atEOF(dcReader io.Reader) bool { - n, err := dcReader.Read(make([]byte, 1)) - return n == 0 && err == io.EOF -} - type recvCompressor interface { RecvCompress() string } diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 6f20d2d54..baf7740ef 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -36,7 +36,12 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC attempt begins. +// Begin contains stats for the start of an RPC attempt. +// +// - Server-side: Triggered after `InHeader`, as headers are processed +// before the RPC lifecycle begins. +// - Client-side: The first stats event recorded. +// // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. @@ -69,7 +74,7 @@ func (*PickerUpdated) IsClient() bool { return true } func (*PickerUpdated) isRPCStats() {} -// InPayload contains the information for an incoming payload. +// InPayload contains stats about an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool @@ -98,7 +103,9 @@ func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) isRPCStats() {} -// InHeader contains stats when a header is received. +// InHeader contains stats about header reception. +// +// - Server-side: The first stats event after the RPC request is received. type InHeader struct { // Client is true if this InHeader is from client side. Client bool @@ -123,7 +130,7 @@ func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) isRPCStats() {} -// InTrailer contains stats when a trailer is received. +// InTrailer contains stats about trailer reception. type InTrailer struct { // Client is true if this InTrailer is from client side. 
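Stepping back to the `rpc_util.go` hunk earlier in this chunk: reading through an `io.LimitReader` capped at limit+1 bytes lets the caller detect an oversized decompressed message with `out.Len() > maxReceiveMessageSize`, without the extra one-byte `atEOF` probe. A rough standalone sketch of the same idea, with a hypothetical `readCapped` helper:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"math"
)

// readCapped reads at most limit+1 bytes so that len(out) > limit is a
// reliable oversize signal; a limit of math.MaxInt64 means "no limit",
// since limit+1 would overflow there.
func readCapped(r io.Reader, limit int64) ([]byte, error) {
	if limit < math.MaxInt64 {
		r = io.LimitReader(r, limit+1)
	}
	out, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	if int64(len(out)) > limit {
		return nil, fmt.Errorf("decompressed message larger than max %d", limit)
	}
	return out, nil
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write(bytes.Repeat([]byte("a"), 64)) // 64 bytes, compresses well
	zw.Close()

	zr, _ := gzip.NewReader(&buf)
	_, err := readCapped(zr, 16)
	fmt.Println(err) // oversize detected after reading only 17 bytes
}
```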
Client bool @@ -139,7 +146,7 @@ func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) isRPCStats() {} -// OutPayload contains the information for an outgoing payload. +// OutPayload contains stats about an outgoing payload. type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool @@ -166,7 +173,10 @@ func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) isRPCStats() {} -// OutHeader contains stats when a header is sent. +// OutHeader contains stats about header transmission. +// +// - Client-side: Only occurs after 'Begin', as headers are always the first +// thing sent on a stream. type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool @@ -189,14 +199,15 @@ func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) isRPCStats() {} -// OutTrailer contains stats when a trailer is sent. +// OutTrailer contains stats about trailer transmission. type OutTrailer struct { // Client is true if this OutTrailer is from client side. Client bool // WireLength is the wire length of trailer. // - // Deprecated: This field is never set. The length is not known when this message is - // emitted because the trailer fields are compressed with hpack after that. + // Deprecated: This field is never set. The length is not known when this + // message is emitted because the trailer fields are compressed with hpack + // after that. WireLength int // Trailer contains the trailer metadata sent to the client. This // field is only valid if this OutTrailer is from the server side. @@ -208,7 +219,7 @@ func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) isRPCStats() {} -// End contains stats when an RPC ends. +// End contains stats about RPC completion. type End struct { // Client is true if this End is from client side. Client bool @@ -238,7 +249,7 @@ type ConnStats interface { IsClient() bool } -// ConnBegin contains the stats of a connection when it is established. +// ConnBegin contains stats about connection establishment. type ConnBegin struct { // Client is true if this ConnBegin is from client side. Client bool @@ -249,7 +260,7 @@ func (s *ConnBegin) IsClient() bool { return s.Client } func (s *ConnBegin) isConnStats() {} -// ConnEnd contains the stats of a connection when it ends. +// ConnEnd contains stats about connection termination. type ConnEnd struct { // Client is true if this ConnEnd is from client side. Client bool diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 783c41f78..51da8ed59 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
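The revised `stats.go` comments above pin down event ordering: server-side, `InHeader` precedes `Begin`; client-side, `Begin` comes first and `OutHeader` follows it. One way to observe that ordering is a minimal `stats.Handler` that just logs event types; a sketch against the public `google.golang.org/grpc/stats` API:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// orderLogger logs each RPC stats event so the ordering described above
// (e.g. server-side InHeader before Begin) can be observed.
type orderLogger struct{}

func (orderLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

func (orderLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch s.(type) {
	case *stats.Begin:
		log.Println("Begin")
	case *stats.InHeader:
		log.Println("InHeader")
	case *stats.OutHeader:
		log.Println("OutHeader")
	case *stats.InPayload, *stats.OutPayload:
		log.Println("payload")
	case *stats.End:
		log.Println("End")
	}
}

func (orderLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (orderLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func main() {
	// Attach to a server with grpc.StatsHandler(orderLogger{}); wiring up a
	// full server/client pair is omitted from this sketch.
	var _ stats.Handler = orderLogger{}
	log.Println("orderLogger satisfies stats.Handler")
}
```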
-const Version = "1.71.0" +const Version = "1.72.1" diff --git a/vendor/modules.txt b/vendor/modules.txt index 1e12dc9e5..80fd676ee 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -215,6 +215,9 @@ code.superseriousbusiness.org/oauth2/v4/generates code.superseriousbusiness.org/oauth2/v4/manage code.superseriousbusiness.org/oauth2/v4/models code.superseriousbusiness.org/oauth2/v4/server +# codeberg.org/gruf/go-bitutil v1.1.0 +## explicit; go 1.19 +codeberg.org/gruf/go-bitutil # codeberg.org/gruf/go-bytesize v1.0.3 ## explicit; go 1.17 codeberg.org/gruf/go-bytesize @@ -361,9 +364,9 @@ github.com/bytedance/sonic/loader/internal/abi github.com/bytedance/sonic/loader/internal/iasm/expr github.com/bytedance/sonic/loader/internal/iasm/x86_64 github.com/bytedance/sonic/loader/internal/rt -# github.com/cenkalti/backoff/v4 v4.3.0 -## explicit; go 1.18 -github.com/cenkalti/backoff/v4 +# github.com/cenkalti/backoff/v5 v5.0.2 +## explicit; go 1.23 +github.com/cenkalti/backoff/v5 # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 @@ -583,8 +586,8 @@ github.com/gorilla/sessions # github.com/gorilla/websocket v1.5.3 ## explicit; go 1.12 github.com/gorilla/websocket -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 -## explicit; go 1.22 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 +## explicit; go 1.23.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -763,15 +766,15 @@ github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/promhttp/internal -# github.com/prometheus/client_model v0.6.1 -## explicit; go 1.19 +# github.com/prometheus/client_model v0.6.2 +## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.62.0 -## explicit; go 1.21 +# github.com/prometheus/common v0.64.0 +## explicit; go 1.23.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model -# github.com/prometheus/procfs v0.15.1 -## explicit; go 1.20 +# github.com/prometheus/procfs v0.16.1 +## explicit; go 1.23.0 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util @@ -990,14 +993,14 @@ go.mongodb.org/mongo-driver/x/bsonx/bsoncore ## explicit; go 1.22.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/contrib/bridges/prometheus v0.60.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 +## explicit; go 1.23.0 go.opentelemetry.io/contrib/bridges/prometheus -# go.opentelemetry.io/contrib/exporters/autoexport v0.60.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 +## explicit; go 1.23.0 go.opentelemetry.io/contrib/exporters/autoexport -# go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/contrib/instrumentation/runtime v0.61.0 +## explicit; go 1.23.0 go.opentelemetry.io/contrib/instrumentation/runtime go.opentelemetry.io/contrib/instrumentation/runtime/internal/deprecatedruntime go.opentelemetry.io/contrib/instrumentation/runtime/internal/x @@ -1018,64 +1021,64 @@ go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/semconv/v1.7.0 -# 
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry -# go.opentelemetry.io/otel/exporters/prometheus v0.57.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/prometheus v0.58.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/prometheus -# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/stdout/stdoutlog -# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric -# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace -# go.opentelemetry.io/otel/log v0.11.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/log v0.12.2 +## explicit; go 1.23.0 go.opentelemetry.io/otel/log go.opentelemetry.io/otel/log/embedded go.opentelemetry.io/otel/log/noop @@ -1093,8 +1096,8 @@ go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/sdk/log v0.11.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/sdk/log v0.12.2 +## explicit; go 1.23.0 go.opentelemetry.io/otel/sdk/log # go.opentelemetry.io/otel/sdk/metric v1.36.0 ## explicit; go 1.23.0 @@ -1110,8 +1113,8 @@ go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.5.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/proto/otlp v1.6.0 +## explicit; go 1.23.0 go.opentelemetry.io/proto/otlp/collector/logs/v1 go.opentelemetry.io/proto/otlp/collector/metrics/v1 go.opentelemetry.io/proto/otlp/collector/trace/v1 @@ -1245,15 +1248,15 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a -## explicit; go 1.22 +# google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 +## explicit; go 1.23.0 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a -## explicit; go 1.22 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 +## explicit; go 1.23.0 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.71.0 -## explicit; go 1.22.0 +# google.golang.org/grpc v1.72.1 +## explicit; go 1.23 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff diff --git a/web/source/nollamas/index.js b/web/source/nollamas/index.js index 4e162182c..e8ae0a75c 100644 --- a/web/source/nollamas/index.js +++ b/web/source/nollamas/index.js @@ -43,18 +43,18 @@ document.addEventListener("DOMContentLoaded", function() { // Read the challenge and difficulty from // data attributes on the nollamas section. 
+ const seed = nollamas.dataset.nollamasSeed; const challenge = nollamas.dataset.nollamasChallenge; - const difficulty = nollamas.dataset.nollamasDifficulty; - console.log("challenge:", challenge); // eslint-disable-line no-console - console.log("difficulty:", difficulty); // eslint-disable-line no-console + console.log("seed:", seed); // eslint-disable-line no-console + console.log("challenge:", challenge); // eslint-disable-line no-console // Prepare the worker with task function. const worker = new Worker("/assets/dist/nollamasworker.js"); const startTime = performance.now(); worker.postMessage({ challenge: challenge, - difficulty: difficulty, + seed: seed, }); // Set the main worker function. diff --git a/web/source/nollamasworker/index.js b/web/source/nollamasworker/index.js index 2762b125e..3c9b043c2 100644 --- a/web/source/nollamasworker/index.js +++ b/web/source/nollamasworker/index.js @@ -19,32 +19,22 @@ import sha256 from "./sha256"; -let compute = async function(challengeStr, diffStr) { +let compute = async function(seedStr, challengeStr) { const textEncoder = new TextEncoder(); - // Get difficulty1 as number and generate - // expected zero ASCII prefix to check for. - const diff1 = parseInt(diffStr, 10); - const zeros = "0".repeat(diff1); - - // Calculate hex encoded prefix required to check solution, where we - // need diff1 no. chars in hex, and hex encoding doubles input length. - const prefixLen = diff1 / 2 + (diff1 % 2 != 0 ? 2 : 0); - let nonce = 0; while (true) { // eslint-disable-line no-constant-condition // Create possible solution string from challenge string + nonce. - const solution = textEncoder.encode(challengeStr + nonce.toString()); + const solution = textEncoder.encode(seedStr + nonce.toString()); - // Generate SHA256 hashsum of solution string, and hex encode the - // necessary prefix length we need to check for a valid solution. - const prefixArray = Array.from(sha256(solution).slice(0, prefixLen)); - const prefixHex = prefixArray.map(b => b.toString(16).padStart(2, "0")).join(""); + // Generate hex encoded SHA256 hashsum of solution. + const hashArray = Array.from(sha256(solution)); + const hashAsHex = hashArray.map(b => b.toString(16).padStart(2, "0")).join(""); - // Check if the hex encoded hash has - // difficulty defined zeroes prefix. - if (prefixHex.startsWith(zeros)) { + // Check whether hex encoded + // solution matches challenge. + if (hashAsHex == challengeStr) { return nonce; } @@ -56,11 +46,8 @@ let compute = async function(challengeStr, diffStr) { onmessage = async function(e) { console.log('worker started'); // eslint-disable-line no-console - const challenge = e.data.challenge; - const difficulty = e.data.difficulty; - - // Compute the nonce that produces solution with args. - let nonce = await compute(challenge, difficulty); + // Compute nonce value that produces 'challenge' for seed. + let nonce = await compute(e.data.seed, e.data.challenge); // Post the solution nonce back to caller. 
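The reworked worker above no longer counts leading zeroes: it searches for the nonce whose SHA256 over seed+nonce hex-encodes to the exact challenge handed out by the server. The same loop, sketched in Go for reference only (the JavaScript worker remains the real implementation):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strconv"
)

// solve brute-forces the nonce whose SHA256(seed + nonce) hex digest equals
// the server-issued challenge, mirroring the worker loop above.
func solve(seed, challenge string) int {
	for nonce := 0; ; nonce++ {
		sum := sha256.Sum256([]byte(seed + strconv.Itoa(nonce)))
		if hex.EncodeToString(sum[:]) == challenge {
			return nonce
		}
	}
}

func main() {
	// Hypothetical seed; real values come from the nollamas data attributes.
	seed := "example-seed"
	target := sha256.Sum256([]byte(seed + "42"))
	challenge := hex.EncodeToString(target[:])
	fmt.Println(solve(seed, challenge)) // prints 42
}
```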
postMessage({ nonce: nonce, done: true }); diff --git a/web/source/package.json b/web/source/package.json index 3cb70e9a6..80dbb114e 100644 --- a/web/source/package.json +++ b/web/source/package.json @@ -16,6 +16,7 @@ "blurhash": "^2.0.5", "get-by-dot": "^1.0.2", "html-to-text": "^9.0.5", + "humanize-duration": "^3.32.2", "is-valid-domain": "^0.1.6", "js-file-download": "^0.4.12", "langs": "^2.0.0", @@ -48,6 +49,7 @@ "@browserify/uglifyify": "^6.0.0", "@joepie91/eslint-config": "^1.1.1", "@types/html-to-text": "^9.0.4", + "@types/humanize-duration": "^3.27.4", "@types/is-valid-domain": "^0.0.2", "@types/papaparse": "^5.3.9", "@types/parse-link-header": "^2.0.3", diff --git a/web/source/settings/lib/query/gts-api.ts b/web/source/settings/lib/query/gts-api.ts index 9d38e435d..33429d8a8 100644 --- a/web/source/settings/lib/query/gts-api.ts +++ b/web/source/settings/lib/query/gts-api.ts @@ -26,7 +26,7 @@ import type { import { serialize as serializeForm } from "object-to-formdata"; import type { FetchBaseQueryMeta } from "@reduxjs/toolkit/dist/query/fetchBaseQuery"; import type { RootState } from '../../redux/store'; -import { InstanceV1 } from '../types/instance'; +import { InstanceV1, InstanceV2 } from '../types/instance'; /** * GTSFetchArgs extends standard FetchArgs used by @@ -186,6 +186,11 @@ export const gtsApi = createApi({ query: () => ({ url: `/api/v1/instance` }) + }), + instanceV2: build.query({ + query: () => ({ + url: `/api/v2/instance` + }) }) }) }); @@ -193,8 +198,13 @@ export const gtsApi = createApi({ /** * Query /api/v1/instance to retrieve basic instance information. * This endpoint does not require authentication/authorization. - * TODO: move this to ./instance. */ const useInstanceV1Query = gtsApi.useInstanceV1Query; -export { useInstanceV1Query }; +/** + * Query /api/v2/instance to retrieve basic instance information. + * This endpoint does not require authentication/authorization. + */ +const useInstanceV2Query = gtsApi.useInstanceV2Query; + +export { useInstanceV1Query, useInstanceV2Query }; diff --git a/web/source/settings/lib/query/user/domainperms.ts b/web/source/settings/lib/query/user/domainperms.ts new file mode 100644 index 000000000..3d8e77bfe --- /dev/null +++ b/web/source/settings/lib/query/user/domainperms.ts @@ -0,0 +1,53 @@ +/* + GoToSocial + Copyright (C) GoToSocial Authors admin@gotosocial.org + SPDX-License-Identifier: AGPL-3.0-or-later + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . +*/ + +import { gtsApi } from "../gts-api"; + +import type { DomainPerm } from "../../types/domain-permission"; + +const extended = gtsApi.injectEndpoints({ + endpoints: (build) => ({ + instanceDomainBlocks: build.query({ + query: () => ({ + url: `/api/v1/instance/domain_blocks` + }), + }), + + instanceDomainAllows: build.query({ + query: () => ({ + url: `/api/v1/instance/domain_allows` + }) + }), + }), +}); + +/** + * Get user-level view of all explicitly blocked domains. 
+ */ +const useInstanceDomainBlocksQuery = extended.useInstanceDomainBlocksQuery; + +/** + * Get user-level view of all explicitly allowed domains. + */ +const useInstanceDomainAllowsQuery = extended.useInstanceDomainAllowsQuery; + +export { + useInstanceDomainBlocksQuery, + useInstanceDomainAllowsQuery, +}; diff --git a/web/source/settings/lib/types/domain-permission.ts b/web/source/settings/lib/types/domain-permission.ts index 27c4b56c9..3e947db61 100644 --- a/web/source/settings/lib/types/domain-permission.ts +++ b/web/source/settings/lib/types/domain-permission.ts @@ -33,6 +33,7 @@ export interface DomainPerm { obfuscate?: boolean; private_comment?: string; public_comment?: string; + comment?: string; created_at?: string; created_by?: string; subscription_id?: string; diff --git a/web/source/settings/lib/types/instance.ts b/web/source/settings/lib/types/instance.ts index 9abdc6a96..87d129d92 100644 --- a/web/source/settings/lib/types/instance.ts +++ b/web/source/settings/lib/types/instance.ts @@ -17,36 +17,52 @@ along with this program. If not, see . */ +import { Account } from "./account"; + export interface InstanceV1 { - uri: string; - account_domain: string; - title: string; - description: string; + uri: string; + account_domain: string; + title: string; + description: string; description_text?: string; - short_description: string; + short_description: string; short_description_text?: string; - custom_css: string; - email: string; - version: string; - debug?: boolean; - languages: any[]; // TODO: define this - registrations: boolean; - approval_required: boolean; - invites_enabled: boolean; - configuration: InstanceConfiguration; - urls: InstanceUrls; - stats: InstanceStats; - thumbnail: string; - contact_account: Object; // TODO: define this. 
- max_toot_chars: number; - rules: any[]; // TODO: define this - terms?: string; + custom_css: string; + email: string; + version: string; + debug?: boolean; + languages: string[]; + registrations: boolean; + approval_required: boolean; + invites_enabled: boolean; + configuration: InstanceV1Configuration; + urls: InstanceV1Urls; + stats: InstanceStats; + thumbnail: string; + contact_account: Account; + max_toot_chars: number; + rules: any[]; // TODO: define this + terms?: string; terms_text?: string; } -export interface InstanceConfiguration { +export interface InstanceV2 { + domain: string; + account_domain: string; + title: string; + version: string; + debug: boolean; + source_url: string; + description: string; + custom_css: string; + thumbnail: InstanceV2Thumbnail; + languages: string[]; + configuration: InstanceV2Configuration; +} + +export interface InstanceV1Configuration { statuses: InstanceStatuses; - media_attachments: InstanceMediaAttachments; + media_attachments: InstanceV1MediaAttachments; polls: InstancePolls; accounts: InstanceAccounts; emojis: InstanceEmojis; @@ -63,15 +79,6 @@ export interface InstanceEmojis { emoji_size_limit: number; } -export interface InstanceMediaAttachments { - supported_mime_types: string[]; - image_size_limit: number; - image_matrix_limit: number; - video_size_limit: number; - video_frame_rate_limit: number; - video_matrix_limit: number; -} - export interface InstancePolls { max_options: number; max_characters_per_option: number; @@ -92,7 +99,46 @@ export interface InstanceStats { user_count: number; } -export interface InstanceUrls { +export interface InstanceV1Urls { streaming_api: string; } +export interface InstanceV1MediaAttachments { + supported_mime_types: string[]; + image_size_limit: number; + image_matrix_limit: number; + video_size_limit: number; + video_frame_rate_limit: number; + video_matrix_limit: number; +} + +export interface InstanceV2Configuration { + urls: InstanceV2URLs; + accounts: InstanceAccounts; + statuses: InstanceStatuses; + media_attachments: InstanceV2MediaAttachments; + polls: InstancePolls; + translation: InstanceV2Translation; + emojis: InstanceEmojis; +} + +export interface InstanceV2MediaAttachments extends InstanceV1MediaAttachments { + description_limit: number; +} + +export interface InstanceV2Thumbnail { + url: string; + thumbnail_type?: string; + static_url?: string; + thumbnail_static_type?: string; + thumbnail_description?: string; + blurhash?: string; +} + +export interface InstanceV2Translation { + enabled: boolean; +} + +export interface InstanceV2URLs { + streaming: string; +} diff --git a/web/source/settings/lib/util/index.ts b/web/source/settings/lib/util/index.ts index 8bcf5ab5d..46b35fd70 100644 --- a/web/source/settings/lib/util/index.ts +++ b/web/source/settings/lib/util/index.ts @@ -22,6 +22,8 @@ import { useMemo } from "react"; import { AdminAccount } from "../types/account"; import { store } from "../../redux/store"; +import humanizeDuration from "humanize-duration"; + export function yesOrNo(b: boolean): string { return b ? "yes" : "no"; } @@ -54,3 +56,43 @@ export function useCapitalize(i?: string): string { return i.charAt(0).toUpperCase() + i.slice(1); }, [i]); } + +/** + * Return human-readable string representation of given bytes. + * + * Adapted from https://stackoverflow.com/a/14919494. 
+ */ +export function useHumanReadableBytes(bytes: number): string { + return useMemo(() => { + const thresh = 1024; + const digitPrecision = 2; + const r = 10**digitPrecision; + const units = ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']; + + if (Math.abs(bytes) < thresh) { + return bytes + ' B'; + } + + let u = -1; + let threshed = bytes; + do { threshed /= thresh; ++u; + } while (Math.round(Math.abs(threshed) * r) / r >= thresh && u < units.length - 1); + + return threshed.toFixed(digitPrecision) + ' ' + units[u]; + }, [bytes]); +} + +/** + * Return human-readable string representation of given time in seconds. + */ +export function useHumanReadableDuration(seconds: number): string { + return useMemo(() => { + if (seconds % 2629746 === 0) { + const n = seconds / 2629746; + return n + " month" + (n !== 1 ? "s" : ""); + } + + const ms = seconds*1000; + return humanizeDuration(ms); + }, [seconds]); +} diff --git a/web/source/settings/style.css b/web/source/settings/style.css index 67937bd9e..742407ea3 100644 --- a/web/source/settings/style.css +++ b/web/source/settings/style.css @@ -1549,6 +1549,50 @@ button.tab-button { } } +.instance-info-view { + .info-list .info-list-entry { + /* + Some of the labels are quite + long so ensure there's enough + gap when they're wrapped. + */ + gap: 1rem; + } + + /* + Make sure ellipsis works + properly for v. long domains. + */ + .list.domain-perm-list > .entry > .domain { + display: inline-block; + font-weight: bold; + } + + /* + Make sure we can break. + */ + .list.domain-perm-list > .entry > .public_comment { + word-wrap: anywhere; + } + + /* + Disable the hover effects as + these entries aren't clickable. + */ + .list.domain-perm-list > .entry:hover { + background: $list-entry-bg; + } + .list.domain-perm-list > .entry:nth-child(2n):hover { + background: $list-entry-alternate-bg; + } + .list.domain-perm-list > .entry { + &:active, &:focus, &:hover, &:target { + border-color: $gray1; + border-top-color: transparent; + } + } +} + .instance-rules { list-style-position: inside; margin: 0; diff --git a/web/source/settings/views/moderation/domain-permissions/detail.tsx b/web/source/settings/views/moderation/domain-permissions/detail.tsx index e8ef487e3..35be0e16d 100644 --- a/web/source/settings/views/moderation/domain-permissions/detail.tsx +++ b/web/source/settings/views/moderation/domain-permissions/detail.tsx @@ -307,14 +307,14 @@ function CreateOrUpdateDomainPerm({