[chore] update go dependencies (#4304)

- github.com/KimMachineGun/automemlimit v0.7.2 => v0.7.3
- github.com/gin-contrib/cors v1.7.5 => v1.7.6
- github.com/minio/minio-go/v7 v7.0.92 => v7.0.94
- github.com/spf13/cast v1.8.0 => v1.9.2
- github.com/uptrace/bun{,/*} v1.2.11 => v1.2.14
- golang.org/x/image v0.27.0 => v0.28.0
- golang.org/x/net v0.40.0 => v0.41.0
- code.superseriousbusiness.org/go-swagger v0.31.0-gts-go1.23-fix => v0.32.3-gts-go1.23-fix

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4304
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
Commit 8b0ea56027, authored by kim on 2025-06-30 15:19:09 +02:00 and committed by kim.
294 changed files with 139999 additions and 21873 deletions


@ -5,6 +5,18 @@
//
// The metric events produced are:
//
// go.memory.used By Memory used by the Go runtime.
// go.memory.limit By Go runtime memory limit configured by the user, if a limit exists.
// go.memory.allocated By Memory allocated to the heap by the application.
// go.memory.allocations {allocation} Count of allocations to the heap by the application.
// go.memory.gc.goal By Heap size target for the end of the GC cycle.
// go.goroutine.count {goroutine} Count of live goroutines.
// go.processor.limit {thread} The number of OS threads that can execute user-level Go code simultaneously.
// go.config.gogc % Heap size target percentage configured by the user, otherwise 100.
//
// When the OTEL_GO_X_DEPRECATED_RUNTIME_METRICS environment variable is set to
// true, the following deprecated metrics are produced:
//
// runtime.go.cgo.calls - Number of cgo calls made by the current process
// runtime.go.gc.count - Number of completed garbage collection cycles
// runtime.go.gc.pause_ns (ns) Amount of nanoseconds in GC stop-the-world pauses
@ -19,16 +31,4 @@
// runtime.go.mem.heap_sys (bytes) Bytes of heap memory obtained from the OS
// runtime.go.mem.live_objects - Number of live objects is the number of cumulative Mallocs - Frees
// runtime.uptime (ms) Milliseconds since application was initialized
//
// When the OTEL_GO_X_DEPRECATED_RUNTIME_METRICS environment variable is set to
// false, the metrics produced are:
//
// go.memory.used By Memory used by the Go runtime.
// go.memory.limit By Go runtime memory limit configured by the user, if a limit exists.
// go.memory.allocated By Memory allocated to the heap by the application.
// go.memory.allocations {allocation} Count of allocations to the heap by the application.
// go.memory.gc.goal By Heap size target for the end of the GC cycle.
// go.goroutine.count {goroutine} Count of live goroutines.
// go.processor.limit {thread} The number of OS threads that can execute user-level Go code simultaneously.
// go.config.gogc % Heap size target percentage configured by the user, otherwise 100.
package runtime // import "go.opentelemetry.io/contrib/instrumentation/runtime"
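
To see where these metrics come from in practice, here is a minimal sketch of enabling the instrumentation; the stdout exporter and periodic reader are illustrative assumptions, only `runtime.Start` and its option come from this package.

```go
package main

import (
	"log"
	"time"

	"go.opentelemetry.io/contrib/instrumentation/runtime"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
	"go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Illustrative exporter/reader; any SDK MeterProvider works here.
	exp, err := stdoutmetric.New()
	if err != nil {
		log.Fatal(err)
	}
	mp := metric.NewMeterProvider(metric.WithReader(metric.NewPeriodicReader(exp)))
	otel.SetMeterProvider(mp)

	// Produces go.memory.used, go.goroutine.count, etc. as listed above;
	// set OTEL_GO_X_DEPRECATED_RUNTIME_METRICS=true to also get the old ones.
	if err := runtime.Start(runtime.WithMinimumReadMemStatsInterval(time.Second)); err != nil {
		log.Fatal(err)
	}
	select {} // keep collecting until the process is stopped
}
```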


@ -13,22 +13,13 @@ change in backwards incompatible ways as feedback is applied.
### Include Deprecated Metrics
Once new experimental runtime metrics are added, they will be produced
**in addition to** the existing runtime metrics. Users that migrate right away
can disable the old runtime metrics:
```console
export OTEL_GO_X_DEPRECATED_RUNTIME_METRICS=false
```
In a later release, the deprecated runtime metrics will stop being produced by
default. To temporarily re-enable the deprecated metrics:
To temporarily re-enable the deprecated metrics:
```console
export OTEL_GO_X_DEPRECATED_RUNTIME_METRICS=true
```
After two additional releases, the deprecated runtime metrics will be removed,
Eventually, the deprecated runtime metrics will be removed,
and setting the environment variable will no longer have any effect.
The value set must be the case-insensitive string of `"true"` to enable the


@ -9,17 +9,17 @@ package x // import "go.opentelemetry.io/contrib/instrumentation/runtime/interna
import (
"os"
"strings"
"strconv"
)
// DeprecatedRuntimeMetrics is an experimental feature flag that defines if the deprecated
// runtime metrics should be produced. During development of the new
// conventions, it is enabled by default.
//
// To disable this feature set the OTEL_GO_X_DEPRECATED_RUNTIME_METRICS environment variable
// to the case-insensitive string value of "false" (i.e. "False" and "FALSE"
// To enable this feature set the OTEL_GO_X_DEPRECATED_RUNTIME_METRICS environment variable
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var DeprecatedRuntimeMetrics = newFeature("DEPRECATED_RUNTIME_METRICS", true)
var DeprecatedRuntimeMetrics = newFeature("DEPRECATED_RUNTIME_METRICS", false)
// BoolFeature is an experimental feature control flag. It provides a uniform way
// to interact with these feature flags and parse their values.
@ -43,11 +43,11 @@ func (f BoolFeature) Key() string { return f.key }
// Enabled returns if the feature is enabled.
func (f BoolFeature) Enabled() bool {
v := os.Getenv(f.key)
if strings.ToLower(v) == "false" {
return false
val, err := strconv.ParseBool(v)
if err != nil {
return f.defaultVal
}
if strings.ToLower(v) == "true" {
return true
}
return f.defaultVal
return val
}
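
In effect, the flag now goes through `strconv.ParseBool` instead of ad-hoc string comparison. A small sketch of the resulting semantics, assuming only standard-library behavior (the helper below is a stand-in, not the actual x package API):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// enabled mirrors the new logic above: parse with strconv.ParseBool and fall
// back to the default (now false for DEPRECATED_RUNTIME_METRICS) on error.
func enabled(key string, def bool) bool {
	v, err := strconv.ParseBool(os.Getenv(key))
	if err != nil {
		return def
	}
	return v
}

func main() {
	const key = "OTEL_GO_X_DEPRECATED_RUNTIME_METRICS"

	os.Setenv(key, "TRUE")           // ParseBool also accepts 1, t, T, true, True
	fmt.Println(enabled(key, false)) // true

	os.Setenv(key, "yes")            // not a ParseBool value -> default applies
	fmt.Println(enabled(key, false)) // false
}
```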


@ -12,6 +12,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/semconv/v1.34.0/goconv"
"go.opentelemetry.io/contrib/instrumentation/runtime/internal/deprecatedruntime"
"go.opentelemetry.io/contrib/instrumentation/runtime/internal/x"
@ -43,78 +44,48 @@ func Start(opts ...Option) error {
metric.WithInstrumentationVersion(Version()),
)
if x.DeprecatedRuntimeMetrics.Enabled() {
return deprecatedruntime.Start(meter, c.MinimumReadMemStatsInterval)
if err := deprecatedruntime.Start(meter, c.MinimumReadMemStatsInterval); err != nil {
return err
}
}
memoryUsedInstrument, err := meter.Int64ObservableUpDownCounter(
"go.memory.used",
metric.WithUnit("By"),
metric.WithDescription("Memory used by the Go runtime."),
)
memoryUsed, err := goconv.NewMemoryUsed(meter)
if err != nil {
return err
}
memoryLimitInstrument, err := meter.Int64ObservableUpDownCounter(
"go.memory.limit",
metric.WithUnit("By"),
metric.WithDescription("Go runtime memory limit configured by the user, if a limit exists."),
)
memoryLimit, err := goconv.NewMemoryLimit(meter)
if err != nil {
return err
}
memoryAllocatedInstrument, err := meter.Int64ObservableCounter(
"go.memory.allocated",
metric.WithUnit("By"),
metric.WithDescription("Memory allocated to the heap by the application."),
)
memoryAllocated, err := goconv.NewMemoryAllocated(meter)
if err != nil {
return err
}
memoryAllocationsInstrument, err := meter.Int64ObservableCounter(
"go.memory.allocations",
metric.WithUnit("{allocation}"),
metric.WithDescription("Count of allocations to the heap by the application."),
)
memoryAllocations, err := goconv.NewMemoryAllocations(meter)
if err != nil {
return err
}
memoryGCGoalInstrument, err := meter.Int64ObservableUpDownCounter(
"go.memory.gc.goal",
metric.WithUnit("By"),
metric.WithDescription("Heap size target for the end of the GC cycle."),
)
memoryGCGoal, err := goconv.NewMemoryGCGoal(meter)
if err != nil {
return err
}
goroutineCountInstrument, err := meter.Int64ObservableUpDownCounter(
"go.goroutine.count",
metric.WithUnit("{goroutine}"),
metric.WithDescription("Count of live goroutines."),
)
goroutineCount, err := goconv.NewGoroutineCount(meter)
if err != nil {
return err
}
processorLimitInstrument, err := meter.Int64ObservableUpDownCounter(
"go.processor.limit",
metric.WithUnit("{thread}"),
metric.WithDescription("The number of OS threads that can execute user-level Go code simultaneously."),
)
processorLimit, err := goconv.NewProcessorLimit(meter)
if err != nil {
return err
}
gogcConfigInstrument, err := meter.Int64ObservableUpDownCounter(
"go.config.gogc",
metric.WithUnit("%"),
metric.WithDescription("Heap size target percentage configured by the user, otherwise 100."),
)
configGogc, err := goconv.NewConfigGogc(meter)
if err != nil {
return err
}
otherMemoryOpt := metric.WithAttributeSet(
attribute.NewSet(attribute.String("go.memory.type", "other")),
attribute.NewSet(memoryUsed.AttrMemoryType(goconv.MemoryTypeOther)),
)
stackMemoryOpt := metric.WithAttributeSet(
attribute.NewSet(attribute.String("go.memory.type", "stack")),
attribute.NewSet(memoryUsed.AttrMemoryType(goconv.MemoryTypeStack)),
)
collector := newCollector(c.MinimumReadMemStatsInterval, runtimeMetrics)
var lock sync.Mutex
@ -124,30 +95,30 @@ func Start(opts ...Option) error {
defer lock.Unlock()
collector.refresh()
stackMemory := collector.getInt(goHeapMemory)
o.ObserveInt64(memoryUsedInstrument, stackMemory, stackMemoryOpt)
o.ObserveInt64(memoryUsed.Inst(), stackMemory, stackMemoryOpt)
totalMemory := collector.getInt(goTotalMemory) - collector.getInt(goMemoryReleased)
otherMemory := totalMemory - stackMemory
o.ObserveInt64(memoryUsedInstrument, otherMemory, otherMemoryOpt)
o.ObserveInt64(memoryUsed.Inst(), otherMemory, otherMemoryOpt)
// Only observe the limit metric if a limit exists
if limit := collector.getInt(goMemoryLimit); limit != math.MaxInt64 {
o.ObserveInt64(memoryLimitInstrument, limit)
o.ObserveInt64(memoryLimit.Inst(), limit)
}
o.ObserveInt64(memoryAllocatedInstrument, collector.getInt(goMemoryAllocated))
o.ObserveInt64(memoryAllocationsInstrument, collector.getInt(goMemoryAllocations))
o.ObserveInt64(memoryGCGoalInstrument, collector.getInt(goMemoryGoal))
o.ObserveInt64(goroutineCountInstrument, collector.getInt(goGoroutines))
o.ObserveInt64(processorLimitInstrument, collector.getInt(goMaxProcs))
o.ObserveInt64(gogcConfigInstrument, collector.getInt(goConfigGC))
o.ObserveInt64(memoryAllocated.Inst(), collector.getInt(goMemoryAllocated))
o.ObserveInt64(memoryAllocations.Inst(), collector.getInt(goMemoryAllocations))
o.ObserveInt64(memoryGCGoal.Inst(), collector.getInt(goMemoryGoal))
o.ObserveInt64(goroutineCount.Inst(), collector.getInt(goGoroutines))
o.ObserveInt64(processorLimit.Inst(), collector.getInt(goMaxProcs))
o.ObserveInt64(configGogc.Inst(), collector.getInt(goConfigGC))
return nil
},
memoryUsedInstrument,
memoryLimitInstrument,
memoryAllocatedInstrument,
memoryAllocationsInstrument,
memoryGCGoalInstrument,
goroutineCountInstrument,
processorLimitInstrument,
gogcConfigInstrument,
memoryUsed.Inst(),
memoryLimit.Inst(),
memoryAllocated.Inst(),
memoryAllocations.Inst(),
memoryGCGoal.Inst(),
goroutineCount.Inst(),
processorLimit.Inst(),
configGogc.Inst(),
)
if err != nil {
return err
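
For readers skimming the refactor, a hedged standalone sketch of the goconv wrapper pattern the new code relies on: a typed constructor per instrument, `Inst()` for observation and registration, and attribute helpers such as `AttrMemoryType` passed via `metric.WithAttributeSet`. The meter scope name below is invented.

```go
package main

import (
	"context"
	"log"
	"runtime"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/semconv/v1.34.0/goconv"
)

func register() error {
	meter := otel.Meter("example/goconv") // illustrative scope name

	// The wrapper carries the semconv name, unit, and description.
	goroutines, err := goconv.NewGoroutineCount(meter)
	if err != nil {
		return err
	}

	// Observe through the wrapper's instrument, mirroring the
	// o.ObserveInt64(x.Inst(), ...) calls above.
	_, err = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(goroutines.Inst(), int64(runtime.NumGoroutine()))
		return nil
	}, goroutines.Inst())
	return err
}

func main() {
	if err := register(); err != nil {
		log.Fatal(err)
	}
}
```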


@ -5,6 +5,6 @@ package runtime // import "go.opentelemetry.io/contrib/instrumentation/runtime"
// Version is the current release version of the runtime instrumentation.
func Version() string {
return "0.61.0"
return "0.62.0"
// This string is updated by the pre_release.sh script during release
}

vendor/go.opentelemetry.io/otel/.clomonitor.yml (generated, vendored, new file)

@ -0,0 +1,3 @@
exemptions:
- check: artifacthub_badge
reason: "Artifact Hub doesn't support Go packages"


@ -66,8 +66,6 @@ linters:
desc: Do not use cross-module internal packages.
- pkg: go.opentelemetry.io/otel/internal/internaltest
desc: Do not use cross-module internal packages.
- pkg: go.opentelemetry.io/otel/internal/matchers
desc: Do not use cross-module internal packages.
otlp-internal:
files:
- '!**/exporters/otlp/internal/**/*.go'
@ -190,6 +188,10 @@ linters:
- legacy
- std-error-handling
rules:
- linters:
- revive
path: schema/v.*/types/.*
text: avoid meaningless package names
# TODO: Having appropriate comments for exported objects helps development,
# even for objects in internal packages. Appropriate comments for all
# exported objects should be added and this exclusion removed.


@ -11,6 +11,61 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
<!-- Released section -->
<!-- Don't change this section unless doing release -->
## [1.37.0/0.59.0/0.13.0] 2025-06-25
### Added
- The `go.opentelemetry.io/otel/semconv/v1.33.0` package.
The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0`. (#6799)
- The `go.opentelemetry.io/otel/semconv/v1.34.0` package.
The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. (#6812)
- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947)
- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947)
- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log`. (#6825)
- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825)
- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when it doesn't match one of the pre-defined values in the unit suffix map. (#6839)
### Changed
- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827)
- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#6829)
- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. (#6832)
- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. (#6834)
- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. (#6835)
- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836)
- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. (#6864)
- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898)
### Fixed
- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710)
- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710)
- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710)
- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6710)
- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822)
- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914)
### Removed
- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770)
## [0.12.2] 2025-05-22
### Fixed
- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. (#6804)
- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804)
- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. (#6804)
## [0.12.1] 2025-05-21
### Fixes
- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800)
- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800)
- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800)
## [1.36.0/0.58.0/0.12.0] 2025-05-20
### Added
@ -3288,7 +3343,10 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD
[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0
[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0
[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0


@ -109,10 +109,9 @@ A PR is considered **ready to merge** when:
This is not enforced through automation, but needs to be validated by the
maintainer merging.
* The qualified approvals need to be from [Approver]s/[Maintainer]s
affiliated with different companies. Two qualified approvals from
[Approver]s or [Maintainer]s affiliated with the same company counts as a
single qualified approval.
* At least one of the qualified approvals need to be from an
[Approver]/[Maintainer] affiliated with a different company than the author
of the PR.
* PRs introducing changes that have already been discussed and consensus
reached only need one qualified approval. The discussion and resolution
needs to be linked to the PR.
@ -650,11 +649,11 @@ should be canceled.
### Maintainers
- [Damien Mathieu](https://github.com/dmathieu), Elastic
- [David Ashpole](https://github.com/dashpole), Google
- [Robert Pająk](https://github.com/pellared), Splunk
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
- [Tyler Yahn](https://github.com/MrAlias), Splunk
- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832))
- [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70))
- [Robert Pająk](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2))
- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA))
- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A))
### Emeritus


@ -293,7 +293,7 @@ semconv-generate: $(SEMCONVKIT)
--param tag=$(TAG) \
go \
/home/weaver/target
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
$(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)"
.PHONY: gorelease
gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)


@ -7,6 +7,7 @@
[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go)
[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996)
[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/opentelemetry-go.svg)](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go)
[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go.svg?type=shield&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).


@ -112,6 +112,29 @@ It is critical you make sure the version you push upstream is correct.
Finally create a Release for the new `<new tag>` on GitHub.
The release body should include all the release notes from the Changelog for this release.
### Sign the Release Artifact
To ensure we comply with CNCF best practices, we need to sign the release artifact.
The tarball attached to the GitHub release needs to be signed with your GPG key.
Follow [these steps] to sign the release artifact and upload it to GitHub.
You can use [this script] to verify the contents of the tarball before signing it.
Be sure to use the correct GPG key when signing the release artifact.
```terminal
gpg --local-user <key-id> --armor --detach-sign opentelemetry-go-<version>.tar.gz
```
You can verify the signature with:
```terminal
gpg --verify opentelemetry-go-<version>.tar.gz.asc opentelemetry-go-<version>.tar.gz
```
[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases
[this script]: https://github.com/MrAlias/attest-sh
## Post-Release
### Contrib Repository


@ -1,4 +1,4 @@
# This is a renovate-friendly source of Docker images.
FROM python:3.13.3-slim-bullseye@sha256:9e3f9243e06fd68eb9519074b49878eda20ad39a855fac51aaffb741de20726e AS python
FROM otel/weaver:v0.15.0@sha256:1cf1c72eaed57dad813c2e359133b8a15bd4facf305aae5b13bdca6d3eccff56 AS weaver
FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python
FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver
FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown


@ -5,6 +5,7 @@ package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o
import (
"context"
"errors"
"fmt"
"time"
@ -192,7 +193,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
)
if c.exportTimeout > 0 {
ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout"))
} else {
ctx, cancel = context.WithCancel(parent)
}
@ -228,6 +229,8 @@ func retryable(err error) (bool, time.Duration) {
func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
switch s.Code() {
// Follows the retryable error codes defined in
// https://opentelemetry.io/docs/specs/otlp/#failures
case codes.Canceled,
codes.DeadlineExceeded,
codes.Aborted,


@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error {
select {
case <-timer.C:
default:
return ctx.Err()
return context.Cause(ctx)
}
case <-timer.C:
}
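
These two changes pair up: the cause attached by `context.WithTimeoutCause` at the export site is what `context.Cause` returns once the deadline fires, so retry failures report "exporter export timeout" instead of a bare deadline-exceeded error. A standard-library-only sketch:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	// Exporter side: attach a descriptive cause to the timeout.
	ctx, cancel := context.WithTimeoutCause(
		context.Background(), 10*time.Millisecond, errors.New("exporter export timeout"))
	defer cancel()

	<-ctx.Done()

	// Retry side: surface the cause rather than the generic error.
	fmt.Println(ctx.Err())          // context deadline exceeded
	fmt.Println(context.Cause(ctx)) // exporter export timeout
}
```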


@ -5,5 +5,5 @@ package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o
// Version is the current release version of the OpenTelemetry OTLP over gRPC logs exporter in use.
func Version() string {
return "0.12.2"
return "0.13.0"
}


@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error {
select {
case <-timer.C:
default:
return ctx.Err()
return context.Cause(ctx)
}
case <-timer.C:
}


@ -5,5 +5,5 @@ package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o
// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use.
func Version() string {
return "0.12.2"
return "0.13.0"
}


@ -5,6 +5,7 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
import (
"context"
"errors"
"time"
"google.golang.org/genproto/googleapis/rpc/errdetails"
@ -149,7 +150,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
)
if c.exportTimeout > 0 {
ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout"))
} else {
ctx, cancel = context.WithCancel(parent)
}


@ -105,12 +105,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config {
return cfg
}
// cleanPath returns a path with all spaces trimmed and all redundancies
// removed. If urlPath is empty or cleaning it results in an empty string,
// cleanPath returns a path with all spaces trimmed. If urlPath is empty,
// defaultPath is returned instead.
func cleanPath(urlPath string, defaultPath string) string {
tmp := path.Clean(strings.TrimSpace(urlPath))
if tmp == "." {
tmp := strings.TrimSpace(urlPath)
if tmp == "" || tmp == "." {
return defaultPath
}
if !path.IsAbs(tmp) {
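
The practical difference is that `path.Clean` also stripped trailing slashes from configured endpoint paths (see the changelog entries above). A hedged before/after sketch; the function names are invented and the tail of the helper (prefixing a leading slash) is reconstructed as an assumption since the hunk is truncated:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// oldCleanPath mirrors the removed logic: path.Clean collapses redundancies
// and drops trailing slashes.
func oldCleanPath(urlPath, defaultPath string) string {
	tmp := path.Clean(strings.TrimSpace(urlPath))
	if tmp == "." {
		return defaultPath
	}
	if !path.IsAbs(tmp) {
		tmp = "/" + tmp
	}
	return tmp
}

// newCleanPath mirrors the replacement: only surrounding whitespace is trimmed.
func newCleanPath(urlPath, defaultPath string) string {
	tmp := strings.TrimSpace(urlPath)
	if tmp == "" || tmp == "." {
		return defaultPath
	}
	if !path.IsAbs(tmp) {
		tmp = "/" + tmp
	}
	return tmp
}

func main() {
	fmt.Println(oldCleanPath("/v1/metrics/", "/v1/metrics")) // /v1/metrics
	fmt.Println(newCleanPath("/v1/metrics/", "/v1/metrics")) // /v1/metrics/
}
```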


@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error {
select {
case <-timer.C:
default:
return ctx.Err()
return context.Cause(ctx)
}
case <-timer.C:
}


@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
func Version() string {
return "1.36.0"
return "1.37.0"
}


@ -105,12 +105,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config {
return cfg
}
// cleanPath returns a path with all spaces trimmed and all redundancies
// removed. If urlPath is empty or cleaning it results in an empty string,
// cleanPath returns a path with all spaces trimmed. If urlPath is empty,
// defaultPath is returned instead.
func cleanPath(urlPath string, defaultPath string) string {
tmp := path.Clean(strings.TrimSpace(urlPath))
if tmp == "." {
tmp := strings.TrimSpace(urlPath)
if tmp == "" || tmp == "." {
return defaultPath
}
if !path.IsAbs(tmp) {


@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error {
select {
case <-timer.C:
default:
return ctx.Err()
return context.Cause(ctx)
}
case <-timer.C:
}


@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use.
func Version() string {
return "1.36.0"
return "1.37.0"
}


@ -223,7 +223,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
)
if c.exportTimeout > 0 {
ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout"))
} else {
ctx, cancel = context.WithCancel(parent)
}


@ -92,12 +92,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config {
return cfg
}
// cleanPath returns a path with all spaces trimmed and all redundancies
// removed. If urlPath is empty or cleaning it results in an empty string,
// cleanPath returns a path with all spaces trimmed. If urlPath is empty,
// defaultPath is returned instead.
func cleanPath(urlPath string, defaultPath string) string {
tmp := path.Clean(strings.TrimSpace(urlPath))
if tmp == "." {
tmp := strings.TrimSpace(urlPath)
if tmp == "" || tmp == "." {
return defaultPath
}
if !path.IsAbs(tmp) {


@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error {
select {
case <-timer.C:
default:
return ctx.Err()
return context.Cause(ctx)
}
case <-timer.C:
}


@ -92,12 +92,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config {
return cfg
}
// cleanPath returns a path with all spaces trimmed and all redundancies
// removed. If urlPath is empty or cleaning it results in an empty string,
// cleanPath returns a path with all spaces trimmed. If urlPath is empty,
// defaultPath is returned instead.
func cleanPath(urlPath string, defaultPath string) string {
tmp := path.Clean(strings.TrimSpace(urlPath))
if tmp == "." {
tmp := strings.TrimSpace(urlPath)
if tmp == "" || tmp == "." {
return defaultPath
}
if !path.IsAbs(tmp) {


@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error {
select {
case <-timer.C:
default:
return ctx.Err()
return context.Cause(ctx)
}
case <-timer.C:
}


@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
func Version() string {
return "1.36.0"
return "1.37.0"
}


@ -125,9 +125,8 @@ func WithoutCounterSuffixes() Option {
})
}
// WithoutScopeInfo configures the Exporter to not export the otel_scope_info metric.
// If not specified, the Exporter will create a otel_scope_info metric containing
// the metrics' Instrumentation Scope, and also add labels about Instrumentation Scope to all metric points.
// WithoutScopeInfo configures the Exporter to not export
// labels about Instrumentation Scope to all metric points.
func WithoutScopeInfo() Option {
return optionFunc(func(cfg config) config {
cfg.disableScopeInfo = true
@ -136,7 +135,7 @@ func WithoutScopeInfo() Option {
}
// WithNamespace configures the Exporter to prefix metric with the given namespace.
// Metadata metrics such as target_info and otel_scope_info are not prefixed since these
// Metadata metrics such as target_info are not prefixed since these
// have special behavior based on their name.
func WithNamespace(ns string) Option {
return optionFunc(func(cfg config) config {
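
A minimal, hedged sketch of these options in use; the namespace string, port, and scrape path are invented, and the promhttp handler is the usual (but here assumed) way to expose the default registry:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/prometheus"
	"go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// WithNamespace prefixes metric names; WithoutScopeInfo drops the
	// otel_scope_* labels described above.
	exporter, err := prometheus.New(
		prometheus.WithNamespace("myapp"),
		prometheus.WithoutScopeInfo(),
	)
	if err != nil {
		log.Fatal(err)
	}
	otel.SetMeterProvider(metric.NewMeterProvider(metric.WithReader(exporter)))

	// The exporter registers with the default Prometheus registerer unless
	// configured otherwise, so the stock handler serves its metrics.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2223", nil))
}
```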


@ -21,7 +21,6 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
"go.opentelemetry.io/otel/sdk/resource"
@ -31,25 +30,20 @@ const (
targetInfoMetricName = "target_info"
targetInfoDescription = "Target metadata"
scopeInfoMetricName = "otel_scope_info"
scopeInfoDescription = "Instrumentation Scope metadata"
scopeNameLabel = "otel_scope_name"
scopeVersionLabel = "otel_scope_version"
scopeLabelPrefix = "otel_scope_"
scopeNameLabel = scopeLabelPrefix + "name"
scopeVersionLabel = scopeLabelPrefix + "version"
scopeSchemaLabel = scopeLabelPrefix + "schema_url"
traceIDExemplarKey = "trace_id"
spanIDExemplarKey = "span_id"
)
var (
errScopeInvalid = errors.New("invalid scope")
metricsPool = sync.Pool{
New: func() interface{} {
return &metricdata.ResourceMetrics{}
},
}
)
var metricsPool = sync.Pool{
New: func() interface{} {
return &metricdata.ResourceMetrics{}
},
}
// Exporter is a Prometheus Exporter that embeds the OTel metric.Reader
// interface for easy instantiation with a MeterProvider.
@ -97,8 +91,6 @@ type collector struct {
mu sync.Mutex // mu protects all members below from the concurrent access.
disableTargetInfo bool
targetInfo prometheus.Metric
scopeInfos map[instrumentation.Scope]prometheus.Metric
scopeInfosInvalid map[instrumentation.Scope]struct{}
metricFamilies map[string]*dto.MetricFamily
resourceKeyVals keyVals
}
@ -122,8 +114,6 @@ func New(opts ...Option) (*Exporter, error) {
withoutUnits: cfg.withoutUnits,
withoutCounterSuffixes: cfg.withoutCounterSuffixes,
disableScopeInfo: cfg.disableScopeInfo,
scopeInfos: make(map[instrumentation.Scope]prometheus.Metric),
scopeInfosInvalid: make(map[instrumentation.Scope]struct{}),
metricFamilies: make(map[string]*dto.MetricFamily),
namespace: cfg.namespace,
resourceAttributesFilter: cfg.resourceAttributesFilter,
@ -202,20 +192,15 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
}
if !c.disableScopeInfo {
scopeInfo, err := c.scopeInfo(scopeMetrics.Scope)
if errors.Is(err, errScopeInvalid) {
// Do not report the same error multiple times.
continue
}
if err != nil {
otel.Handle(err)
continue
}
kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel, scopeSchemaLabel)
kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version, scopeMetrics.Scope.SchemaURL)
ch <- scopeInfo
kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel)
kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version)
attrKeys, attrVals := getAttrs(scopeMetrics.Scope.Attributes)
for i := range attrKeys {
attrKeys[i] = scopeLabelPrefix + attrKeys[i]
}
kv.keys = append(kv.keys, attrKeys...)
kv.vals = append(kv.vals, attrVals...)
}
kv.keys = append(kv.keys, c.resourceKeyVals.keys...)
@ -259,6 +244,59 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
}
}
// downscaleExponentialBucket re-aggregates bucket counts when downscaling to a coarser resolution.
func downscaleExponentialBucket(bucket metricdata.ExponentialBucket, scaleDelta int32) metricdata.ExponentialBucket {
if len(bucket.Counts) == 0 || scaleDelta < 1 {
return metricdata.ExponentialBucket{
Offset: bucket.Offset >> scaleDelta,
Counts: append([]uint64(nil), bucket.Counts...), // copy slice
}
}
// The new offset is scaled down
newOffset := bucket.Offset >> scaleDelta
// Pre-calculate the new bucket count to avoid growing slice
// Each group of 2^scaleDelta buckets will merge into one bucket
//nolint:gosec // Length is bounded by slice allocation
lastBucketIdx := bucket.Offset + int32(len(bucket.Counts)) - 1
lastNewIdx := lastBucketIdx >> scaleDelta
newBucketCount := int(lastNewIdx - newOffset + 1)
if newBucketCount <= 0 {
return metricdata.ExponentialBucket{
Offset: newOffset,
Counts: []uint64{},
}
}
newCounts := make([]uint64, newBucketCount)
// Merge buckets according to the scale difference
for i, count := range bucket.Counts {
if count == 0 {
continue
}
// Calculate which new bucket this count belongs to
//nolint:gosec // Index is bounded by loop iteration
originalIdx := bucket.Offset + int32(i)
newIdx := originalIdx >> scaleDelta
// Calculate the position in the new counts array
position := newIdx - newOffset
//nolint:gosec // Length is bounded by allocation
if position >= 0 && position < int32(len(newCounts)) {
newCounts[position] += count
}
}
return metricdata.ExponentialBucket{
Offset: newOffset,
Counts: newCounts,
}
}
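
A quick illustration with invented counts, written as an example test that would sit in the same package (the function is unexported): downscaling by `scaleDelta` merges every 2^scaleDelta adjacent source buckets into one.

```go
func ExampleDownscaleExponentialBucket() {
	in := metricdata.ExponentialBucket{
		Offset: 3,
		Counts: []uint64{1, 2, 3, 4}, // source indices 3, 4, 5, 6
	}
	// scaleDelta = 1: indices halve, so the buckets at indices 4 and 5 merge.
	out := downscaleExponentialBucket(in, 1)
	fmt.Println(out.Offset, out.Counts)
	// Output: 1 [1 5 4]
}
```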
func addExponentialHistogramMetric[N int64 | float64](
ch chan<- prometheus.Metric,
histogram metricdata.ExponentialHistogram[N],
@ -273,23 +311,43 @@ func addExponentialHistogramMetric[N int64 | float64](
desc := prometheus.NewDesc(name, m.Description, keys, nil)
// Prometheus native histograms support scales in the range [-4, 8]
scale := dp.Scale
if scale < -4 {
// Reject scales below -4 as they cannot be represented in Prometheus
otel.Handle(fmt.Errorf(
"exponential histogram scale %d is below minimum supported scale -4, skipping data point",
scale))
continue
}
// If scale > 8, we need to downscale the buckets to match the clamped scale
positiveBucket := dp.PositiveBucket
negativeBucket := dp.NegativeBucket
if scale > 8 {
scaleDelta := scale - 8
positiveBucket = downscaleExponentialBucket(dp.PositiveBucket, scaleDelta)
negativeBucket = downscaleExponentialBucket(dp.NegativeBucket, scaleDelta)
scale = 8
}
// From spec: note that Prometheus Native Histograms buckets are indexed by upper boundary while Exponential Histograms are indexed by lower boundary, the result being that the Offset fields are different-by-one.
positiveBuckets := make(map[int]int64)
for i, c := range dp.PositiveBucket.Counts {
for i, c := range positiveBucket.Counts {
if c > math.MaxInt64 {
otel.Handle(fmt.Errorf("positive count %d is too large to be represented as int64", c))
continue
}
positiveBuckets[int(dp.PositiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above.
positiveBuckets[int(positiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above.
}
negativeBuckets := make(map[int]int64)
for i, c := range dp.NegativeBucket.Counts {
for i, c := range negativeBucket.Counts {
if c > math.MaxInt64 {
otel.Handle(fmt.Errorf("negative count %d is too large to be represented as int64", c))
continue
}
negativeBuckets[int(dp.NegativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above.
negativeBuckets[int(negativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above.
}
m, err := prometheus.NewConstNativeHistogram(
@ -299,7 +357,7 @@ func addExponentialHistogramMetric[N int64 | float64](
positiveBuckets,
negativeBuckets,
dp.ZeroCount,
dp.Scale,
scale,
dp.ZeroThreshold,
dp.StartTime,
values...)
@ -440,15 +498,11 @@ func createInfoMetric(name, description string, res *resource.Resource) (prometh
return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
}
func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) {
attrs := make([]attribute.KeyValue, 0, scope.Attributes.Len()+2) // resource attrs + scope name + scope version
attrs = append(attrs, scope.Attributes.ToSlice()...)
attrs = append(attrs, attribute.String(scopeNameLabel, scope.Name))
attrs = append(attrs, attribute.String(scopeVersionLabel, scope.Version))
keys, values := getAttrs(attribute.NewSet(attrs...))
desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil)
return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
func unitMapGetOrDefault(unit string) string {
if promUnit, ok := unitSuffixes[unit]; ok {
return promUnit
}
return unit
}
var unitSuffixes = map[string]string{
@ -509,7 +563,7 @@ func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string {
if c.namespace != "" {
name = c.namespace + name
}
if suffix, ok := unitSuffixes[m.Unit]; ok && !c.withoutUnits && !strings.HasSuffix(name, suffix) {
if suffix := unitMapGetOrDefault(m.Unit); suffix != "" && !c.withoutUnits && !strings.HasSuffix(name, suffix) {
name += "_" + suffix
}
if addCounterSuffix {
@ -556,30 +610,6 @@ func (c *collector) createResourceAttributes(res *resource.Resource) {
c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues}
}
func (c *collector) scopeInfo(scope instrumentation.Scope) (prometheus.Metric, error) {
c.mu.Lock()
defer c.mu.Unlock()
scopeInfo, ok := c.scopeInfos[scope]
if ok {
return scopeInfo, nil
}
if _, ok := c.scopeInfosInvalid[scope]; ok {
return nil, errScopeInvalid
}
scopeInfo, err := createScopeInfoMetric(scope)
if err != nil {
c.scopeInfosInvalid[scope] = struct{}{}
return nil, fmt.Errorf("cannot create scope info metric: %w", err)
}
c.scopeInfos[scope] = scopeInfo
return scopeInfo, nil
}
func (c *collector) validateMetrics(name, description string, metricType *dto.MetricType) (drop bool, help string) {
c.mu.Lock()
defer c.mu.Unlock()


@ -106,7 +106,7 @@ func (e *Exporter) newRecordJSON(r sdklog.Record) recordJSON {
Attributes: make([]keyValue, 0, r.AttributesLen()),
Resource: &res,
Resource: res,
Scope: r.InstrumentationScope(),
DroppedAttributes: r.DroppedAttributes(),


@ -136,5 +136,6 @@ func WithSchemaURL(schemaURL string) LoggerOption {
// EnabledParameters represents payload for [Logger]'s Enabled method.
type EnabledParameters struct {
Severity Severity
Severity Severity
EventName string
}
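
A hedged caller-side sketch of the new field (the logger scope and event name are invented); bridges can now ask up front whether a given event would be processed:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/log"
	"go.opentelemetry.io/otel/log/global"
)

func main() {
	logger := global.GetLoggerProvider().Logger("example") // illustrative scope

	// EventName travels alongside Severity, so filtering processors can make
	// per-event decisions before a record is ever built.
	enabled := logger.Enabled(context.Background(), log.EnabledParameters{
		Severity:  log.SeverityInfo,
		EventName: "user.login", // invented event name
	})
	fmt.Println(enabled)
}
```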


@ -119,7 +119,9 @@ func newTimeoutExporter(exp Exporter, timeout time.Duration) Exporter {
// Export sets the timeout of ctx before calling the Exporter e wraps.
func (e *timeoutExporter) Export(ctx context.Context, records []Record) error {
ctx, cancel := context.WithTimeout(ctx, e.timeout)
// This is only used by the batch processor, and it takes the processor timeout config.
// Thus, the error message points to the processor. So users know they should adjust the processor timeout.
ctx, cancel := context.WithTimeoutCause(ctx, e.timeout, errors.New("processor export timeout"))
defer cancel()
return e.Exporter.Export(ctx, records)
}


@ -57,4 +57,5 @@ type FilterProcessor interface {
type EnabledParameters struct {
InstrumentationScope instrumentation.Scope
Severity log.Severity
EventName string
}


@ -52,6 +52,7 @@ func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool
p := EnabledParameters{
InstrumentationScope: l.instrumentationScope,
Severity: param.Severity,
EventName: param.EventName,
}
// If there are more Processors than FilterProcessors,


@ -387,11 +387,8 @@ func (r *Record) SetTraceFlags(flags trace.TraceFlags) {
}
// Resource returns the entity that collected the log.
func (r *Record) Resource() resource.Resource {
if r.resource == nil {
return *resource.Empty()
}
return *r.resource
func (r *Record) Resource() *resource.Resource {
return r.resource
}
// InstrumentationScope returns the scope that the Logger was created with.


@ -202,7 +202,7 @@ func (r *PeriodicReader) aggregation(
// collectAndExport gather all metric data related to the periodicReader r from
// the SDK and exports it with r's exporter.
func (r *PeriodicReader) collectAndExport(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, r.timeout)
ctx, cancel := context.WithTimeoutCause(ctx, r.timeout, errors.New("reader collect and export timeout"))
defer cancel()
// TODO (#3047): Use a sync.Pool or persistent pointer instead of allocating rm every Collect.
@ -278,7 +278,7 @@ func (r *PeriodicReader) ForceFlush(ctx context.Context) error {
// Prioritize the ctx timeout if it is set.
if _, ok := ctx.Deadline(); !ok {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, r.timeout)
ctx, cancel = context.WithTimeoutCause(ctx, r.timeout, errors.New("reader force flush timeout"))
defer cancel()
}
@ -311,7 +311,7 @@ func (r *PeriodicReader) Shutdown(ctx context.Context) error {
// Prioritize the ctx timeout if it is set.
if _, ok := ctx.Deadline(); !ok {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, r.timeout)
ctx, cancel = context.WithTimeoutCause(ctx, r.timeout, errors.New("reader shutdown timeout"))
defer cancel()
}


@ -121,6 +121,14 @@ func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) {
//
// This method is safe to call concurrently.
func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error {
// Only check if context is already cancelled before starting, not inside or after callback loops.
// If this method returns after executing some callbacks but before running all aggregations,
// internal aggregation state can be corrupted and result in incorrect data returned
// by future produce calls.
if err := ctx.Err(); err != nil {
return err
}
p.Lock()
defer p.Unlock()
@ -130,12 +138,6 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics)
if e := c(ctx); e != nil {
err = errors.Join(err, e)
}
if err := ctx.Err(); err != nil {
rm.Resource = nil
clear(rm.ScopeMetrics) // Erase elements to let GC collect objects.
rm.ScopeMetrics = rm.ScopeMetrics[:0]
return err
}
}
for e := p.multiCallbacks.Front(); e != nil; e = e.Next() {
// TODO make the callbacks parallel. ( #3034 )
@ -143,13 +145,6 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics)
if e := f(ctx); e != nil {
err = errors.Join(err, e)
}
if err := ctx.Err(); err != nil {
// This means the context expired before we finished running callbacks.
rm.Resource = nil
clear(rm.ScopeMetrics) // Erase elements to let GC collect objects.
rm.ScopeMetrics = rm.ScopeMetrics[:0]
return err
}
}
rm.Resource = p.resource


@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
// version is the current release version of the metric SDK in use.
func version() string {
return "1.36.0"
return "1.37.0"
}


@ -13,7 +13,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)
type (


@ -11,7 +11,7 @@ import (
"os"
"regexp"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)
type containerIDProvider func() (string, error)


@ -12,7 +12,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)
const (


@ -8,7 +8,7 @@ import (
"errors"
"strings"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)
type hostIDProvider func() (string, error)


@ -8,7 +8,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)
type osDescriptionProvider func() (string, error)


@ -11,7 +11,7 @@ import (
"path/filepath"
"runtime"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)
type (


@ -5,6 +5,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
"errors"
"sync"
"sync/atomic"
"time"
@ -267,7 +268,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
if bsp.o.ExportTimeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout)
ctx, cancel = context.WithTimeoutCause(ctx, bsp.o.ExportTimeout, errors.New("processor export timeout"))
defer cancel()
}


@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)


@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
return "1.36.0"
return "1.37.0"
}


@ -0,0 +1,4 @@
<!-- Generated. DO NOT MODIFY. -->
# Migration from v1.33.0 to v1.34.0
The `go.opentelemetry.io/otel/semconv/v1.34.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.33.0`.


@ -0,0 +1,3 @@
# Semconv v1.34.0
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.34.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.34.0)


@ -0,0 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
// patterns for OpenTelemetry things. This package represents the v1.34.0
// version of the OpenTelemetry semantic conventions.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"


@ -0,0 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
const (
// ExceptionEventName is the name of the Span event representing an exception.
ExceptionEventName = "exception"
)
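
From a consumer's perspective the bump is mostly an import-path change. A hedged sketch (the resource construction and service name are illustrative; the schema URL and the exception constant come from the new package):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

func main() {
	// Same attribute constructors as earlier semconv versions, now pinned
	// to the v1.34.0 schema URL.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("example-service"), // invented service name
	)
	fmt.Println(res.SchemaURL())
	fmt.Println(semconv.ExceptionEventName) // "exception", per the const above
}
```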


@ -0,0 +1,508 @@
// Code generated from semantic convention specification. DO NOT EDIT.
// Package goconv provides types and functionality for OpenTelemetry semantic
// conventions in the "go" namespace.
package goconv
import (
"context"
"sync"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/noop"
)
var (
addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
)
// MemoryTypeAttr is an attribute conforming to the go.memory.type semantic
// conventions. It represents the type of memory.
type MemoryTypeAttr string
var (
// MemoryTypeStack is the memory allocated from the heap that is reserved for
// stack space, whether or not it is currently in-use.
MemoryTypeStack MemoryTypeAttr = "stack"
// MemoryTypeOther is the memory used by the Go runtime, excluding other
// categories of memory usage described in this enumeration.
MemoryTypeOther MemoryTypeAttr = "other"
)
// ConfigGogc is an instrument used to record metric values conforming to the
// "go.config.gogc" semantic conventions. It represents the heap size target
// percentage configured by the user, otherwise 100.
type ConfigGogc struct {
metric.Int64ObservableUpDownCounter
}
// NewConfigGogc returns a new ConfigGogc instrument.
func NewConfigGogc(
m metric.Meter,
opt ...metric.Int64ObservableUpDownCounterOption,
) (ConfigGogc, error) {
// Check if the meter is nil.
if m == nil {
return ConfigGogc{noop.Int64ObservableUpDownCounter{}}, nil
}
i, err := m.Int64ObservableUpDownCounter(
"go.config.gogc",
append([]metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("Heap size target percentage configured by the user, otherwise 100."),
metric.WithUnit("%"),
}, opt...)...,
)
if err != nil {
return ConfigGogc{noop.Int64ObservableUpDownCounter{}}, err
}
return ConfigGogc{i}, nil
}
// Inst returns the underlying metric instrument.
func (m ConfigGogc) Inst() metric.Int64ObservableUpDownCounter {
return m.Int64ObservableUpDownCounter
}
// Name returns the semantic convention name of the instrument.
func (ConfigGogc) Name() string {
return "go.config.gogc"
}
// Unit returns the semantic convention unit of the instrument
func (ConfigGogc) Unit() string {
return "%"
}
// Description returns the semantic convention description of the instrument
func (ConfigGogc) Description() string {
return "Heap size target percentage configured by the user, otherwise 100."
}
// GoroutineCount is an instrument used to record metric values conforming to the
// "go.goroutine.count" semantic conventions. It represents the count of live
// goroutines.
type GoroutineCount struct {
metric.Int64ObservableUpDownCounter
}
// NewGoroutineCount returns a new GoroutineCount instrument.
func NewGoroutineCount(
m metric.Meter,
opt ...metric.Int64ObservableUpDownCounterOption,
) (GoroutineCount, error) {
// Check if the meter is nil.
if m == nil {
return GoroutineCount{noop.Int64ObservableUpDownCounter{}}, nil
}
i, err := m.Int64ObservableUpDownCounter(
"go.goroutine.count",
append([]metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("Count of live goroutines."),
metric.WithUnit("{goroutine}"),
}, opt...)...,
)
if err != nil {
return GoroutineCount{noop.Int64ObservableUpDownCounter{}}, err
}
return GoroutineCount{i}, nil
}
// Inst returns the underlying metric instrument.
func (m GoroutineCount) Inst() metric.Int64ObservableUpDownCounter {
return m.Int64ObservableUpDownCounter
}
// Name returns the semantic convention name of the instrument.
func (GoroutineCount) Name() string {
return "go.goroutine.count"
}
// Unit returns the semantic convention unit of the instrument
func (GoroutineCount) Unit() string {
return "{goroutine}"
}
// Description returns the semantic convention description of the instrument
func (GoroutineCount) Description() string {
return "Count of live goroutines."
}
// MemoryAllocated is an instrument used to record metric values conforming to
// the "go.memory.allocated" semantic conventions. It represents the memory
// allocated to the heap by the application.
type MemoryAllocated struct {
metric.Int64ObservableCounter
}
// NewMemoryAllocated returns a new MemoryAllocated instrument.
func NewMemoryAllocated(
m metric.Meter,
opt ...metric.Int64ObservableCounterOption,
) (MemoryAllocated, error) {
// Check if the meter is nil.
if m == nil {
return MemoryAllocated{noop.Int64ObservableCounter{}}, nil
}
i, err := m.Int64ObservableCounter(
"go.memory.allocated",
append([]metric.Int64ObservableCounterOption{
metric.WithDescription("Memory allocated to the heap by the application."),
metric.WithUnit("By"),
}, opt...)...,
)
if err != nil {
return MemoryAllocated{noop.Int64ObservableCounter{}}, err
}
return MemoryAllocated{i}, nil
}
// Inst returns the underlying metric instrument.
func (m MemoryAllocated) Inst() metric.Int64ObservableCounter {
return m.Int64ObservableCounter
}
// Name returns the semantic convention name of the instrument.
func (MemoryAllocated) Name() string {
return "go.memory.allocated"
}
// Unit returns the semantic convention unit of the instrument
func (MemoryAllocated) Unit() string {
return "By"
}
// Description returns the semantic convention description of the instrument
func (MemoryAllocated) Description() string {
return "Memory allocated to the heap by the application."
}
// MemoryAllocations is an instrument used to record metric values conforming to
// the "go.memory.allocations" semantic conventions. It represents the count of
// allocations to the heap by the application.
type MemoryAllocations struct {
metric.Int64ObservableCounter
}
// NewMemoryAllocations returns a new MemoryAllocations instrument.
func NewMemoryAllocations(
m metric.Meter,
opt ...metric.Int64ObservableCounterOption,
) (MemoryAllocations, error) {
// Check if the meter is nil.
if m == nil {
return MemoryAllocations{noop.Int64ObservableCounter{}}, nil
}
i, err := m.Int64ObservableCounter(
"go.memory.allocations",
append([]metric.Int64ObservableCounterOption{
metric.WithDescription("Count of allocations to the heap by the application."),
metric.WithUnit("{allocation}"),
}, opt...)...,
)
if err != nil {
return MemoryAllocations{noop.Int64ObservableCounter{}}, err
}
return MemoryAllocations{i}, nil
}
// Inst returns the underlying metric instrument.
func (m MemoryAllocations) Inst() metric.Int64ObservableCounter {
return m.Int64ObservableCounter
}
// Name returns the semantic convention name of the instrument.
func (MemoryAllocations) Name() string {
return "go.memory.allocations"
}
// Unit returns the semantic convention unit of the instrument
func (MemoryAllocations) Unit() string {
return "{allocation}"
}
// Description returns the semantic convention description of the instrument
func (MemoryAllocations) Description() string {
return "Count of allocations to the heap by the application."
}
// MemoryGCGoal is an instrument used to record metric values conforming to the
// "go.memory.gc.goal" semantic conventions. It represents the heap size target
// for the end of the GC cycle.
type MemoryGCGoal struct {
metric.Int64ObservableUpDownCounter
}
// NewMemoryGCGoal returns a new MemoryGCGoal instrument.
func NewMemoryGCGoal(
m metric.Meter,
opt ...metric.Int64ObservableUpDownCounterOption,
) (MemoryGCGoal, error) {
// Check if the meter is nil.
if m == nil {
return MemoryGCGoal{noop.Int64ObservableUpDownCounter{}}, nil
}
i, err := m.Int64ObservableUpDownCounter(
"go.memory.gc.goal",
append([]metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("Heap size target for the end of the GC cycle."),
metric.WithUnit("By"),
}, opt...)...,
)
if err != nil {
return MemoryGCGoal{noop.Int64ObservableUpDownCounter{}}, err
}
return MemoryGCGoal{i}, nil
}
// Inst returns the underlying metric instrument.
func (m MemoryGCGoal) Inst() metric.Int64ObservableUpDownCounter {
return m.Int64ObservableUpDownCounter
}
// Name returns the semantic convention name of the instrument.
func (MemoryGCGoal) Name() string {
return "go.memory.gc.goal"
}
// Unit returns the semantic convention unit of the instrument
func (MemoryGCGoal) Unit() string {
return "By"
}
// Description returns the semantic convention description of the instrument
func (MemoryGCGoal) Description() string {
return "Heap size target for the end of the GC cycle."
}
// MemoryLimit is an instrument used to record metric values conforming to the
// "go.memory.limit" semantic conventions. It represents the go runtime memory
// limit configured by the user, if a limit exists.
type MemoryLimit struct {
metric.Int64ObservableUpDownCounter
}
// NewMemoryLimit returns a new MemoryLimit instrument.
func NewMemoryLimit(
m metric.Meter,
opt ...metric.Int64ObservableUpDownCounterOption,
) (MemoryLimit, error) {
// Check if the meter is nil.
if m == nil {
return MemoryLimit{noop.Int64ObservableUpDownCounter{}}, nil
}
i, err := m.Int64ObservableUpDownCounter(
"go.memory.limit",
append([]metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("Go runtime memory limit configured by the user, if a limit exists."),
metric.WithUnit("By"),
}, opt...)...,
)
if err != nil {
return MemoryLimit{noop.Int64ObservableUpDownCounter{}}, err
}
return MemoryLimit{i}, nil
}
// Inst returns the underlying metric instrument.
func (m MemoryLimit) Inst() metric.Int64ObservableUpDownCounter {
return m.Int64ObservableUpDownCounter
}
// Name returns the semantic convention name of the instrument.
func (MemoryLimit) Name() string {
return "go.memory.limit"
}
// Unit returns the semantic convention unit of the instrument
func (MemoryLimit) Unit() string {
return "By"
}
// Description returns the semantic convention description of the instrument
func (MemoryLimit) Description() string {
return "Go runtime memory limit configured by the user, if a limit exists."
}
// MemoryUsed is an instrument used to record metric values conforming to the
// "go.memory.used" semantic conventions. It represents the memory used by the Go
// runtime.
type MemoryUsed struct {
metric.Int64ObservableUpDownCounter
}
// NewMemoryUsed returns a new MemoryUsed instrument.
func NewMemoryUsed(
m metric.Meter,
opt ...metric.Int64ObservableUpDownCounterOption,
) (MemoryUsed, error) {
// Check if the meter is nil.
if m == nil {
return MemoryUsed{noop.Int64ObservableUpDownCounter{}}, nil
}
i, err := m.Int64ObservableUpDownCounter(
"go.memory.used",
append([]metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("Memory used by the Go runtime."),
metric.WithUnit("By"),
}, opt...)...,
)
if err != nil {
return MemoryUsed{noop.Int64ObservableUpDownCounter{}}, err
}
return MemoryUsed{i}, nil
}
// Inst returns the underlying metric instrument.
func (m MemoryUsed) Inst() metric.Int64ObservableUpDownCounter {
return m.Int64ObservableUpDownCounter
}
// Name returns the semantic convention name of the instrument.
func (MemoryUsed) Name() string {
return "go.memory.used"
}
// Unit returns the semantic convention unit of the instrument.
func (MemoryUsed) Unit() string {
return "By"
}
// Description returns the semantic convention description of the instrument.
func (MemoryUsed) Description() string {
return "Memory used by the Go runtime."
}
// AttrMemoryType returns an optional attribute for the "go.memory.type" semantic
// convention. It represents the type of memory.
func (MemoryUsed) AttrMemoryType(val MemoryTypeAttr) attribute.KeyValue {
return attribute.String("go.memory.type", string(val))
}
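// A minimal usage sketch, not part of the generated API: it assumes a
// metric.Meter named meter is available and shows go.memory.used being
// observed with the go.memory.type attribute. The observed value and the
// "stack" attribute value are placeholders; the helper name is illustrative
// only.
func exampleObserveMemoryUsed(meter metric.Meter) error {
	used, err := NewMemoryUsed(meter)
	if err != nil {
		return err
	}
	_, err = meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
		// Report a placeholder number of bytes attributed to stack memory.
		o.ObserveInt64(used.Inst(), 1<<20,
			metric.WithAttributes(used.AttrMemoryType(MemoryTypeAttr("stack"))))
		return nil
	}, used.Inst())
	return err
}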
// ProcessorLimit is an instrument used to record metric values conforming to the
// "go.processor.limit" semantic conventions. It represents the number of OS
// threads that can execute user-level Go code simultaneously.
type ProcessorLimit struct {
metric.Int64ObservableUpDownCounter
}
// NewProcessorLimit returns a new ProcessorLimit instrument.
func NewProcessorLimit(
m metric.Meter,
opt ...metric.Int64ObservableUpDownCounterOption,
) (ProcessorLimit, error) {
// Check if the meter is nil.
if m == nil {
return ProcessorLimit{noop.Int64ObservableUpDownCounter{}}, nil
}
i, err := m.Int64ObservableUpDownCounter(
"go.processor.limit",
append([]metric.Int64ObservableUpDownCounterOption{
metric.WithDescription("The number of OS threads that can execute user-level Go code simultaneously."),
metric.WithUnit("{thread}"),
}, opt...)...,
)
if err != nil {
return ProcessorLimit{noop.Int64ObservableUpDownCounter{}}, err
}
return ProcessorLimit{i}, nil
}
// Inst returns the underlying metric instrument.
func (m ProcessorLimit) Inst() metric.Int64ObservableUpDownCounter {
return m.Int64ObservableUpDownCounter
}
// Name returns the semantic convention name of the instrument.
func (ProcessorLimit) Name() string {
return "go.processor.limit"
}
// Unit returns the semantic convention unit of the instrument.
func (ProcessorLimit) Unit() string {
return "{thread}"
}
// Description returns the semantic convention description of the instrument.
func (ProcessorLimit) Description() string {
return "The number of OS threads that can execute user-level Go code simultaneously."
}
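// A minimal observation sketch, not part of the generated API: it assumes a
// metric.Meter named meter and that the math, runtime, and runtime/debug
// packages are imported. debug.SetMemoryLimit(-1) reads the current limit
// without changing it, and runtime.GOMAXPROCS(0) likewise only reads the
// current value. The helper name is illustrative only.
func exampleObserveLimits(meter metric.Meter) error {
	memLimit, err := NewMemoryLimit(meter)
	if err != nil {
		return err
	}
	procLimit, err := NewProcessorLimit(meter)
	if err != nil {
		return err
	}
	_, err = meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
		// A limit of math.MaxInt64 means no user-configured limit; the
		// semantic convention only expects a value when a limit exists.
		if limit := debug.SetMemoryLimit(-1); limit != math.MaxInt64 {
			o.ObserveInt64(memLimit.Inst(), limit)
		}
		o.ObserveInt64(procLimit.Inst(), int64(runtime.GOMAXPROCS(0)))
		return nil
	}, memLimit.Inst(), procLimit.Inst())
	return err
}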
// ScheduleDuration is an instrument used to record metric values conforming to
// the "go.schedule.duration" semantic conventions. It represents the time
// goroutines have spent in the scheduler in a runnable state before actually
// running.
type ScheduleDuration struct {
metric.Float64Histogram
}
// NewScheduleDuration returns a new ScheduleDuration instrument.
func NewScheduleDuration(
m metric.Meter,
opt ...metric.Float64HistogramOption,
) (ScheduleDuration, error) {
// Check if the meter is nil.
if m == nil {
return ScheduleDuration{noop.Float64Histogram{}}, nil
}
i, err := m.Float64Histogram(
"go.schedule.duration",
append([]metric.Float64HistogramOption{
metric.WithDescription("The time goroutines have spent in the scheduler in a runnable state before actually running."),
metric.WithUnit("s"),
}, opt...)...,
)
if err != nil {
return ScheduleDuration{noop.Float64Histogram{}}, err
}
return ScheduleDuration{i}, nil
}
// Inst returns the underlying metric instrument.
func (m ScheduleDuration) Inst() metric.Float64Histogram {
return m.Float64Histogram
}
// Name returns the semantic convention name of the instrument.
func (ScheduleDuration) Name() string {
return "go.schedule.duration"
}
// Unit returns the semantic convention unit of the instrument.
func (ScheduleDuration) Unit() string {
return "s"
}
// Description returns the semantic convention description of the instrument.
func (ScheduleDuration) Description() string {
return "The time goroutines have spent in the scheduler in a runnable state before actually running."
}
// Record records val to the current distribution.
//
// Computed from `/sched/latencies:seconds`. Bucket boundaries are provided by
// the runtime, and are subject to change.
func (m ScheduleDuration) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) {
if len(attrs) == 0 {
m.Float64Histogram.Record(ctx, val)
return
}
o := recOptPool.Get().(*[]metric.RecordOption)
defer func() {
*o = (*o)[:0]
recOptPool.Put(o)
}()
*o = append(*o, metric.WithAttributes(attrs...))
m.Float64Histogram.Record(ctx, val, *o...)
}
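// A minimal recording sketch, not part of the generated API: it assumes a
// metric.Meter named meter and a scheduling latency already derived from the
// "/sched/latencies:seconds" histogram; the value below is a placeholder and
// the helper name is illustrative only.
func exampleRecordScheduleDuration(ctx context.Context, meter metric.Meter) error {
	sched, err := NewScheduleDuration(meter)
	if err != nil {
		return err
	}
	// Record a placeholder latency of 125µs spent runnable before running.
	sched.Record(ctx, 0.000125)
	return nil
}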

View file

@ -0,0 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
// SchemaURL is the schema URL that matches the version of the semantic conventions
// that this package defines. Semconv packages starting from v1.4.0 must declare
// a non-empty schema URL in the form https://opentelemetry.io/schemas/<version>.
const SchemaURL = "https://opentelemetry.io/schemas/1.34.0"
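// A minimal usage sketch, not part of this file: the schema URL can be
// attached to an instrumentation scope when obtaining a Meter or Tracer
// (assumes the go.opentelemetry.io/otel, otel/metric, and otel/trace packages
// are imported; the scope name and helper name are placeholders).
func exampleUseSchemaURL() (metric.Meter, trace.Tracer) {
	m := otel.Meter("example/scope", metric.WithSchemaURL(SchemaURL))
	t := otel.Tracer("example/scope", trace.WithSchemaURL(SchemaURL))
	return m, t
}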

View file

@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/internal/telemetry"
)

View file

@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
return "1.36.0"
return "1.37.0"
}

View file

@ -3,13 +3,12 @@
module-sets:
stable-v1:
version: v1.36.0
version: v1.37.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
- go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/bridge/opentracing
- go.opentelemetry.io/otel/bridge/opentracing/test
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/otlp/otlptrace
@ -23,14 +22,16 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
version: v0.58.0
version: v0.59.0
modules:
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
version: v0.12.0
version: v0.13.0
modules:
- go.opentelemetry.io/otel/log
- go.opentelemetry.io/otel/log/logtest
- go.opentelemetry.io/otel/sdk/log
- go.opentelemetry.io/otel/sdk/log/logtest
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog
@ -40,6 +41,4 @@ module-sets:
- go.opentelemetry.io/otel/schema
excluded-modules:
- go.opentelemetry.io/otel/internal/tools
- go.opentelemetry.io/otel/log/logtest
- go.opentelemetry.io/otel/sdk/log/logtest
- go.opentelemetry.io/otel/trace/internal/telemetry/test