[chore] Update usage of OTEL libraries (#2725)

* otel to 1.24
* prometheus exporter to 0.46
* bunotel to 1.1.17

Also:
* Use schemaless URL for metrics
* Add software version to tracing schema
Daenney 2024-03-11 15:34:34 +01:00 committed by GitHub
commit 5e871e81a8
126 changed files with 12940 additions and 2267 deletions
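The two "Also" bullets in the commit message are not visible in the excerpted hunks below. As a hedged illustration only (the service name and the `"x.y.z"` version string are placeholders, not values taken from this commit), the usual shape of a schemaless resource merge that carries the software version looks like:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// NewSchemaless avoids schema-URL conflicts when merging with the
	// default resource, while still attaching service identity attributes.
	res, err := resource.Merge(
		resource.Default(),
		resource.NewSchemaless(
			semconv.ServiceName("gotosocial"), // placeholder service name
			semconv.ServiceVersion("x.y.z"),   // placeholder version string
		),
	)
	if err != nil {
		fmt.Println("merge failed:", err)
		return
	}
	fmt.Println(res)
}
```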


@@ -52,3 +52,43 @@ func (c *cache[K, V]) Lookup(key K, f func() V) V {
c.data[key] = val
return val
}
// HasKey returns true if Lookup has previously been called with that key
//
// HasKey is safe to call concurrently.
func (c *cache[K, V]) HasKey(key K) bool {
c.Lock()
defer c.Unlock()
_, ok := c.data[key]
return ok
}
// cacheWithErr is a locking storage used to quickly return already computed values and an error.
//
// The zero value of a cacheWithErr is empty and ready to use.
//
// A cacheWithErr must not be copied after first use.
//
// All methods of a cacheWithErr are safe to call concurrently.
type cacheWithErr[K comparable, V any] struct {
cache[K, valAndErr[V]]
}
type valAndErr[V any] struct {
val V
err error
}
// Lookup returns the value stored in the cacheWithErr with the associated key
// if it exists. Otherwise, f is called and its returned value is set in the
// cacheWithErr for key and returned.
//
// Lookup is safe to call concurrently. It will hold the cacheWithErr lock, so f
// should not block excessively.
func (c *cacheWithErr[K, V]) Lookup(key K, f func() (V, error)) (V, error) {
combined := c.cache.Lookup(key, func() valAndErr[V] {
val, err := f()
return valAndErr[V]{val: val, err: err}
})
return combined.val, combined.err
}
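A self-contained sketch of the memoization semantics above (the `memo` and `entry` names are invented for illustration; the vendored types cannot be imported from outside the SDK):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type entry[V any] struct {
	val V
	err error
}

// memo mirrors cacheWithErr: the first Lookup for a key runs f and stores
// both the value and the error; later Lookups return the stored pair.
type memo[K comparable, V any] struct {
	sync.Mutex
	data map[K]entry[V]
}

func (m *memo[K, V]) Lookup(key K, f func() (V, error)) (V, error) {
	m.Lock()
	defer m.Unlock()
	if m.data == nil {
		m.data = make(map[K]entry[V])
	}
	if e, ok := m.data[key]; ok {
		return e.val, e.err
	}
	val, err := f()
	m.data[key] = entry[V]{val: val, err: err}
	return val, err
}

func main() {
	var c memo[string, int]
	v, err := c.Lookup("inst", func() (int, error) { return 42, nil })
	fmt.Println(v, err) // 42 <nil>
	// f is not called again; the memoized result (including its error) wins.
	v, err = c.Lookup("inst", func() (int, error) { return 0, errors.New("never runs") })
	fmt.Println(v, err) // 42 <nil>
}
```

This is why, further down in meter.go, repeated instrument creation with identical identifying fields returns the originally created instrument and its original error.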


@@ -44,4 +44,7 @@
//
// See [go.opentelemetry.io/otel/metric] for more information about
// the metric API.
//
// See [go.opentelemetry.io/otel/sdk/metric/internal/x] for information about
// the experimental features.
package metric // import "go.opentelemetry.io/otel/sdk/metric"

vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go (new file)

@@ -0,0 +1,96 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/sdk/metric"
import (
"os"
"runtime"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/internal/x"
)
// reservoirFunc returns the appropriately configured exemplar reservoir
// creation func based on the passed Aggregation and user-defined
// environment variables.
//
// Note: This will only return non-nil values when the experimental exemplar
// feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable
// is not set to always_off.
func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.Reservoir[N] {
if !x.Exemplars.Enabled() {
return nil
}
// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
resF := func() func() exemplar.Reservoir[N] {
// Explicit bucket histogram aggregation with more than 1 bucket will
// use AlignedHistogramBucketExemplarReservoir.
a, ok := agg.(AggregationExplicitBucketHistogram)
if ok && len(a.Boundaries) > 0 {
cp := make([]float64, len(a.Boundaries))
copy(cp, a.Boundaries)
return func() exemplar.Reservoir[N] {
bounds := cp
return exemplar.Histogram[N](bounds)
}
}
var n int
if a, ok := agg.(AggregationBase2ExponentialHistogram); ok {
// Base2 Exponential Histogram Aggregation SHOULD use a
// SimpleFixedSizeExemplarReservoir with a reservoir equal to the
// smaller of the maximum number of buckets configured on the
// aggregation or twenty (e.g. min(20, max_buckets)).
n = int(a.MaxSize)
if n > 20 {
n = 20
}
} else {
// https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
// This Exemplar reservoir MAY take a configuration parameter for
// the size of the reservoir. If no size configuration is
// provided, the default size MAY be the number of possible
// concurrent threads (e.g. number of CPUs) to help reduce
// contention. Otherwise, a default size of 1 SHOULD be used.
n = runtime.NumCPU()
if n < 1 {
// Should never be the case, but be defensive.
n = 1
}
}
return func() exemplar.Reservoir[N] {
return exemplar.FixedSize[N](n)
}
}
// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
switch os.Getenv(filterEnvKey) {
case "always_on":
return resF()
case "always_off":
return exemplar.Drop[N]
case "trace_based":
fallthrough
default:
newR := resF()
return func() exemplar.Reservoir[N] {
return exemplar.SampledFilter(newR())
}
}
}


@@ -205,9 +205,6 @@ func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.Record
}
func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method.
if err := ctx.Err(); err != nil {
return
}
for _, in := range i.measures {
in(ctx, val, s)
}
@@ -238,9 +235,6 @@ func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.Re
}
func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) {
if err := ctx.Err(); err != nil {
return
}
for _, in := range i.measures {
in(ctx, val, s)
}
@@ -270,9 +264,9 @@ var (
_ metric.Float64ObservableGauge = float64Observable{}
)
func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[float64]) float64Observable {
func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string) float64Observable {
return float64Observable{
observable: newObservable(m, kind, name, desc, u, meas),
observable: newObservable[float64](m, kind, name, desc, u),
}
}
@@ -291,9 +285,9 @@ var (
_ metric.Int64ObservableGauge = int64Observable{}
)
func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[int64]) int64Observable {
func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int64Observable {
return int64Observable{
observable: newObservable(m, kind, name, desc, u, meas),
observable: newObservable[int64](m, kind, name, desc, u),
}
}
@@ -301,11 +295,12 @@ type observable[N int64 | float64] struct {
metric.Observable
observablID[N]
meter *meter
measures []aggregate.Measure[N]
meter *meter
measures measures[N]
dropAggregation bool
}
func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[N]) *observable[N] {
func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] {
return &observable[N]{
observablID: observablID[N]{
name: name,
@@ -314,14 +309,24 @@ func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc,
unit: u,
scope: m.scope,
},
meter: m,
measures: meas,
meter: m,
}
}
// observe records the val for the set of attrs.
func (o *observable[N]) observe(val N, s attribute.Set) {
for _, in := range o.measures {
o.measures.observe(val, s)
}
func (o *observable[N]) appendMeasures(meas []aggregate.Measure[N]) {
o.measures = append(o.measures, meas...)
}
type measures[N int64 | float64] []aggregate.Measure[N]
// observe records the val for the set of attrs.
func (m measures[N]) observe(val N, s attribute.Set) {
for _, in := range m {
in(context.Background(), val, s)
}
}


@@ -19,6 +19,7 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
@@ -44,17 +45,43 @@ type Builder[N int64 | float64] struct {
// Filter is the attribute filter the aggregate function will use on the
// input of measurements.
Filter attribute.Filter
// ReservoirFunc is the factory function used by aggregate functions to
// create new exemplar reservoirs for a new seen attribute set.
//
// If this is not provided a default factory function that returns an
// exemplar.Drop reservoir will be used.
ReservoirFunc func() exemplar.Reservoir[N]
// AggregationLimit is the cardinality limit of measurement attributes. Any
// measurement for new attributes once the limit has been reached will be
// aggregated into a single aggregate for the "otel.metric.overflow"
// attribute.
//
// If AggregationLimit is less than or equal to zero there will not be an
// aggregation limit imposed (i.e. unlimited attribute sets).
AggregationLimit int
}
func (b Builder[N]) filter(f Measure[N]) Measure[N] {
func (b Builder[N]) resFunc() func() exemplar.Reservoir[N] {
if b.ReservoirFunc != nil {
return b.ReservoirFunc
}
return exemplar.Drop[N]
}
type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)
func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] {
if b.Filter != nil {
fltr := b.Filter // Copy to make it immutable after assignment.
return func(ctx context.Context, n N, a attribute.Set) {
fAttr, _ := a.Filter(fltr)
f(ctx, n, fAttr)
fAttr, dropped := a.Filter(fltr)
f(ctx, n, fAttr, dropped)
}
}
return f
return func(ctx context.Context, n N, a attribute.Set) {
f(ctx, n, a, nil)
}
}
// LastValue returns a last-value aggregate function input and output.
@@ -63,7 +90,7 @@ func (b Builder[N]) filter(f Measure[N]) Measure[N] {
func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
// Delta temporality is the only temporality that makes semantic sense for
// a last-value aggregate.
lv := newLastValue[N]()
lv := newLastValue[N](b.AggregationLimit, b.resFunc())
return b.filter(lv.measure), func(dest *metricdata.Aggregation) int {
// Ignore if dest is not a metricdata.Gauge. The chance for memory
@@ -79,7 +106,7 @@ func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
// PrecomputedSum returns a sum aggregate function input and output. The
// arguments passed to the input are expected to be the precomputed sum values.
func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) {
s := newPrecomputedSum[N](monotonic)
s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
return b.filter(s.measure), s.delta
@@ -90,7 +117,7 @@ func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregati
// Sum returns a sum aggregate function input and output.
func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
s := newSum[N](monotonic)
s := newSum[N](monotonic, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
return b.filter(s.measure), s.delta
@@ -102,7 +129,7 @@ func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
// ExplicitBucketHistogram returns a histogram aggregate function input and
// output.
func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
h := newHistogram[N](boundaries, noMinMax, noSum)
h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
return b.filter(h.measure), h.delta
@@ -114,7 +141,7 @@ func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSu
// ExponentialBucketHistogram returns a histogram aggregate function input and
// output.
func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum)
h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
return b.filter(h.measure), h.delta


@@ -23,6 +23,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
@@ -40,6 +41,8 @@ const (
// expoHistogramDataPoint is a single data point in an exponential histogram.
type expoHistogramDataPoint[N int64 | float64] struct {
res exemplar.Reservoir[N]
count uint64
min N
max N
@@ -288,13 +291,15 @@ func (b *expoBuckets) downscale(delta int) {
// newExponentialHistogram returns an Aggregator that summarizes a set of
// measurements as an exponential histogram. Each histogram is scoped by attributes
// and the aggregation cycle the measurements were made in.
func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool) *expoHistogram[N] {
func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *expoHistogram[N] {
return &expoHistogram[N]{
noSum: noSum,
noMinMax: noMinMax,
maxSize: int(maxSize),
maxScale: int(maxScale),
newRes: r,
limit: newLimiter[*expoHistogramDataPoint[N]](limit),
values: make(map[attribute.Set]*expoHistogramDataPoint[N]),
start: now(),
@@ -309,27 +314,35 @@ type expoHistogram[N int64 | float64] struct {
maxSize int
maxScale int
newRes func() exemplar.Reservoir[N]
limit limiter[*expoHistogramDataPoint[N]]
values map[attribute.Set]*expoHistogramDataPoint[N]
valuesMu sync.Mutex
start time.Time
}
func (e *expoHistogram[N]) measure(_ context.Context, value N, attr attribute.Set) {
func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
// Ignore NaN and infinity.
if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) {
return
}
t := now()
e.valuesMu.Lock()
defer e.valuesMu.Unlock()
attr := e.limit.Attributes(fltrAttr, e.values)
v, ok := e.values[attr]
if !ok {
v = newExpoHistogramDataPoint[N](e.maxSize, e.maxScale, e.noMinMax, e.noSum)
v.res = e.newRes()
e.values[attr] = v
}
v.record(value)
v.res.Offer(ctx, t, value, droppedAttr)
}
func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
@@ -362,6 +375,7 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin)
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts))
copy(hDPts[i].NegativeBucket.Counts, b.negBuckets.counts)
if !e.noSum {
hDPts[i].Sum = b.sum
@@ -371,6 +385,8 @@
hDPts[i].Max = metricdata.NewExtrema(b.max)
}
b.res.Collect(&hDPts[i].Exemplars)
delete(e.values, a)
i++
}
@@ -410,6 +426,7 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin)
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts))
copy(hDPts[i].NegativeBucket.Counts, b.negBuckets.counts)
if !e.noSum {
hDPts[i].Sum = b.sum
@@ -419,6 +436,8 @@
hDPts[i].Max = metricdata.NewExtrema(b.max)
}
b.res.Collect(&hDPts[i].Exemplars)
i++
// TODO (#3006): This will use an unbounded amount of memory if there
// are unbounded number of attribute sets being aggregated. Attribute


@@ -21,10 +21,13 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
type buckets[N int64 | float64] struct {
res exemplar.Reservoir[N]
counts []uint64
count uint64
total N
@@ -54,11 +57,13 @@ type histValues[N int64 | float64] struct {
noSum bool
bounds []float64
newRes func() exemplar.Reservoir[N]
limit limiter[*buckets[N]]
values map[attribute.Set]*buckets[N]
valuesMu sync.Mutex
}
func newHistValues[N int64 | float64](bounds []float64, noSum bool) *histValues[N] {
func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histValues[N] {
// Keeping all buckets correctly associated with the passed boundaries is
// ultimately this type's responsibility. Make a copy
// here so we can always guarantee this. Or, in the case of failure, have
@@ -69,13 +74,15 @@ func newHistValues[N int64 | float64](bounds []float64, noSum bool) *histValues[
return &histValues[N]{
noSum: noSum,
bounds: b,
newRes: r,
limit: newLimiter[*buckets[N]](limit),
values: make(map[attribute.Set]*buckets[N]),
}
}
// Aggregate records the measurement value, scoped by attr, and aggregates it
// into a histogram.
func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set) {
func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
// This search will return an index in the range [0, len(s.bounds)], where
// it will return len(s.bounds) if value is greater than the last element
// of s.bounds. This aligns with the buckets in that the length of buckets
@@ -83,9 +90,12 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set)
// (s.bounds[len(s.bounds)-1], +∞).
idx := sort.SearchFloat64s(s.bounds, float64(value))
t := now()
s.valuesMu.Lock()
defer s.valuesMu.Unlock()
attr := s.limit.Attributes(fltrAttr, s.values)
b, ok := s.values[attr]
if !ok {
// N+1 buckets. For example:
@@ -96,6 +106,8 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set)
//
// buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
b = newBuckets[N](len(s.bounds) + 1)
b.res = s.newRes()
// Ensure min and max are recorded values (not zero), for new buckets.
b.min, b.max = value, value
s.values[attr] = b
@@ -104,13 +116,14 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set)
if !s.noSum {
b.sum(value)
}
b.res.Offer(ctx, t, value, droppedAttr)
}
// newHistogram returns an Aggregator that summarizes a set of measurements as
// a histogram.
func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool) *histogram[N] {
func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histogram[N] {
return &histogram[N]{
histValues: newHistValues[N](boundaries, noSum),
histValues: newHistValues[N](boundaries, noSum, limit, r),
noMinMax: noMinMax,
start: now(),
}
@@ -161,6 +174,8 @@ func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
hDPts[i].Max = metricdata.NewExtrema(b.max)
}
b.res.Collect(&hDPts[i].Exemplars)
// Unused attribute sets do not report.
delete(s.values, a)
i++
@@ -217,6 +232,9 @@ func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int {
hDPts[i].Min = metricdata.NewExtrema(b.min)
hDPts[i].Max = metricdata.NewExtrema(b.max)
}
b.res.Collect(&hDPts[i].Exemplars)
i++
// TODO (#3006): This will use an unbounded amount of memory if there
// are unbounded number of attribute sets being aggregated. Attribute


@@ -20,6 +20,7 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
@@ -27,24 +28,43 @@ import (
type datapoint[N int64 | float64] struct {
timestamp time.Time
value N
res exemplar.Reservoir[N]
}
func newLastValue[N int64 | float64]() *lastValue[N] {
return &lastValue[N]{values: make(map[attribute.Set]datapoint[N])}
func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *lastValue[N] {
return &lastValue[N]{
newRes: r,
limit: newLimiter[datapoint[N]](limit),
values: make(map[attribute.Set]datapoint[N]),
}
}
// lastValue summarizes a set of measurements as the last one made.
type lastValue[N int64 | float64] struct {
sync.Mutex
newRes func() exemplar.Reservoir[N]
limit limiter[datapoint[N]]
values map[attribute.Set]datapoint[N]
}
func (s *lastValue[N]) measure(ctx context.Context, value N, attr attribute.Set) {
d := datapoint[N]{timestamp: now(), value: value}
func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
t := now()
s.Lock()
defer s.Unlock()
attr := s.limit.Attributes(fltrAttr, s.values)
d, ok := s.values[attr]
if !ok {
d.res = s.newRes()
}
d.timestamp = t
d.value = value
d.res.Offer(ctx, t, value, droppedAttr)
s.values[attr] = d
s.Unlock()
}
func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) {
@@ -61,6 +81,7 @@ func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) {
// ignored.
(*dest)[i].Time = v.timestamp
(*dest)[i].Value = v.value
v.res.Collect(&(*dest)[i].Exemplars)
// Do not report stale values.
delete(s.values, a)
i++


@@ -0,0 +1,53 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
import "go.opentelemetry.io/otel/attribute"
// overflowSet is the attribute set used to record a measurement when adding
// another distinct attribute set to the aggregate would exceed the aggregate
// limit.
var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))
// limiter limits aggregate values.
type limiter[V any] struct {
// aggLimit is the maximum number of metric streams that can be aggregated.
//
// Any metric stream with attributes distinct from any set already
// aggregated once the aggLimit has been met will instead be aggregated
// into an "overflow" metric stream. That stream will only contain the
// "otel.metric.overflow"=true attribute.
aggLimit int
}
// newLimiter returns a new Limiter with the provided aggregation limit.
func newLimiter[V any](aggregation int) limiter[V] {
return limiter[V]{aggLimit: aggregation}
}
// Attributes checks if adding a measurement for attrs will exceed the
// aggregation cardinality limit for the existing measurements. If it will,
// overflowSet is returned. Otherwise, if it will not exceed the limit, or the
// limit is not set (limit <= 0), attrs is returned.
func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Set]V) attribute.Set {
if l.aggLimit > 0 {
_, exists := measurements[attrs]
if !exists && len(measurements) >= l.aggLimit-1 {
return overflowSet
}
}
return attrs
}
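A runnable sketch of the overflow behaviour (the helper `attributesUnderLimit` is an illustrative restatement of `limiter.Attributes`, not SDK API):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))

// attributesUnderLimit restates limiter.Attributes: once limit-1 distinct
// sets are stored, any additional new set is redirected to the overflow set.
func attributesUnderLimit(limit int, attrs attribute.Set, seen map[attribute.Set]int64) attribute.Set {
	if limit > 0 {
		if _, ok := seen[attrs]; !ok && len(seen) >= limit-1 {
			return overflowSet
		}
	}
	return attrs
}

func main() {
	seen := map[attribute.Set]int64{}
	for _, user := range []string{"a", "b", "c", "d"} {
		set := attribute.NewSet(attribute.String("user", user))
		seen[attributesUnderLimit(3, set, seen)]++
	}
	fmt.Println("streams kept:", len(seen)) // 3: "a", "b", and the overflow stream
	_, ok := seen[overflowSet]
	fmt.Println("overflow stream present:", ok) // true
}
```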


@@ -20,31 +20,55 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
type sumValue[N int64 | float64] struct {
n N
res exemplar.Reservoir[N]
}
// valueMap is the storage for sums.
type valueMap[N int64 | float64] struct {
sync.Mutex
values map[attribute.Set]N
newRes func() exemplar.Reservoir[N]
limit limiter[sumValue[N]]
values map[attribute.Set]sumValue[N]
}
func newValueMap[N int64 | float64]() *valueMap[N] {
return &valueMap[N]{values: make(map[attribute.Set]N)}
func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *valueMap[N] {
return &valueMap[N]{
newRes: r,
limit: newLimiter[sumValue[N]](limit),
values: make(map[attribute.Set]sumValue[N]),
}
}
func (s *valueMap[N]) measure(_ context.Context, value N, attr attribute.Set) {
func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
t := now()
s.Lock()
s.values[attr] += value
s.Unlock()
defer s.Unlock()
attr := s.limit.Attributes(fltrAttr, s.values)
v, ok := s.values[attr]
if !ok {
v.res = s.newRes()
}
v.n += value
v.res.Offer(ctx, t, value, droppedAttr)
s.values[attr] = v
}
// newSum returns an aggregator that summarizes a set of measurements as their
// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
// the measurements were made in.
func newSum[N int64 | float64](monotonic bool) *sum[N] {
func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *sum[N] {
return &sum[N]{
valueMap: newValueMap[N](),
valueMap: newValueMap[N](limit, r),
monotonic: monotonic,
start: now(),
}
@@ -74,11 +98,12 @@ func (s *sum[N]) delta(dest *metricdata.Aggregation) int {
dPts := reset(sData.DataPoints, n, n)
var i int
for attr, value := range s.values {
for attr, val := range s.values {
dPts[i].Attributes = attr
dPts[i].StartTime = s.start
dPts[i].Time = t
dPts[i].Value = value
dPts[i].Value = val.n
val.res.Collect(&dPts[i].Exemplars)
// Do not report stale values.
delete(s.values, attr)
i++
@@ -112,7 +137,8 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
dPts[i].Attributes = attr
dPts[i].StartTime = s.start
dPts[i].Time = t
dPts[i].Value = value
dPts[i].Value = value.n
value.res.Collect(&dPts[i].Exemplars)
// TODO (#3006): This will use an unbounded amount of memory if there
// are unbounded number of attribute sets being aggregated. Attribute
// sets that become "stale" need to be forgotten so this will not
@@ -129,9 +155,9 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
// newPrecomputedSum returns an aggregator that summarizes a set of
// observations as their arithmetic sum. Each sum is scoped by attributes and
// the aggregation cycle the measurements were made in.
func newPrecomputedSum[N int64 | float64](monotonic bool) *precomputedSum[N] {
func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *precomputedSum[N] {
return &precomputedSum[N]{
valueMap: newValueMap[N](),
valueMap: newValueMap[N](limit, r),
monotonic: monotonic,
start: now(),
}
@@ -165,14 +191,15 @@ func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int {
var i int
for attr, value := range s.values {
delta := value - s.reported[attr]
delta := value.n - s.reported[attr]
dPts[i].Attributes = attr
dPts[i].StartTime = s.start
dPts[i].Time = t
dPts[i].Value = delta
value.res.Collect(&dPts[i].Exemplars)
newReported[attr] = value
newReported[attr] = value.n
// Unused attribute sets do not report.
delete(s.values, attr)
i++
@@ -204,11 +231,12 @@ func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int {
dPts := reset(sData.DataPoints, n, n)
var i int
for attr, value := range s.values {
for attr, val := range s.values {
dPts[i].Attributes = attr
dPts[i].StartTime = s.start
dPts[i].Time = t
dPts[i].Value = value
dPts[i].Value = val.n
val.res.Collect(&dPts[i].Exemplars)
// Unused attribute sets do not report.
delete(s.values, attr)


@@ -0,0 +1,17 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package exemplar provides an implementation of the OpenTelemetry exemplar
// reservoir to be used in metric collection pipelines.
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"


@@ -0,0 +1,36 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
import (
"context"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// Drop returns a [Reservoir] that drops all measurements it is offered.
func Drop[N int64 | float64]() Reservoir[N] { return &dropRes[N]{} }
type dropRes[N int64 | float64] struct{}
// Offer does nothing; all measurements offered will be dropped.
func (r *dropRes[N]) Offer(context.Context, time.Time, N, []attribute.KeyValue) {}
// Collect resets dest. No exemplars will ever be returned.
func (r *dropRes[N]) Collect(dest *[]metricdata.Exemplar[N]) {
*dest = (*dest)[:0]
}


@@ -0,0 +1,40 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
import (
"context"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// SampledFilter returns a [Reservoir] wrapping r that will only offer measurements
// to r if the passed context associated with the measurement contains a sampled
// [go.opentelemetry.io/otel/trace.SpanContext].
func SampledFilter[N int64 | float64](r Reservoir[N]) Reservoir[N] {
return filtered[N]{Reservoir: r}
}
type filtered[N int64 | float64] struct {
Reservoir[N]
}
func (f filtered[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
if trace.SpanContextFromContext(ctx).IsSampled() {
f.Reservoir.Offer(ctx, t, n, a)
}
}
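A standalone check of the guard used by `filtered.Offer`, relying only on the public trace API:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

// sampledOnly mirrors the condition in filtered.Offer: forward a measurement
// only when the context carries a sampled span context.
func sampledOnly(ctx context.Context) bool {
	return trace.SpanContextFromContext(ctx).IsSampled()
}

func main() {
	fmt.Println(sampledOnly(context.Background())) // false: no span in context

	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    trace.TraceID{0x01},
		SpanID:     trace.SpanID{0x01},
		TraceFlags: trace.FlagsSampled,
	})
	ctx := trace.ContextWithSpanContext(context.Background(), sc)
	fmt.Println(sampledOnly(ctx)) // true: sampled span context present
}
```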


@@ -0,0 +1,47 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
import (
"context"
"sort"
"time"
"go.opentelemetry.io/otel/attribute"
)
// Histogram returns a [Reservoir] that samples the last measurement that falls
// within a histogram bucket. The histogram bucket upper-boundaries are defined
// by bounds.
//
// The passed bounds will be sorted by this function.
func Histogram[N int64 | float64](bounds []float64) Reservoir[N] {
sort.Float64s(bounds)
return &histRes[N]{
bounds: bounds,
storage: newStorage[N](len(bounds) + 1),
}
}
type histRes[N int64 | float64] struct {
*storage[N]
// bounds are bucket bounds in ascending order.
bounds []float64
}
func (r *histRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
r.store[sort.SearchFloat64s(r.bounds, float64(n))] = newMeasurement(ctx, t, n, a)
}
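A quick, standalone check of the bucket-slot arithmetic in `histRes.Offer`:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// SearchFloat64s maps each value to the index of its histogram bucket,
	// so a store with len(bounds)+1 slots holds at most one exemplar per
	// bucket, each new offer overwriting the previous one for that bucket.
	bounds := []float64{0, 5, 10}
	for _, v := range []float64{-1, 3, 5, 7, 99} {
		fmt.Printf("value %v -> slot %d\n", v, sort.SearchFloat64s(bounds, v))
	}
	// value -1 -> slot 0
	// value 3 -> slot 1
	// value 5 -> slot 1
	// value 7 -> slot 2
	// value 99 -> slot 3
}
```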


@@ -0,0 +1,195 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
import (
"context"
"math"
"math/rand"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// rng is used to make sampling decisions.
//
// Do not use crypto/rand. There is no reason for the decrease in performance
// given this is not a security sensitive decision.
var rng = rand.New(rand.NewSource(time.Now().UnixNano()))
// random returns, as a float64, a uniform pseudo-random number in the open
// interval (0.0,1.0).
func random() float64 {
// TODO: This does not return a uniform number. rng.Float64 returns a
// uniformly random int in [0,2^53) that is divided by 2^53. Meaning it
// returns multiples of 2^-53, and not all floating point numbers between 0
// and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand
// are always going to be 0).
//
// An alternative algorithm should be considered that will actually return
// a uniform number in the interval (0,1). For example, since the default
// rand source provides a uniform distribution for Int63, this can be
// converted following the prototypical code of Mersenne Twister 64 (Takuji
// Nishimura and Makoto Matsumoto:
// http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c)
//
// (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0)
//
// There are likely many other methods to explore here as well.
f := rng.Float64()
for f == 0 {
f = rng.Float64()
}
return f
}
// FixedSize returns a [Reservoir] that samples at most k exemplars. If there
// are k or fewer measurements made, the Reservoir will sample each one. If
// there are more than k, the Reservoir will then randomly sample all
// additional measurements with a decreasing probability.
func FixedSize[N int64 | float64](k int) Reservoir[N] {
r := &randRes[N]{storage: newStorage[N](k)}
r.reset()
return r
}
type randRes[N int64 | float64] struct {
*storage[N]
// count is the number of measurement seen.
count int64
// next is the next count that will store a measurement at a random index
// once the reservoir has been filled.
next int64
// w is the largest random number in a distribution that is used to compute
// the next value of next.
w float64
}
func (r *randRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) {
// The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December
// 1994). "Reservoir-Sampling Algorithms of Time Complexity
// O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4):
// 481–493 (https://dl.acm.org/doi/10.1145/198429.198435).
//
// A high-level overview of "Algorithm L":
// 0) Pre-calculate the random count greater than the storage size when
// an exemplar will be replaced.
// 1) Accept all measurements offered until the configured storage size is
// reached.
// 2) Loop:
// a) When the pre-calculated count is reached, replace a random
// existing exemplar with the offered measurement.
// b) Calculate the next random count greater than the existing one
// which will replace another exemplar.
//
// The way a "replacement" count is computed is by looking at `n` number of
// independent random numbers each corresponding to an offered measurement.
// Of these numbers the smallest `k` (the same size as the storage
// capacity) of them are kept as a subset. The maximum value in this
// subset, called `w` is used to weight another random number generation
// for the next count that will be considered.
//
// By weighting the next count computation as described, the reservoir can
// perform a uniformly-weighted sampling algorithm based on the number of
// samples the reservoir has seen so far. The sampling will "slow down" as
// more and more samples are offered so as to reduce a bias towards those
// offered just prior to the end of the collection.
//
// This algorithm is preferred because of its balance of simplicity and
// performance. It will compute three random numbers (the bulk of
// computation time) for each item that becomes part of the reservoir, but
// it does not spend any time on items that do not. In particular it has an
// asymptotic runtime of O(k(1 + log(n/k))) where n is the number of
// measurements offered and k is the reservoir size.
//
// See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of
// this and other reservoir sampling algorithms. See
// https://github.com/MrAlias/reservoir-sampling for a performance
// comparison of reservoir sampling algorithms.
if int(r.count) < cap(r.store) {
r.store[r.count] = newMeasurement(ctx, t, n, a)
} else {
if r.count == r.next {
// Overwrite a random existing measurement with the one offered.
idx := int(rng.Int63n(int64(cap(r.store))))
r.store[idx] = newMeasurement(ctx, t, n, a)
r.advance()
}
}
r.count++
}
// reset resets r to the initial state.
func (r *randRes[N]) reset() {
// This resets the number of exemplars known.
r.count = 0
// Random index inserts should only happen after the storage is full.
r.next = int64(cap(r.store))
// Initial random number in the series used to generate r.next.
//
// This is set before r.advance to reset or initialize the random number
// series. Without doing so it would always be 0 or never restart a new
// random number series.
//
// This maps the uniform random number in (0,1) to a geometric distribution
// over the same interval. The mean of the distribution is inversely
// proportional to the storage capacity.
r.w = math.Exp(math.Log(random()) / float64(cap(r.store)))
r.advance()
}
// advance updates the count at which the offered measurement will overwrite an
// existing exemplar.
func (r *randRes[N]) advance() {
// Calculate the next value in the random number series.
//
// The current value of r.w is based on the max of a distribution of random
// numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity
// of the storage and each `u` in the interval (0,w)). To calculate the
// next r.w we use the fact that when the next exemplar is selected to be
// included in the storage an existing one will be dropped, and the
// corresponding random number in the set used to calculate r.w will also
// be replaced. The replacement random number will also be within (0,w),
// therefore the next r.w will be based on the same distribution (i.e.
// `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by
// computing the next random number `u` and take r.w as `w * u^(1/k)`.
r.w *= math.Exp(math.Log(random()) / float64(cap(r.store)))
// Use the new random number in the series to calculate the count of the
// next measurement that will be stored.
//
// Given 0 < r.w < 1, each iteration will result in subsequent r.w being
// smaller. This translates here into the next next being selected against
// a distribution with a higher mean (i.e. the expected value will increase
// and replacements become less likely).
//
// Important to note, the new r.next will always be at least 1 more than
// the last r.next.
r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1
}
func (r *randRes[N]) Collect(dest *[]metricdata.Exemplar[N]) {
r.storage.Collect(dest)
// Call reset here even though it will reset r.count and restart the random
// number series. This will persist any old exemplars as long as no new
// measurements are offered, but it will also prioritize those new
// measurements that are made over the older collection cycle ones.
r.reset()
}
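For intuition, here is a standalone restatement of the Algorithm L loop above, outside the reservoir type (the function names are invented for illustration; only math/rand is used):

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
)

// randOpen mirrors random() above: uniform in (0,1), never exactly 0, so the
// logarithms below stay finite.
func randOpen() float64 {
	f := rand.Float64()
	for f == 0 {
		f = rand.Float64()
	}
	return f
}

// fixedSizeSample restates the randRes.Offer/advance loop: fill the first k
// slots, then overwrite random slots at geometrically growing intervals.
func fixedSizeSample(k int, stream []int) []int {
	store := make([]int, 0, k)
	w := math.Exp(math.Log(randOpen()) / float64(k))
	next := int64(k)
	advance := func() {
		w *= math.Exp(math.Log(randOpen()) / float64(k))
		next += int64(math.Log(randOpen())/math.Log(1-w)) + 1
	}
	advance()
	for count, v := range stream {
		if len(store) < k {
			store = append(store, v)
			continue
		}
		if int64(count) == next {
			store[rand.Intn(k)] = v // replace a random existing sample
			advance()
		}
	}
	return store
}

func main() {
	stream := make([]int, 1000)
	for i := range stream {
		stream[i] = i
	}
	// Prints 4 elements drawn roughly uniformly from the 1000 offered.
	fmt.Println(fixedSizeSample(4, stream))
}
```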


@@ -0,0 +1,44 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
import (
"context"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// Reservoir holds the sampled exemplar of measurements made.
type Reservoir[N int64 | float64] interface {
// Offer accepts the parameters associated with a measurement. The
// parameters will be stored as an exemplar if the Reservoir decides to
// sample the measurement.
//
// The passed ctx needs to contain any baggage or span that were active
// when the measurement was made. This information may be used by the
// Reservoir in making a sampling decision.
//
// The time t is the time when the measurement was made. The val and attr
// parameters are the value and dropped (filtered) attributes of the
// measurement respectively.
Offer(ctx context.Context, t time.Time, val N, attr []attribute.KeyValue)
// Collect returns all the held exemplars.
//
// The Reservoir state is preserved after this call.
Collect(dest *[]metricdata.Exemplar[N])
}


@@ -0,0 +1,107 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
import (
"context"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
"go.opentelemetry.io/otel/trace"
)
// storage is an exemplar storage for [Reservoir] implementations.
type storage[N int64 | float64] struct {
// store are the measurements sampled.
//
// This does not use []metricdata.Exemplar because it potentially would
// require an allocation for trace and span IDs in the hot path of Offer.
store []measurement[N]
}
func newStorage[N int64 | float64](n int) *storage[N] {
return &storage[N]{store: make([]measurement[N], n)}
}
// Collect returns all the held exemplars.
//
// The Reservoir state is preserved after this call.
func (r *storage[N]) Collect(dest *[]metricdata.Exemplar[N]) {
*dest = reset(*dest, len(r.store), len(r.store))
var n int
for _, m := range r.store {
if !m.valid {
continue
}
m.Exemplar(&(*dest)[n])
n++
}
*dest = (*dest)[:n]
}
// measurement is a measurement made by a telemetry system.
type measurement[N int64 | float64] struct {
// FilteredAttributes are the attributes dropped during the measurement.
FilteredAttributes []attribute.KeyValue
// Time is the time when the measurement was made.
Time time.Time
// Value is the value of the measurement.
Value N
// SpanContext is the SpanContext active when a measurement was made.
SpanContext trace.SpanContext
valid bool
}
// newMeasurement returns a new non-empty Measurement.
func newMeasurement[N int64 | float64](ctx context.Context, ts time.Time, v N, droppedAttr []attribute.KeyValue) measurement[N] {
return measurement[N]{
FilteredAttributes: droppedAttr,
Time: ts,
Value: v,
SpanContext: trace.SpanContextFromContext(ctx),
valid: true,
}
}
// Exemplar returns m as a [metricdata.Exemplar].
func (m measurement[N]) Exemplar(dest *metricdata.Exemplar[N]) {
dest.FilteredAttributes = m.FilteredAttributes
dest.Time = m.Time
dest.Value = m.Value
if m.SpanContext.HasTraceID() {
traceID := m.SpanContext.TraceID()
dest.TraceID = traceID[:]
} else {
dest.TraceID = dest.TraceID[:0]
}
if m.SpanContext.HasSpanID() {
spanID := m.SpanContext.SpanID()
dest.SpanID = spanID[:]
} else {
dest.SpanID = dest.SpanID[:0]
}
}
func reset[T any](s []T, length, capacity int) []T {
if cap(s) < capacity {
return make([]T, length, capacity)
}
return s[:length]
}


@@ -0,0 +1,112 @@
# Experimental Features
The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
These features may change in backwards-incompatible ways as feedback is applied.
See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
## Features
- [Cardinality Limit](#cardinality-limit)
- [Exemplars](#exemplars)
### Cardinality Limit
The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument.
This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment variable.
The value must be an integer.
All other values are ignored.
If the value set is less than or equal to `0`, no limit will be applied.
#### Examples
Set the cardinality limit to 2000.
```console
export OTEL_GO_X_CARDINALITY_LIMIT=2000
```
Set an infinite cardinality limit (functionally equivalent to disabling the feature).
```console
export OTEL_GO_X_CARDINALITY_LIMIT=-1
```
Disable the cardinality limit.
```console
unset OTEL_GO_X_CARDINALITY_LIMIT
```
### Exemplars
A sample of measurements made may be exported directly as a set of exemplars.
This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable.
The value must be the case-insensitive string `"true"` to enable the feature.
All other values are ignored.
Exemplar filters are also supported.
The exemplar filter applies to all measurements made.
They filter these measurements, only allowing certain measurements to be passed to the underlying exemplar reservoir.
To change the exemplar filter from the default `"trace_based"` filter set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable.
The value must be the case-sensitive string defined by the [OpenTelemetry specification].
- `"always_on"`: allows all measurements
- `"always_off"`: denies all measurements
- `"trace_based"`: allows only sampled measurements
All values other than these will result in the default, `"trace_based"`, exemplar filter being used.
[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar
#### Examples
Enable exemplars to be exported.
```console
export OTEL_GO_X_EXEMPLAR=true
```
Disable exemplars from being exported.
```console
unset OTEL_GO_X_EXEMPLAR
```
Set the exemplar filter to allow all measurements.
```console
export OTEL_METRICS_EXEMPLAR_FILTER=always_on
```
Set the exemplar filter to deny all measurements.
```console
export OTEL_METRICS_EXEMPLAR_FILTER=always_off
```
Set the exemplar filter to only allow sampled measurements.
```console
export OTEL_METRICS_EXEMPLAR_FILTER=trace_based
```
Revert to the default exemplar filter (`"trace_based"`).
```console
unset OTEL_METRICS_EXEMPLAR_FILTER
```
## Compatibility and Stability
Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
These features may be removed or modified in successive version releases, including patch versions.
When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.


@@ -0,0 +1,96 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package x contains support for OTel metric SDK experimental features.
//
// This package should only be used for features defined in the specification.
// It should not be used for experiments or new project ideas.
package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
import (
"os"
"strconv"
"strings"
)
var (
// Exemplars is an experimental feature flag that defines if exemplars
// should be recorded for metric data-points.
//
// To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) {
if strings.ToLower(v) == "true" {
return v, true
}
return "", false
})
// CardinalityLimit is an experimental feature flag that defines if
// cardinality limits should be applied to the recorded metric data-points.
//
// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
// variable to the integer limit value you want to use.
//
// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0
// will disable the cardinality limits.
CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
n, err := strconv.Atoi(v)
if err != nil {
return 0, false
}
return n, true
})
)
// Feature is an experimental feature control flag. It provides a uniform way
// to interact with these feature flags and parse their values.
type Feature[T any] struct {
key string
parse func(v string) (T, bool)
}
func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
const envKeyRoot = "OTEL_GO_X_"
return Feature[T]{
key: envKeyRoot + suffix,
parse: parse,
}
}
// Key returns the environment variable key that needs to be set to enable the
// feature.
func (f Feature[T]) Key() string { return f.key }
// Lookup returns the user configured value for the feature and true if the
// user has enabled the feature. Otherwise, if the feature is not enabled, a
// zero-value and false are returned.
func (f Feature[T]) Lookup() (v T, ok bool) {
// https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
//
// > The SDK MUST interpret an empty value of an environment variable the
// > same way as when the variable is unset.
vRaw := os.Getenv(f.key)
if vRaw == "" {
return v, ok
}
return f.parse(vRaw)
}
// Enabled returns if the feature is enabled.
func (f Feature[T]) Enabled() bool {
_, ok := f.Lookup()
return ok
}
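Because package x is internal, code outside the SDK cannot call `Feature.Lookup` directly; configuration happens purely through the environment. A standalone mirror of the CardinalityLimit parsing (the helper name is invented for illustration):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// lookupCardinalityLimit mirrors Feature[int].Lookup for CardinalityLimit
// above: an empty variable reads as unset, and a non-integer value leaves
// the feature disabled.
func lookupCardinalityLimit() (int, bool) {
	v := os.Getenv("OTEL_GO_X_CARDINALITY_LIMIT")
	if v == "" {
		return 0, false
	}
	n, err := strconv.Atoi(v)
	if err != nil {
		return 0, false
	}
	return n, true
}

func main() {
	os.Setenv("OTEL_GO_X_CARDINALITY_LIMIT", "2000")
	if limit, ok := lookupCardinalityLimit(); ok {
		fmt.Println("cardinality limit:", limit) // cardinality limit: 2000
	}
}
```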


@@ -23,6 +23,7 @@ import (
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/embedded"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
)
@@ -40,6 +41,11 @@ type meter struct {
scope instrumentation.Scope
pipes pipelines
int64Insts *cacheWithErr[instID, *int64Inst]
float64Insts *cacheWithErr[instID, *float64Inst]
int64ObservableInsts *cacheWithErr[instID, int64Observable]
float64ObservableInsts *cacheWithErr[instID, float64Observable]
int64Resolver resolver[int64]
float64Resolver resolver[float64]
}
@@ -49,11 +55,20 @@ func newMeter(s instrumentation.Scope, p pipelines) *meter {
// meter is asked to create are logged to the user.
var viewCache cache[string, instID]
var int64Insts cacheWithErr[instID, *int64Inst]
var float64Insts cacheWithErr[instID, *float64Inst]
var int64ObservableInsts cacheWithErr[instID, int64Observable]
var float64ObservableInsts cacheWithErr[instID, float64Observable]
return &meter{
scope: s,
pipes: p,
int64Resolver: newResolver[int64](p, &viewCache),
float64Resolver: newResolver[float64](p, &viewCache),
scope: s,
pipes: p,
int64Insts: &int64Insts,
float64Insts: &float64Insts,
int64ObservableInsts: &int64ObservableInsts,
float64ObservableInsts: &float64ObservableInsts,
int64Resolver: newResolver[int64](p, &viewCache),
float64Resolver: newResolver[float64](p, &viewCache),
}
}
@@ -104,20 +119,62 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti
return i, validateInstrumentName(name)
}
// int64ObservableInstrument returns a new observable identified by the Instrument.
// It registers callbacks for each reader's pipeline.
func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) {
key := instID{
Name: id.Name,
Description: id.Description,
Unit: id.Unit,
Kind: id.Kind,
}
if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
warnRepeatedObservableCallbacks(id)
}
return m.int64ObservableInsts.Lookup(key, func() (int64Observable, error) {
inst := newInt64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
for _, insert := range m.int64Resolver.inserters {
// Connect the measure functions for instruments in this pipeline with the
// callbacks for this pipeline.
in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
if err != nil {
return inst, err
}
// Drop aggregation
if len(in) == 0 {
inst.dropAggregation = true
continue
}
inst.appendMeasures(in)
for _, cback := range callbacks {
inst := int64Observer{measures: in}
fn := cback
insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
}
}
return inst, validateInstrumentName(id.Name)
})
}
// Int64ObservableCounter returns a new instrument identified by name and
// configured with options. The instrument is used to asynchronously record
// increasing int64 measurements once per measurement collection cycle.
// Only the measurements recorded during the collection cycle are exported.
//
// If Int64ObservableCounter is invoked repeatedly with the same Name,
// Description, and Unit, only the first set of callbacks provided is used.
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
// if instrumentation can be created multiple times with different callbacks.
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
cfg := metric.NewInt64ObservableCounterConfig(options...)
const kind = InstrumentKindObservableCounter
p := int64ObservProvider{m}
inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
if err != nil {
return nil, err
id := Instrument{
Name: name,
Description: cfg.Description(),
Unit: cfg.Unit(),
Kind: InstrumentKindObservableCounter,
Scope: m.scope,
}
p.registerCallbacks(inst, cfg.Callbacks())
return inst, validateInstrumentName(name)
return m.int64ObservableInstrument(id, cfg.Callbacks())
}
// Int64ObservableUpDownCounter returns a new instrument identified by name and
@@ -126,14 +183,14 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser
// measurements recorded during the collection cycle are exported.
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
const kind = InstrumentKindObservableUpDownCounter
p := int64ObservProvider{m}
inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
if err != nil {
return nil, err
id := Instrument{
Name: name,
Description: cfg.Description(),
Unit: cfg.Unit(),
Kind: InstrumentKindObservableUpDownCounter,
Scope: m.scope,
}
p.registerCallbacks(inst, cfg.Callbacks())
return inst, validateInstrumentName(name)
return m.int64ObservableInstrument(id, cfg.Callbacks())
}
// Int64ObservableGauge returns a new instrument identified by name and
@ -142,14 +199,14 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6
// Only the measurements recorded during the collection cycle are exported.
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
cfg := metric.NewInt64ObservableGaugeConfig(options...)
const kind = InstrumentKindObservableGauge
p := int64ObservProvider{m}
inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
if err != nil {
return nil, err
id := Instrument{
Name: name,
Description: cfg.Description(),
Unit: cfg.Unit(),
Kind: InstrumentKindObservableGauge,
Scope: m.scope,
}
p.registerCallbacks(inst, cfg.Callbacks())
return inst, validateInstrumentName(name)
return m.int64ObservableInstrument(id, cfg.Callbacks())
}
// Float64Counter returns a new instrument identified by name and configured
@ -196,20 +253,62 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram
return i, validateInstrumentName(name)
}
// float64ObservableInstrument returns a new observable identified by the Instrument.
// It registers callbacks for each reader's pipeline.
func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) {
key := instID{
Name: id.Name,
Description: id.Description,
Unit: id.Unit,
Kind: id.Kind,
}
if m.float64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
warnRepeatedObservableCallbacks(id)
}
return m.float64ObservableInsts.Lookup(key, func() (float64Observable, error) {
inst := newFloat64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
for _, insert := range m.float64Resolver.inserters {
// Connect the measure functions for instruments in this pipeline with the
// callbacks for this pipeline.
in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
if err != nil {
return inst, err
}
// Drop aggregation
if len(in) == 0 {
inst.dropAggregation = true
continue
}
inst.appendMeasures(in)
for _, cback := range callbacks {
inst := float64Observer{measures: in}
fn := cback
insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
}
}
return inst, validateInstrumentName(id.Name)
})
}
// Float64ObservableCounter returns a new instrument identified by name and
// configured with options. The instrument is used to asynchronously record
// increasing float64 measurements once per measurement collection cycle.
// Only the measurements recorded during the collection cycle are exported.
//
// If Float64ObservableCounter is invoked repeatedly with the same Name,
// Description, and Unit, only the first set of callbacks provided is used.
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
// if instrumentation can be created multiple times with different callbacks.
func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
cfg := metric.NewFloat64ObservableCounterConfig(options...)
const kind = InstrumentKindObservableCounter
p := float64ObservProvider{m}
inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
if err != nil {
return nil, err
id := Instrument{
Name: name,
Description: cfg.Description(),
Unit: cfg.Unit(),
Kind: InstrumentKindObservableCounter,
Scope: m.scope,
}
p.registerCallbacks(inst, cfg.Callbacks())
return inst, validateInstrumentName(name)
return m.float64ObservableInstrument(id, cfg.Callbacks())
}
// Float64ObservableUpDownCounter returns a new instrument identified by name
@ -218,14 +317,14 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O
// measurements recorded during the collection cycle are exported.
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
const kind = InstrumentKindObservableUpDownCounter
p := float64ObservProvider{m}
inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
if err != nil {
return nil, err
id := Instrument{
Name: name,
Description: cfg.Description(),
Unit: cfg.Unit(),
Kind: InstrumentKindObservableUpDownCounter,
Scope: m.scope,
}
p.registerCallbacks(inst, cfg.Callbacks())
return inst, validateInstrumentName(name)
return m.float64ObservableInstrument(id, cfg.Callbacks())
}
// Float64ObservableGauge returns a new instrument identified by name and
@ -234,14 +333,14 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl
// Only the measurements recorded during the collection cycle are exported.
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
cfg := metric.NewFloat64ObservableGaugeConfig(options...)
const kind = InstrumentKindObservableGauge
p := float64ObservProvider{m}
inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
if err != nil {
return nil, err
id := Instrument{
Name: name,
Description: cfg.Description(),
Unit: cfg.Unit(),
Kind: InstrumentKindObservableGauge,
Scope: m.scope,
}
p.registerCallbacks(inst, cfg.Callbacks())
return inst, validateInstrumentName(name)
return m.float64ObservableInstrument(id, cfg.Callbacks())
}
func validateInstrumentName(name string) error {
@ -273,6 +372,16 @@ func isAlphanumeric(c rune) bool {
return isAlpha(c) || ('0' <= c && c <= '9')
}
func warnRepeatedObservableCallbacks(id Instrument) {
inst := fmt.Sprintf(
"Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
)
global.Warn("Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.",
"instrument", inst,
)
}
// RegisterCallback registers f to be called each collection cycle so it will
// make observations for insts during those cycles.
//
@ -389,12 +498,14 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...
}
if _, registered := r.float64[oImpl.observablID]; !registered {
global.Error(errUnregObserver, "failed to record",
"name", oImpl.name,
"description", oImpl.description,
"unit", oImpl.unit,
"number", fmt.Sprintf("%T", float64(0)),
)
if !oImpl.dropAggregation {
global.Error(errUnregObserver, "failed to record",
"name", oImpl.name,
"description", oImpl.description,
"unit", oImpl.unit,
"number", fmt.Sprintf("%T", float64(0)),
)
}
return
}
c := metric.NewObserveConfig(opts)
@ -422,12 +533,14 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric
}
if _, registered := r.int64[oImpl.observablID]; !registered {
global.Error(errUnregObserver, "failed to record",
"name", oImpl.name,
"description", oImpl.description,
"unit", oImpl.unit,
"number", fmt.Sprintf("%T", int64(0)),
)
if !oImpl.dropAggregation {
global.Error(errUnregObserver, "failed to record",
"name", oImpl.name,
"description", oImpl.description,
"unit", oImpl.unit,
"number", fmt.Sprintf("%T", int64(0)),
)
}
return
}
c := metric.NewObserveConfig(opts)
@ -474,14 +587,28 @@ func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramC
// lookup returns the resolved instrumentImpl.
func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) {
aggs, err := p.aggs(kind, name, desc, u)
return &int64Inst{measures: aggs}, err
return p.meter.int64Insts.Lookup(instID{
Name: name,
Description: desc,
Unit: u,
Kind: kind,
}, func() (*int64Inst, error) {
aggs, err := p.aggs(kind, name, desc, u)
return &int64Inst{measures: aggs}, err
})
}
// lookupHistogram returns the resolved instrumentImpl.
func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) {
aggs, err := p.histogramAggs(name, cfg)
return &int64Inst{measures: aggs}, err
return p.meter.int64Insts.Lookup(instID{
Name: name,
Description: cfg.Description(),
Unit: cfg.Unit(),
Kind: InstrumentKindHistogram,
}, func() (*int64Inst, error) {
aggs, err := p.histogramAggs(name, cfg)
return &int64Inst{measures: aggs}, err
})
}
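With this cache in place, identically-defined instruments resolve to one shared *int64Inst; a brief sketch (names illustrative):

c1, _ := meter.Int64Counter("queue.depth")
c2, _ := meter.Int64Counter("queue.depth") // same name, description, and unit

// Both handles are backed by the same cached instrument, so these two Adds
// land in a single aggregation rather than in duplicate timeseries.
c1.Add(ctx, 1)
c2.Add(ctx, 1)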
// float64InstProvider provides float64 OpenTelemetry instruments.
@ -518,42 +645,33 @@ func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64Histog
// lookup returns the resolved instrumentImpl.
func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) {
aggs, err := p.aggs(kind, name, desc, u)
return &float64Inst{measures: aggs}, err
return p.meter.float64Insts.Lookup(instID{
Name: name,
Description: desc,
Unit: u,
Kind: kind,
}, func() (*float64Inst, error) {
aggs, err := p.aggs(kind, name, desc, u)
return &float64Inst{measures: aggs}, err
})
}
// lookupHistogram returns the resolved instrumentImpl.
func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) {
aggs, err := p.histogramAggs(name, cfg)
return &float64Inst{measures: aggs}, err
}
type int64ObservProvider struct{ *meter }
func (p int64ObservProvider) lookup(kind InstrumentKind, name, desc, u string) (int64Observable, error) {
aggs, err := (int64InstProvider)(p).aggs(kind, name, desc, u)
return newInt64Observable(p.meter, kind, name, desc, u, aggs), err
}
func (p int64ObservProvider) registerCallbacks(inst int64Observable, cBacks []metric.Int64Callback) {
if inst.observable == nil || len(inst.measures) == 0 {
// Drop aggregator.
return
}
for _, cBack := range cBacks {
p.pipes.registerCallback(p.callback(inst, cBack))
}
}
func (p int64ObservProvider) callback(i int64Observable, f metric.Int64Callback) func(context.Context) error {
inst := int64Observer{int64Observable: i}
return func(ctx context.Context) error { return f(ctx, inst) }
return p.meter.float64Insts.Lookup(instID{
Name: name,
Description: cfg.Description(),
Unit: cfg.Unit(),
Kind: InstrumentKindHistogram,
}, func() (*float64Inst, error) {
aggs, err := p.histogramAggs(name, cfg)
return &float64Inst{measures: aggs}, err
})
}
type int64Observer struct {
embedded.Int64Observer
int64Observable
measures[int64]
}
func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) {
@ -561,32 +679,9 @@ func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) {
o.observe(val, c.Attributes())
}
type float64ObservProvider struct{ *meter }
func (p float64ObservProvider) lookup(kind InstrumentKind, name, desc, u string) (float64Observable, error) {
aggs, err := (float64InstProvider)(p).aggs(kind, name, desc, u)
return newFloat64Observable(p.meter, kind, name, desc, u, aggs), err
}
func (p float64ObservProvider) registerCallbacks(inst float64Observable, cBacks []metric.Float64Callback) {
if inst.observable == nil || len(inst.measures) == 0 {
// Drop aggregator.
return
}
for _, cBack := range cBacks {
p.pipes.registerCallback(p.callback(inst, cBack))
}
}
func (p float64ObservProvider) callback(i float64Observable, f metric.Float64Callback) func(context.Context) error {
inst := float64Observer{float64Observable: i}
return func(ctx context.Context) error { return f(ctx, inst) }
}
type float64Observer struct {
embedded.Float64Observer
float64Observable
measures[float64]
}
func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) {


@ -15,6 +15,7 @@
package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
import (
"encoding/json"
"time"
"go.opentelemetry.io/otel/attribute"
@ -211,6 +212,19 @@ type Extrema[N int64 | float64] struct {
valid bool
}
// MarshalText converts the Extrema value to text.
func (e Extrema[N]) MarshalText() ([]byte, error) {
if !e.valid {
return json.Marshal(nil)
}
return json.Marshal(e.value)
}
// MarshalJSON converts the Extrema value to a JSON number.
func (e *Extrema[N]) MarshalJSON() ([]byte, error) {
return e.MarshalText()
}
// NewExtrema returns an Extrema set to v.
func NewExtrema[N int64 | float64](v N) Extrema[N] {
return Extrema[N]{value: v, valid: true}
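A sketch of the marshaling behavior these methods give (pointers are used so the json.Marshaler implementation is selected):

min := metricdata.NewExtrema[int64](3)
b, _ := json.Marshal(&min) // b == []byte("3")

var unset metricdata.Extrema[int64]
b, _ = json.Marshal(&unset) // b == []byte("null"): the zero value is not valid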


@ -29,6 +29,7 @@ import (
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/metric/internal"
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
"go.opentelemetry.io/otel/sdk/metric/internal/x"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
"go.opentelemetry.io/otel/sdk/resource"
)
@ -93,14 +94,6 @@ func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) {
p.aggregations[scope] = append(p.aggregations[scope], iSync)
}
// addCallback registers a single instrument callback to be run when
// `produce()` is called.
func (p *pipeline) addCallback(cback func(context.Context) error) {
p.Lock()
defer p.Unlock()
p.callbacks = append(p.callbacks, cback)
}
type multiCallback func(context.Context) error
// addMultiCallback registers a multi-instrument callback to be run when
@ -281,6 +274,14 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation)
return measures, errs.errorOrNil()
}
// addCallback registers a single instrument callback to be run when
// `produce()` is called.
func (i *inserter[N]) addCallback(cback func(context.Context) error) {
i.pipeline.Lock()
defer i.pipeline.Unlock()
i.pipeline.callbacks = append(i.pipeline.callbacks, cback)
}
var aggIDCount uint64
// aggVal is the cached value in the aggregators cache.
@ -358,9 +359,16 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum
normID := id.normalize()
cv := i.aggregators.Lookup(normID, func() aggVal[N] {
b := aggregate.Builder[N]{
Temporality: i.pipeline.reader.temporality(kind),
Temporality: i.pipeline.reader.temporality(kind),
ReservoirFunc: reservoirFunc[N](stream.Aggregation),
}
b.Filter = stream.AttributeFilter
// A value less than or equal to zero will disable the aggregation
// limits for the builder (and all the created aggregates).
// CardinalityLimit.Lookup returns 0 by default if unset (or
// unrecognized input). Use that value directly.
b.AggregationLimit, _ = x.CardinalityLimit.Lookup()
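// Editor's note (assumption, not part of this diff): the limit is an
// experimental knob; e.g. OTEL_GO_X_CARDINALITY_LIMIT=2000 in the process
// environment would cap each aggregate at 2000 attribute sets, overflowing
// extra sets into the otel.metric.overflow attribute.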
in, out, err := i.aggregateFunc(b, stream.Aggregation, kind)
if err != nil {
return aggVal[N]{0, nil, err}
@ -557,12 +565,6 @@ func newPipelines(res *resource.Resource, readers []Reader, views []View) pipeli
return pipes
}
func (p pipelines) registerCallback(cback func(context.Context) error) {
for _, pipe := range p {
pipe.addCallback(cback)
}
}
func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration {
unregs := make([]func(), len(p))
for i, pipe := range p {


@ -16,5 +16,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
// version is the current release version of the metric SDK in use.
func version() string {
return "1.20.0"
return "1.24.0"
}

View file

@ -41,8 +41,20 @@ type Detector interface {
// must never be done outside of a new major release.
}
// Detect calls all input detectors sequentially and merges each result with the previous one.
// It returns the merged error too.
// Detect returns a new [Resource] merged from all the Resources each of the
// detectors produces. Each of the detectors is called sequentially, in the
// order they are passed, and each produced resource is merged into the previous.
//
// This may return a partial Resource along with an error containing
// [ErrPartialResource] if that error is returned from a detector. It may also
// return a merge-conflicting Resource along with an error containing
// [ErrSchemaURLConflict] if merging Resources from different detectors results
// in a schema URL conflict. It is up to the caller to determine if this
// returned Resource should be used or not.
//
// If one of the detectors returns an error that is not [ErrPartialResource],
// the resource produced by the detector will not be merged and the returned
// error will wrap that detector's error.
func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
r := new(Resource)
return r, detect(ctx, r, detectors)
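A caller-side sketch of these semantics (detector and value are illustrative; semconv is the v1.24.0 package imported elsewhere in this commit):

det := resource.StringDetector(semconv.SchemaURL, semconv.ServiceNameKey,
	func() (string, error) { return "gotosocial", nil })

res, err := resource.Detect(ctx, det)
switch {
case err == nil || errors.Is(err, resource.ErrPartialResource):
	// res is usable, possibly with fewer attributes than requested.
case errors.Is(err, resource.ErrSchemaURLConflict):
	// res merged, but carries no schema URL; the caller decides whether to keep it.
}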
@ -50,6 +62,10 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
// detect runs all detectors using ctx and merges the result into res. This
// assumes res is allocated and not nil, it will panic otherwise.
//
// If running the detectors or merging their resources produces any errors
// (i.e. [ErrPartialResource], [ErrSchemaURLConflict]), a single error wrapping all of
// these errors will be returned. Otherwise, nil is returned.
func detect(ctx context.Context, res *Resource, detectors []Detector) error {
var (
r *Resource
@ -78,6 +94,11 @@ func detect(ctx context.Context, res *Resource, detectors []Detector) error {
if len(errs) == 0 {
return nil
}
if errors.Is(errs, ErrSchemaURLConflict) {
// If there has been a merge conflict, ensure the resource has no
// schema URL.
res.schemaURL = ""
}
return errs
}


@ -22,7 +22,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
type (


@ -22,14 +22,14 @@ import (
"os"
"regexp"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
type containerIDProvider func() (string, error)
var (
containerID containerIDProvider = getContainerIDFromCGroup
cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`)
cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)
)
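The widened `[-:]` class also matches colon-delimited runtime prefixes; a sketch against a hypothetical cgroup line:

line := "12:cpuset:/kubepods/pod1234/cri-containerd:0123456789abcdef"
if m := cgroupContainerIDRe.FindStringSubmatch(line); len(m) == 2 {
	id := m[1] // "0123456789abcdef"; the old `(?:.*-)?` form missed this layout
	_ = id
}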
type cgroupContainerIDDetector struct{}


@ -23,7 +23,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
const (


@ -19,7 +19,7 @@ import (
"errors"
"strings"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
type hostIDProvider func() (string, error)


@ -19,7 +19,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
type osDescriptionProvider func() (string, error)


@ -22,7 +22,7 @@ import (
"path/filepath"
"runtime"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
type (


@ -17,6 +17,7 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource"
import (
"context"
"errors"
"fmt"
"sync"
"go.opentelemetry.io/otel"
@ -40,9 +41,20 @@ var (
defaultResourceOnce sync.Once
)
var errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL")
// ErrSchemaURLConflict is an error returned when two Resources are merged
// together that contain different, non-empty, schema URLs.
var ErrSchemaURLConflict = errors.New("conflicting Schema URL")
// New returns a Resource combined from the user-provided detectors.
// New returns a [Resource] built using opts.
//
// This may return a partial Resource along with an error containing
// [ErrPartialResource] if options that provide a [Detector] are used and that
// error is returned from one or more of the Detectors. It may also return a
// merge-conflict Resource along with an error containing
// [ErrSchemaURLConflict] if merging Resources from the opts results in a
// schema URL conflict (see [Resource.Merge] for more information). It is up to
// the caller to determine if this returned Resource should be used or not
// based on these errors.
func New(ctx context.Context, opts ...Option) (*Resource, error) {
cfg := config{}
for _, opt := range opts {
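A usage sketch of the error contract described above (options and attribute value are illustrative; semconv as imported elsewhere in this commit):

res, err := resource.New(ctx,
	resource.WithFromEnv(),
	resource.WithTelemetrySDK(),
	resource.WithAttributes(semconv.ServiceName("gotosocial")),
)
if err != nil &&
	!errors.Is(err, resource.ErrPartialResource) &&
	!errors.Is(err, resource.ErrSchemaURLConflict) {
	return err // unexpected failure; do not trust res
}
// Otherwise res is usable, though possibly partial or schemaless.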
@ -98,7 +110,7 @@ func (r *Resource) String() string {
return r.attrs.Encoded(attribute.DefaultEncoder())
}
// MarshalLog is the marshaling function used by the logging system to represent this exporter.
// MarshalLog is the marshaling function used by the logging system to represent this Resource.
func (r *Resource) MarshalLog() interface{} {
return struct {
Attributes attribute.Set
@ -146,16 +158,29 @@ func (r *Resource) Equal(eq *Resource) bool {
return r.Equivalent() == eq.Equivalent()
}
// Merge creates a new resource by combining resource a and b.
// Merge creates a new [Resource] by merging a and b.
//
// If there are common keys between resource a and b, then the value
// from resource b will overwrite the value from resource a, even
// if resource b's value is empty.
// If there are common keys between a and b, then the value from b will
// overwrite the value from a, even if b's value is empty.
//
// The SchemaURL of the resources will be merged according to the spec rules:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge
// If the resources have different non-empty schemaURL an empty resource and an error
// will be returned.
// The SchemaURL of the resources will be merged according to the
// [OpenTelemetry specification rules]:
//
// - If a's schema URL is empty then the returned Resource's schema URL will
// be set to the schema URL of b,
// - Else if b's schema URL is empty then the returned Resource's schema URL
// will be set to the schema URL of a,
// - Else if the schema URLs of a and b are the same then that will be the
// schema URL of the returned Resource,
// - Else this is a merging error. If the resources have different,
// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will
// be returned with the merged Resource. The merged Resource will have an
// empty schema URL. It may be the case that some unintended attributes
// have been overwritten or old semantic conventions persisted in the
// returned Resource. It is up to the caller to determine if this returned
// Resource should be used or not.
//
// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge
func Merge(a, b *Resource) (*Resource, error) {
if a == nil && b == nil {
return Empty(), nil
@ -167,19 +192,6 @@ func Merge(a, b *Resource) (*Resource, error) {
return a, nil
}
// Merge the schema URL.
var schemaURL string
switch true {
case a.schemaURL == "":
schemaURL = b.schemaURL
case b.schemaURL == "":
schemaURL = a.schemaURL
case a.schemaURL == b.schemaURL:
schemaURL = a.schemaURL
default:
return Empty(), errMergeConflictSchemaURL
}
// Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key()
// Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...)
mi := attribute.NewMergeIterator(b.Set(), a.Set())
@ -187,8 +199,23 @@ func Merge(a, b *Resource) (*Resource, error) {
for mi.Next() {
combine = append(combine, mi.Attribute())
}
merged := NewWithAttributes(schemaURL, combine...)
return merged, nil
switch {
case a.schemaURL == "":
return NewWithAttributes(b.schemaURL, combine...), nil
case b.schemaURL == "":
return NewWithAttributes(a.schemaURL, combine...), nil
case a.schemaURL == b.schemaURL:
return NewWithAttributes(a.schemaURL, combine...), nil
}
// Return the merged resource with an appropriate error. It is up to
// the user to decide if the returned resource can be used or not.
return NewSchemaless(combine...), fmt.Errorf(
"%w: %s and %s",
ErrSchemaURLConflict,
a.schemaURL,
b.schemaURL,
)
}
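A sketch of the conflict branch (schema URLs and attributes are illustrative):

a := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.21.0",
	attribute.String("service.name", "svc"))
b := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.24.0",
	attribute.String("service.version", "0.1.0"))

merged, err := resource.Merge(a, b)
// errors.Is(err, resource.ErrSchemaURLConflict) reports true; merged keeps
// both attributes, but merged.SchemaURL() returns "".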
// Empty returns an instance of Resource with no attributes. It is


@ -406,7 +406,7 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan)
return false
}
// MarshalLog is the marshaling function used by the logging system to represent this exporter.
// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
func (bsp *batchSpanProcessor) MarshalLog() interface{} {
return struct {
Type string


@ -55,7 +55,7 @@ type tracerProviderConfig struct {
resource *resource.Resource
}
// MarshalLog is the marshaling function used by the logging system to represent this exporter.
// MarshalLog is the marshaling function used by the logging system to represent this Provider.
func (cfg tracerProviderConfig) MarshalLog() interface{} {
return struct {
SpanProcessors []SpanProcessor


@ -30,7 +30,7 @@ import (
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/internal"
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@ -208,6 +208,16 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) {
s.status = status
}
// ensureAttributesCapacity inlines functionality from slices.Grow
// so that we can avoid needing to import golang.org/x/exp for go1.20.
// Once support for go1.20 is dropped, we can use slices.Grow available since go1.21 instead.
// Tracking issue: https://github.com/open-telemetry/opentelemetry-go/issues/4819.
func (s *recordingSpan) ensureAttributesCapacity(minCapacity int) {
if n := minCapacity - cap(s.attributes); n > 0 {
s.attributes = append(s.attributes[:cap(s.attributes)], make([]attribute.KeyValue, n)...)[:len(s.attributes)]
}
}
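For reference, a sketch of the go1.21+ equivalent this inlines, under the same growth semantics:

s.attributes = slices.Grow(s.attributes, minCapacity-len(s.attributes))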
// SetAttributes sets attributes of this span.
//
// If a key from attributes already exists the value associated with that key
@ -242,6 +252,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
// Otherwise, add without deduplication. When attributes are read they
// will be deduplicated, optimizing the operation.
s.ensureAttributesCapacity(len(s.attributes) + len(attributes))
for _, a := range attributes {
if !a.Valid() {
// Drop all invalid attributes.
@ -277,6 +288,12 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
// Now that s.attributes is deduplicated, adding unique attributes up to
// the capacity of s will not over allocate s.attributes.
if sum := len(attrs) + len(s.attributes); sum < limit {
// After support for go1.20 is dropped, simplify if-else to min(sum, limit).
s.ensureAttributesCapacity(sum)
} else {
s.ensureAttributesCapacity(limit)
}
for _, a := range attrs {
if !a.Valid() {
// Drop all invalid attributes.


@ -16,5 +16,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
return "1.20.0"
return "1.24.0"
}