[chore] bump dependencies (#4406)

- codeberg.org/gruf/go-ffmpreg: v0.6.9 -> v0.6.10
- github.com/ncruces/go-sqlite3: v0.27.1 -> v0.28.0
- github.com/stretchr/testify: v1.10.0 -> v1.11.1
- github.com/tdewolff/minify/v2: v2.23.11 -> v2.24.2
- go.opentelemetry.io/otel{,/*}: v1.37.0 -> v1.38.0
- go.opentelemetry.io/contrib/*: v0.62.0 -> v0.63.0

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4406
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
commit 78defcd916
kim authored 2025-09-04 15:29:27 +02:00, committed by kim
274 changed files with 9213 additions and 2368 deletions


@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -329,7 +329,7 @@ func (q *queue) TryDequeue(buf []Record, write func([]Record) bool) int {
origRead := q.read
n := min(len(buf), q.len)
for i := 0; i < n; i++ {
for i := range n {
buf[i] = q.read.Value
q.read = q.read.Next()
}
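The hunk above replaces the counted loop with Go 1.22's range-over-an-integer form; both visit i = 0 … n-1. A minimal standalone sketch of the equivalence (the names here are illustrative, not taken from the patched queue):

```go
package main

import "fmt"

func main() {
	n := 3

	// Pre-Go 1.22 counted loop.
	for i := 0; i < n; i++ {
		fmt.Println("classic:", i)
	}

	// Go 1.22+ form used by the new code: ranges over 0, 1, ..., n-1.
	for i := range n {
		fmt.Println("range:", i)
	}
}
```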


@ -30,6 +30,9 @@ should be used to describe the unique runtime environment instrumented code
is being run on. That way when multiple instances of the code are collected
at a single endpoint their origin is decipherable.
See [go.opentelemetry.io/otel/sdk/log/internal/x] for information about
the experimental features.
See [go.opentelemetry.io/otel/log] for more information about
the OpenTelemetry Logs API.
*/


@ -30,7 +30,7 @@ import (
// It provides a Processor used to filter out [Record]
// that has a [log.Severity] below a threshold.
type FilterProcessor interface {
// Enabled returns whether the Processor will process for the given context
// Enabled reports whether the Processor will process for the given context
// and param.
//
// The passed param is likely to be a partial record information being


@ -0,0 +1,34 @@
# Experimental Features

The Logs SDK contains features that have not yet stabilized in the OpenTelemetry specification.
These features are added to the OpenTelemetry Go Logs SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
These features may change in backwards-incompatible ways as feedback is applied.
See the [Compatibility and Stability](#compatibility-and-stability) section for more information.

## Features

- [Self-Observability](#self-observability)

### Self-Observability

The Logs SDK provides a self-observability feature that allows you to monitor the SDK itself.
To opt in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`.
When enabled, the SDK will create the following metrics using the global `MeterProvider`:

- `otel.sdk.log.created`

Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics.

[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md

## Compatibility and Stability

Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
These features may be removed or modified in successive version releases, including patch versions.

When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
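As a rough sketch of opting in (assuming the public sdk/log and log packages; the logger name and body below are made up), the flag has to be set before the logger is created, since it is read at logger construction:

```go
package main

import (
	"context"
	"os"

	"go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

func main() {
	// Opt in to the experimental self-observability metrics. The flag is
	// read when a logger is created, so it must be set beforehand.
	os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true")

	provider := sdklog.NewLoggerProvider()
	defer func() { _ = provider.Shutdown(context.Background()) }()

	logger := provider.Logger("example/self-observability")

	var rec log.Record
	rec.SetBody(log.StringValue("hello"))
	// Each emitted record bumps the otel.sdk.log.created counter on the
	// global MeterProvider.
	logger.Emit(context.Background(), rec)
}
```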


@ -0,0 +1,63 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/log].
package x // import "go.opentelemetry.io/otel/sdk/log/internal/x"

import (
	"os"
	"strings"
)

// SelfObservability is an experimental feature flag that determines if SDK
// self-observability metrics are enabled.
//
// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) {
	if strings.EqualFold(v, "true") {
		return v, true
	}
	return "", false
})

// Feature is an experimental feature control flag. It provides a uniform way
// to interact with these feature flags and parse their values.
type Feature[T any] struct {
	key   string
	parse func(v string) (T, bool)
}

func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
	const envKeyRoot = "OTEL_GO_X_"
	return Feature[T]{
		key:   envKeyRoot + suffix,
		parse: parse,
	}
}

// Key returns the environment variable key that needs to be set to enable the
// feature.
func (f Feature[T]) Key() string { return f.key }

// Lookup returns the user configured value for the feature and true if the
// user has enabled the feature. Otherwise, if the feature is not enabled, a
// zero-value and false are returned.
func (f Feature[T]) Lookup() (v T, ok bool) {
	// https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
	//
	// > The SDK MUST interpret an empty value of an environment variable the
	// > same way as when the variable is unset.
	vRaw := os.Getenv(f.key)
	if vRaw == "" {
		return v, ok
	}
	return f.parse(vRaw)
}

// Enabled reports whether the feature is enabled.
func (f Feature[T]) Enabled() bool {
	_, ok := f.Lookup()
	return ok
}
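The x package is internal to the SDK and cannot be imported by user code; the standalone sketch below (a hypothetical lookupFlag helper, not part of the SDK) mirrors the lookup rules above: an empty variable counts as unset, and "true" is matched case-insensitively.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// lookupFlag mirrors Feature.Lookup: empty means unset, and only a
// case-insensitive "true" enables the flag.
func lookupFlag(key string) (string, bool) {
	v := os.Getenv(key)
	if v == "" {
		return "", false
	}
	if strings.EqualFold(v, "true") {
		return v, true
	}
	return "", false
}

func main() {
	os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "TRUE")
	if v, ok := lookupFlag("OTEL_GO_X_SELF_OBSERVABILITY"); ok {
		fmt.Println("self-observability enabled with value", v)
	}
}
```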


@ -5,12 +5,18 @@ package log // import "go.opentelemetry.io/otel/sdk/log"
import (
"context"
"fmt"
"time"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/log"
"go.opentelemetry.io/otel/log/embedded"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/log/internal/x"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/trace"
)
@ -24,13 +30,31 @@ type logger struct {
provider *LoggerProvider
instrumentationScope instrumentation.Scope
selfObservabilityEnabled bool
logCreatedMetric otelconv.SDKLogCreated
}
func newLogger(p *LoggerProvider, scope instrumentation.Scope) *logger {
return &logger{
l := &logger{
provider: p,
instrumentationScope: scope,
}
if !x.SelfObservability.Enabled() {
return l
}
l.selfObservabilityEnabled = true
mp := otel.GetMeterProvider()
m := mp.Meter("go.opentelemetry.io/otel/sdk/log",
metric.WithInstrumentationVersion(sdk.Version()),
metric.WithSchemaURL(semconv.SchemaURL))
var err error
if l.logCreatedMetric, err = otelconv.NewSDKLogCreated(m); err != nil {
err = fmt.Errorf("failed to create log created metric: %w", err)
otel.Handle(err)
}
return l
}
func (l *logger) Emit(ctx context.Context, r log.Record) {
@ -84,7 +108,6 @@ func (l *logger) newRecord(ctx context.Context, r log.Record) Record {
observedTimestamp: r.ObservedTimestamp(),
severity: r.Severity(),
severityText: r.SeverityText(),
body: r.Body(),
traceID: sc.TraceID(),
spanID: sc.SpanID(),
@ -94,7 +117,14 @@ func (l *logger) newRecord(ctx context.Context, r log.Record) Record {
scope: &l.instrumentationScope,
attributeValueLengthLimit: l.provider.attributeValueLengthLimit,
attributeCountLimit: l.provider.attributeCountLimit,
allowDupKeys: l.provider.allowDupKeys,
}
if l.selfObservabilityEnabled {
l.logCreatedMetric.Add(ctx, 1)
}
// This ensures we deduplicate key-value collections in the log body
newRecord.SetBody(r.Body())
// This field SHOULD be set once the event is observed by OpenTelemetry.
if newRecord.observedTimestamp.IsZero() {

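One way to actually observe the counter that newLogger registers is to install a manual-reader meter provider as the global one before any loggers are created; the wiring below is a sketch under that assumption, not something this patch sets up:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	ctx := context.Background()
	os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true")

	// Back the global MeterProvider with a manual reader so the SDK's own
	// metrics can be collected in-process.
	reader := sdkmetric.NewManualReader()
	otel.SetMeterProvider(sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader)))

	logger := sdklog.NewLoggerProvider().Logger("example")

	var rec log.Record
	rec.SetBody(log.StringValue("hello"))
	logger.Emit(ctx, rec) // increments otel.sdk.log.created

	var rm metricdata.ResourceMetrics
	_ = reader.Collect(ctx, &rm)
	for _, sm := range rm.ScopeMetrics {
		for _, m := range sm.Metrics {
			fmt.Println(m.Name) // expect otel.sdk.log.created among the output
		}
	}
}
```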

@ -32,6 +32,7 @@ type providerConfig struct {
fltrProcessors []FilterProcessor
attrCntLim setting[int]
attrValLenLim setting[int]
allowDupKeys setting[bool]
}
func newProviderConfig(opts []LoggerProviderOption) providerConfig {
@ -67,6 +68,7 @@ type LoggerProvider struct {
fltrProcessors []FilterProcessor
attributeCountLimit int
attributeValueLengthLimit int
allowDupKeys bool
loggersMu sync.Mutex
loggers map[instrumentation.Scope]*logger
@ -93,6 +95,7 @@ func NewLoggerProvider(opts ...LoggerProviderOption) *LoggerProvider {
fltrProcessors: cfg.fltrProcessors,
attributeCountLimit: cfg.attrCntLim.Value,
attributeValueLengthLimit: cfg.attrValLenLim.Value,
allowDupKeys: cfg.allowDupKeys.Value,
}
}
@ -254,3 +257,21 @@ func WithAttributeValueLengthLimit(limit int) LoggerProviderOption {
return cfg
})
}
// WithAllowKeyDuplication sets whether deduplication is skipped for log attributes or other key-value collections.
//
// By default, the key-value collections within a log record are deduplicated to comply with the OpenTelemetry Specification.
// Deduplication means that if multiple key-value pairs with the same key are present, only a single pair
// is retained and others are discarded.
//
// Disabling deduplication with this option can improve performance, e.g. when adding attributes to the log record.
//
// Note that if you disable deduplication, you are responsible for ensuring that duplicate
// key-value pairs within a single collection are not emitted,
// or that the telemetry receiver can handle such duplicates.
func WithAllowKeyDuplication() LoggerProviderOption {
return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig {
cfg.allowDupKeys = newSetting(true)
return cfg
})
}
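For callers that already guarantee unique keys (or whose backend tolerates duplicates), the option is applied at provider construction; a brief sketch with no processor or exporter configured:

```go
package main

import (
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

func main() {
	// Skip key-value deduplication for every logger from this provider.
	provider := sdklog.NewLoggerProvider(
		sdklog.WithAllowKeyDuplication(),
	)
	_ = provider.Logger("example/no-dedup")
}
```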


@ -93,6 +93,9 @@ type Record struct {
attributeValueLengthLimit int
attributeCountLimit int
// specifies whether we should deduplicate any key-value collections or not
allowDupKeys bool
noCmp [0]func() //nolint: unused // This is indeed used.
}
@ -167,7 +170,11 @@ func (r *Record) Body() log.Value {
// SetBody sets the body of the log record.
func (r *Record) SetBody(v log.Value) {
r.body = v
if !r.allowDupKeys {
r.body = r.dedupeBodyCollections(v)
} else {
r.body = v
}
}
// WalkAttributes walks all attributes the log record holds by calling f for
@ -192,56 +199,60 @@ func (r *Record) AddAttributes(attrs ...log.KeyValue) {
if n == 0 {
// Avoid the more complex duplicate map lookups below.
var drop int
attrs, drop = dedup(attrs)
r.setDropped(drop)
if !r.allowDupKeys {
attrs, drop = dedup(attrs)
r.setDropped(drop)
}
attrs, drop = head(attrs, r.attributeCountLimit)
attrs, drop := head(attrs, r.attributeCountLimit)
r.addDropped(drop)
r.addAttrs(attrs)
return
}
// Used to find duplicates between attrs and existing attributes in r.
rIndex := r.attrIndex()
defer putIndex(rIndex)
if !r.allowDupKeys {
// Used to find duplicates between attrs and existing attributes in r.
rIndex := r.attrIndex()
defer putIndex(rIndex)
// Unique attrs that need to be added to r. This uses the same underlying
// array as attrs.
//
// Note, do not iterate attrs twice by just calling dedup(attrs) here.
unique := attrs[:0]
// Used to find duplicates within attrs itself. The index value is the
// index of the element in unique.
uIndex := getIndex()
defer putIndex(uIndex)
// Unique attrs that need to be added to r. This uses the same underlying
// array as attrs.
//
// Note, do not iterate attrs twice by just calling dedup(attrs) here.
unique := attrs[:0]
// Used to find duplicates within attrs itself. The index value is the
// index of the element in unique.
uIndex := getIndex()
defer putIndex(uIndex)
// Deduplicate attrs within the scope of all existing attributes.
for _, a := range attrs {
// Last-value-wins for any duplicates in attrs.
idx, found := uIndex[a.Key]
if found {
r.addDropped(1)
unique[idx] = a
continue
}
idx, found = rIndex[a.Key]
if found {
// New attrs overwrite any existing with the same key.
r.addDropped(1)
if idx < 0 {
r.front[-(idx + 1)] = a
} else {
r.back[idx] = a
// Deduplicate attrs within the scope of all existing attributes.
for _, a := range attrs {
// Last-value-wins for any duplicates in attrs.
idx, found := uIndex[a.Key]
if found {
r.addDropped(1)
unique[idx] = a
continue
}
idx, found = rIndex[a.Key]
if found {
// New attrs overwrite any existing with the same key.
r.addDropped(1)
if idx < 0 {
r.front[-(idx + 1)] = a
} else {
r.back[idx] = a
}
} else {
// Unique attribute.
unique = append(unique, a)
uIndex[a.Key] = len(unique) - 1
}
} else {
// Unique attribute.
unique = append(unique, a)
uIndex[a.Key] = len(unique) - 1
}
attrs = unique
}
attrs = unique
if r.attributeCountLimit > 0 && n+len(attrs) > r.attributeCountLimit {
// Truncate the now unique attributes to comply with limit.
@ -297,8 +308,11 @@ func (r *Record) addAttrs(attrs []log.KeyValue) {
// SetAttributes sets (and overrides) attributes to the log record.
func (r *Record) SetAttributes(attrs ...log.KeyValue) {
var drop int
attrs, drop = dedup(attrs)
r.setDropped(drop)
r.setDropped(0)
if !r.allowDupKeys {
attrs, drop = dedup(attrs)
r.setDropped(drop)
}
attrs, drop = head(attrs, r.attributeCountLimit)
r.addDropped(drop)
@ -426,10 +440,14 @@ func (r *Record) applyValueLimits(val log.Value) log.Value {
}
val = log.SliceValue(sl...)
case log.KindMap:
// Deduplicate then truncate. Do not do at the same time to avoid
// wasted truncation operations.
kvs, dropped := dedup(val.AsMap())
r.addDropped(dropped)
kvs := val.AsMap()
if !r.allowDupKeys {
// Deduplicate then truncate. Do not do at the same time to avoid
// wasted truncation operations.
var dropped int
kvs, dropped = dedup(kvs)
r.addDropped(dropped)
}
for i := range kvs {
kvs[i] = r.applyAttrLimits(kvs[i])
}
@ -438,6 +456,24 @@ func (r *Record) applyValueLimits(val log.Value) log.Value {
return val
}
func (r *Record) dedupeBodyCollections(val log.Value) log.Value {
switch val.Kind() {
case log.KindSlice:
sl := val.AsSlice()
for i := range sl {
sl[i] = r.dedupeBodyCollections(sl[i])
}
val = log.SliceValue(sl...)
case log.KindMap:
kvs, _ := dedup(val.AsMap())
for i := range kvs {
kvs[i].Value = r.dedupeBodyCollections(kvs[i].Value)
}
val = log.MapValue(kvs...)
}
return val
}
// truncate returns a truncated version of s such that it contains less than
// the limit number of characters. Truncation is applied by returning the limit
// number of valid characters contained in s.
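A short sketch of the default (deduplicating) path that SetBody and dedupeBodyCollections implement, built with the public log value constructors; the duplicate "id" key is deliberately illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

func main() {
	provider := sdklog.NewLoggerProvider() // dedup is on by default
	logger := provider.Logger("example/dedup")

	var rec log.Record
	// The map body carries a duplicate "id" key. With dedup enabled the SDK
	// keeps a single pair (last value wins); with WithAllowKeyDuplication
	// both pairs would be passed through unchanged.
	rec.SetBody(log.MapValue(
		log.String("id", "first"),
		log.String("id", "second"),
	))
	logger.Emit(context.Background(), rec)
}
```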