[chore] update dependencies (#4188)

Update dependencies:
- github.com/gin-gonic/gin v1.10.0 -> v1.10.1
- github.com/gin-contrib/sessions v1.10.3 -> v1.10.4
- github.com/jackc/pgx/v5 v5.7.4 -> v5.7.5
- github.com/minio/minio-go/v7 v7.0.91 -> v7.0.92
- github.com/pquerna/otp v1.4.0 -> v1.5.0
- github.com/tdewolff/minify/v2 v2.23.5 -> v2.23.8
- github.com/yuin/goldmark v1.7.11 -> v1.7.12
- go.opentelemetry.io/otel{,/*} v1.35.0 -> v1.36.0
- modernc.org/sqlite v1.37.0 -> v1.37.1
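
A bump like this is normally reproduced locally with go get followed by go mod tidy and go mod vendor; the vendored go.opentelemetry.io/otel diffs below fall out of the vendor step. A minimal sketch with two of the modules from the list above (the exact invocation actually used may differ):

go get github.com/gin-gonic/gin@v1.10.1
go get go.opentelemetry.io/otel@v1.36.0 go.opentelemetry.io/otel/sdk@v1.36.0
go mod tidy
go mod vendor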

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4188
Reviewed-by: Daenney <daenney@noreply.codeberg.org>
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
commit b6ff55662e
Authored and committed by kim, 2025-05-22 16:27:55 +02:00
214 changed files with 44839 additions and 32023 deletions

Excerpts from the vendored go.opentelemetry.io/otel/sdk/metric changes follow.

@@ -121,7 +121,10 @@ func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
 
 // ExplicitBucketHistogram returns a histogram aggregate function input and
 // output.
-func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
+func (b Builder[N]) ExplicitBucketHistogram(
+	boundaries []float64,
+	noMinMax, noSum bool,
+) (Measure[N], ComputeAggregation) {
 	h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
 	switch b.Temporality {
 	case metricdata.DeltaTemporality:
@@ -133,7 +136,10 @@ func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSu
 
 // ExponentialBucketHistogram returns a histogram aggregate function input and
 // output.
-func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
+func (b Builder[N]) ExponentialBucketHistogram(
+	maxSize, maxScale int32,
+	noMinMax, noSum bool,
+) (Measure[N], ComputeAggregation) {
 	h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc())
 	switch b.Temporality {
 	case metricdata.DeltaTemporality:

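Builder is internal to the SDK; applications reach the explicit-bucket aggregate reformatted above through the public View configuration. A hedged sketch against go.opentelemetry.io/otel/sdk/metric, where the instrument name and boundaries are invented for illustration:

package metricsetup

import sdkmetric "go.opentelemetry.io/otel/sdk/metric"

// newProvider routes one instrument onto the explicit-bucket histogram
// aggregate; the instrument name and boundaries are illustrative only.
func newProvider() *sdkmetric.MeterProvider {
	view := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "http.request.duration"},
		sdkmetric.Stream{Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
			Boundaries: []float64{0.005, 0.01, 0.05, 0.1, 0.5, 1, 5},
			NoMinMax:   false, // becomes the noMinMax flag threaded through Builder
		}},
	)
	return sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
}
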
@@ -48,7 +48,12 @@ type expoHistogramDataPoint[N int64 | float64] struct {
 	zeroCount uint64
 }
 
-func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] { // nolint:revive // we need this control flag
+func newExpoHistogramDataPoint[N int64 | float64](
+	attrs attribute.Set,
+	maxSize int,
+	maxScale int32,
+	noMinMax, noSum bool,
+) *expoHistogramDataPoint[N] { // nolint:revive // we need this control flag
 	f := math.MaxFloat64
 	ma := N(f) // if N is int64, max will overflow to -9223372036854775808
 	mi := N(-f)
@@ -283,7 +288,12 @@ func (b *expoBuckets) downscale(delta int32) {
 // newExponentialHistogram returns an Aggregator that summarizes a set of
 // measurements as an exponential histogram. Each histogram is scoped by attributes
 // and the aggregation cycle the measurements were made in.
-func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *expoHistogram[N] {
+func newExponentialHistogram[N int64 | float64](
+	maxSize, maxScale int32,
+	noMinMax, noSum bool,
+	limit int,
+	r func(attribute.Set) FilteredExemplarReservoir[N],
+) *expoHistogram[N] {
 	return &expoHistogram[N]{
 		noSum:    noSum,
 		noMinMax: noMinMax,
@@ -314,7 +324,12 @@ type expoHistogram[N int64 | float64] struct {
 	start time.Time
 }
 
-func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+func (e *expoHistogram[N]) measure(
+	ctx context.Context,
+	value N,
+	fltrAttr attribute.Set,
+	droppedAttr []attribute.KeyValue,
+) {
 	// Ignore NaN and infinity.
 	if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) {
 		return
@@ -360,11 +375,19 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
 		hDPts[i].ZeroThreshold = 0.0
 
 		hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
-		hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
+		hDPts[i].PositiveBucket.Counts = reset(
+			hDPts[i].PositiveBucket.Counts,
+			len(val.posBuckets.counts),
+			len(val.posBuckets.counts),
+		)
 		copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
 
 		hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
-		hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
+		hDPts[i].NegativeBucket.Counts = reset(
+			hDPts[i].NegativeBucket.Counts,
+			len(val.negBuckets.counts),
+			len(val.negBuckets.counts),
+		)
 		copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
 
 		if !e.noSum {
@@ -413,11 +436,19 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
 		hDPts[i].ZeroThreshold = 0.0
 
 		hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
-		hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
+		hDPts[i].PositiveBucket.Counts = reset(
+			hDPts[i].PositiveBucket.Counts,
+			len(val.posBuckets.counts),
+			len(val.posBuckets.counts),
+		)
 		copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
 
 		hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
-		hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
+		hDPts[i].NegativeBucket.Counts = reset(
+			hDPts[i].NegativeBucket.Counts,
+			len(val.negBuckets.counts),
+			len(val.negBuckets.counts),
+		)
 		copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
 
 		if !e.noSum {

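The same View mechanism selects the base-2 exponential histogram whose constructor and measure path were wrapped above. Another hedged sketch; MaxSize 160 and MaxScale 20 are believed to be the SDK defaults, and the instrument name is invented:

package metricsetup

import sdkmetric "go.opentelemetry.io/otel/sdk/metric"

// expoView selects the base-2 exponential histogram aggregate for one
// instrument; these values flow into newExponentialHistogram above.
var expoView = sdkmetric.NewView(
	sdkmetric.Instrument{Name: "payload.size"}, // invented name
	sdkmetric.Stream{Aggregation: sdkmetric.AggregationBase2ExponentialHistogram{
		MaxSize:  160, // assumed SDK default
		MaxScale: 20,  // assumed SDK default
	}},
)
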
@@ -33,7 +33,10 @@ type filteredExemplarReservoir[N int64 | float64] struct {
 
 // NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values
 // that are allowed by the filter.
-func NewFilteredExemplarReservoir[N int64 | float64](f exemplar.Filter, r exemplar.Reservoir) FilteredExemplarReservoir[N] {
+func NewFilteredExemplarReservoir[N int64 | float64](
+	f exemplar.Filter,
+	r exemplar.Reservoir,
+) FilteredExemplarReservoir[N] {
 	return &filteredExemplarReservoir[N]{
 		filter:    f,
 		reservoir: r,

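The wrapper above is a simple gate: a measurement is offered to the underlying reservoir only when the exemplar filter approves its context. A self-contained sketch of that pattern, with simplified stand-ins (not the SDK's actual Filter and Reservoir types):

package sketch

import "context"

// Simplified stand-ins for exemplar.Filter and exemplar.Reservoir.
type exemplarFilter func(context.Context) bool

type exemplarReservoir[N int64 | float64] interface {
	Offer(ctx context.Context, value N)
}

// filteredReservoir mirrors filteredExemplarReservoir: values rejected by
// the filter never reach the reservoir.
type filteredReservoir[N int64 | float64] struct {
	filter    exemplarFilter
	reservoir exemplarReservoir[N]
}

func (f *filteredReservoir[N]) Offer(ctx context.Context, value N) {
	if f.filter(ctx) { // e.g. "is the current trace sampled?"
		f.reservoir.Offer(ctx, value)
	}
}
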
@@ -53,7 +53,12 @@ type histValues[N int64 | float64] struct {
 	valuesMu sync.Mutex
 }
 
-func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histValues[N] {
+func newHistValues[N int64 | float64](
+	bounds []float64,
+	noSum bool,
+	limit int,
+	r func(attribute.Set) FilteredExemplarReservoir[N],
+) *histValues[N] {
 	// The responsibility of keeping all buckets correctly associated with the
 	// passed boundaries is ultimately this type's responsibility. Make a copy
 	// here so we can always guarantee this. Or, in the case of failure, have
@@ -71,7 +76,12 @@ func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r
 
 // Aggregate records the measurement value, scoped by attr, and aggregates it
 // into a histogram.
-func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+func (s *histValues[N]) measure(
+	ctx context.Context,
+	value N,
+	fltrAttr attribute.Set,
+	droppedAttr []attribute.KeyValue,
+) {
 	// This search will return an index in the range [0, len(s.bounds)], where
 	// it will return len(s.bounds) if value is greater than the last element
 	// of s.bounds. This aligns with the buckets in that the length of buckets
@@ -108,7 +118,12 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute
 
 // newHistogram returns an Aggregator that summarizes a set of measurements as
 // an histogram.
-func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histogram[N] {
+func newHistogram[N int64 | float64](
+	boundaries []float64,
+	noMinMax, noSum bool,
+	limit int,
+	r func(attribute.Set) FilteredExemplarReservoir[N],
+) *histogram[N] {
 	return &histogram[N]{
 		histValues: newHistValues[N](boundaries, noSum, limit, r),
 		noMinMax:   noMinMax,

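The comment in measure above describes a binary search that returns an index in [0, len(bounds)], with len(bounds) acting as the overflow bucket; sort.SearchFloat64s has exactly that contract. A small standalone check with arbitrary boundaries:

package main

import (
	"fmt"
	"sort"
)

func main() {
	bounds := []float64{0, 5, 10} // arbitrary example boundaries
	for _, v := range []float64{-1, 3, 10, 42} {
		// SearchFloat64s returns the first index whose boundary is >= v,
		// so 42 lands in the overflow bucket at index len(bounds) == 3.
		fmt.Printf("%v -> bucket %d\n", v, sort.SearchFloat64s(bounds, v))
	}
}
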
@@ -114,7 +114,10 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) in
 
 // newPrecomputedLastValue returns an aggregator that summarizes a set of
 // observations as the last one made.
-func newPrecomputedLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedLastValue[N] {
+func newPrecomputedLastValue[N int64 | float64](
+	limit int,
+	r func(attribute.Set) FilteredExemplarReservoir[N],
+) *precomputedLastValue[N] {
 	return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)}
 }

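As far as I can tell, the precomputed last-value aggregate backs asynchronous gauges, where only the most recent observation per attribute set survives a collection cycle. A hedged sketch via the public API; the scope name, instrument name, and observed value are invented:

package metricsetup

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func registerGauge() error {
	meter := otel.Meter("example/metrics") // invented scope name
	// Only the last Observe per attribute set survives each collection,
	// matching the "last one made" doc comment above.
	_, err := meter.Int64ObservableGauge("queue.depth",
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(42) // invented value
			return nil
		}),
	)
	return err
}
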
@@ -143,7 +143,11 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
 // newPrecomputedSum returns an aggregator that summarizes a set of
 // observations as their arithmetic sum. Each sum is scoped by attributes and
 // the aggregation cycle the measurements were made in.
-func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedSum[N] {
+func newPrecomputedSum[N int64 | float64](
+	monotonic bool,
+	limit int,
+	r func(attribute.Set) FilteredExemplarReservoir[N],
+) *precomputedSum[N] {
 	return &precomputedSum[N]{
 		valueMap:  newValueMap[N](limit, r),
 		monotonic: monotonic,

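Likewise, the precomputed sum appears to back observable counters: the callback reports a running total, and the aggregator derives per-cycle deltas from successive totals when delta temporality is configured. A hedged sketch with invented names:

package metricsetup

import (
	"context"
	"sync/atomic"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

var jobsDone atomic.Int64 // running total owned by the application

func registerCounter() error {
	meter := otel.Meter("example/metrics") // invented scope name
	// Monotonic sum: the observed total must never decrease.
	_, err := meter.Int64ObservableCounter("jobs.completed",
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(jobsDone.Load())
			return nil
		}),
	)
	return err
}
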
@@ -1,6 +1,7 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
+// Package internal provides internal functionality for the metric package.
 package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
 
 // ReuseSlice returns a zeroed view of slice if its capacity is greater than or
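
The reset(...) calls in the histogram hunks above rely on a helper of this shape: return a zeroed slice of the requested length, reusing the backing array when capacity allows. The doc comment is truncated here, so this is a sketch of the pattern rather than the SDK's exact implementation:

package sketch

// reuseSlice returns a zeroed slice of length n, reusing s's backing array
// when it is large enough; otherwise it allocates a fresh one.
func reuseSlice[T any](s []T, n int) []T {
	if cap(s) >= n {
		s = s[:n]
		var zero T
		for i := range s {
			s[i] = zero // zero the reused view
		}
		return s
	}
	return make([]T, n)
}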