[chore] update go dependencies (#4304)
- github.com/KimMachineGun/automemlimit v0.7.2 => v0.7.3
- github.com/gin-contrib/cors v1.7.5 => v1.7.6
- github.com/minio/minio-go/v7 v7.0.92 => v7.0.94
- github.com/spf13/cast v1.8.0 => v1.9.2
- github.com/uptrace/bun{,/*} v1.2.11 => v1.2.14
- golang.org/x/image v0.27.0 => v0.28.0
- golang.org/x/net v0.40.0 => v0.41.0
- code.superseriousbusiness.org/go-swagger v0.31.0-gts-go1.23-fix => v0.32.3-gts-go1.23-fix
Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4304
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
Parent: 7712885038
Commit: 8b0ea56027

294 changed files with 139999 additions and 21873 deletions
vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go (generated, vendored): 7 changes
@@ -125,9 +125,8 @@ func WithoutCounterSuffixes() Option {
 	})
 }
 
-// WithoutScopeInfo configures the Exporter to not export the otel_scope_info metric.
-// If not specified, the Exporter will create a otel_scope_info metric containing
-// the metrics' Instrumentation Scope, and also add labels about Instrumentation Scope to all metric points.
+// WithoutScopeInfo configures the Exporter to not export
+// labels about Instrumentation Scope to all metric points.
 func WithoutScopeInfo() Option {
 	return optionFunc(func(cfg config) config {
 		cfg.disableScopeInfo = true
@@ -136,7 +135,7 @@ func WithoutScopeInfo() Option {
 }
 
 // WithNamespace configures the Exporter to prefix metric with the given namespace.
-// Metadata metrics such as target_info and otel_scope_info are not prefixed since these
+// Metadata metrics such as target_info are not prefixed since these
 // have special behavior based on their name.
 func WithNamespace(ns string) Option {
 	return optionFunc(func(cfg config) config {
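As context for the two option changes above, here is a minimal sketch of how this exporter is typically constructed and attached to a MeterProvider. The wiring is illustrative only: the package name, helper name, and namespace value are made up and not taken from GoToSocial; only prometheus.New, WithNamespace, and WithoutScopeInfo come from the vendored package shown in this diff.

package metrics

import (
	"go.opentelemetry.io/otel/exporters/prometheus"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

// newMeterProvider is a hypothetical helper showing the options touched above.
func newMeterProvider() (*sdkmetric.MeterProvider, error) {
	exp, err := prometheus.New(
		// Prefixes metric names; target_info stays unprefixed per the doc comment above.
		prometheus.WithNamespace("example"),
		// After this update, suppresses the otel_scope_* labels on metric points
		// (the separate otel_scope_info metric is gone entirely).
		prometheus.WithoutScopeInfo(),
	)
	if err != nil {
		return nil, err
	}
	// The Exporter embeds metric.Reader, so it can be passed straight to WithReader.
	return sdkmetric.NewMeterProvider(sdkmetric.WithReader(exp)), nil
}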
vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go (generated, vendored): 172 changes
@@ -21,7 +21,6 @@ import (
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/internal/global"
-	"go.opentelemetry.io/otel/sdk/instrumentation"
 	"go.opentelemetry.io/otel/sdk/metric"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 	"go.opentelemetry.io/otel/sdk/resource"
@@ -31,25 +30,20 @@ const (
 	targetInfoMetricName  = "target_info"
 	targetInfoDescription = "Target metadata"
 
-	scopeInfoMetricName  = "otel_scope_info"
-	scopeInfoDescription = "Instrumentation Scope metadata"
-
-	scopeNameLabel    = "otel_scope_name"
-	scopeVersionLabel = "otel_scope_version"
+	scopeLabelPrefix  = "otel_scope_"
+	scopeNameLabel    = scopeLabelPrefix + "name"
+	scopeVersionLabel = scopeLabelPrefix + "version"
+	scopeSchemaLabel  = scopeLabelPrefix + "schema_url"
 
 	traceIDExemplarKey = "trace_id"
 	spanIDExemplarKey  = "span_id"
 )
 
-var (
-	errScopeInvalid = errors.New("invalid scope")
-
-	metricsPool = sync.Pool{
-		New: func() interface{} {
-			return &metricdata.ResourceMetrics{}
-		},
-	}
-)
+var metricsPool = sync.Pool{
+	New: func() interface{} {
+		return &metricdata.ResourceMetrics{}
+	},
+}
 
 // Exporter is a Prometheus Exporter that embeds the OTel metric.Reader
 // interface for easy instantiation with a MeterProvider.
@@ -97,8 +91,6 @@ type collector struct {
 	mu                sync.Mutex // mu protects all members below from the concurrent access.
 	disableTargetInfo bool
 	targetInfo        prometheus.Metric
-	scopeInfos        map[instrumentation.Scope]prometheus.Metric
-	scopeInfosInvalid map[instrumentation.Scope]struct{}
 	metricFamilies    map[string]*dto.MetricFamily
 	resourceKeyVals   keyVals
 }
@@ -122,8 +114,6 @@ func New(opts ...Option) (*Exporter, error) {
 		withoutUnits:             cfg.withoutUnits,
 		withoutCounterSuffixes:   cfg.withoutCounterSuffixes,
 		disableScopeInfo:         cfg.disableScopeInfo,
-		scopeInfos:               make(map[instrumentation.Scope]prometheus.Metric),
-		scopeInfosInvalid:        make(map[instrumentation.Scope]struct{}),
 		metricFamilies:           make(map[string]*dto.MetricFamily),
 		namespace:                cfg.namespace,
 		resourceAttributesFilter: cfg.resourceAttributesFilter,
@@ -202,20 +192,15 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
 		}
 
 		if !c.disableScopeInfo {
-			scopeInfo, err := c.scopeInfo(scopeMetrics.Scope)
-			if errors.Is(err, errScopeInvalid) {
-				// Do not report the same error multiple times.
-				continue
-			}
-			if err != nil {
-				otel.Handle(err)
-				continue
-			}
+			kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel, scopeSchemaLabel)
+			kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version, scopeMetrics.Scope.SchemaURL)
 
-			ch <- scopeInfo
-
-			kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel)
-			kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version)
+			attrKeys, attrVals := getAttrs(scopeMetrics.Scope.Attributes)
+			for i := range attrKeys {
+				attrKeys[i] = scopeLabelPrefix + attrKeys[i]
+			}
+			kv.keys = append(kv.keys, attrKeys...)
+			kv.vals = append(kv.vals, attrVals...)
 		}
 
 		kv.keys = append(kv.keys, c.resourceKeyVals.keys...)
@@ -259,6 +244,59 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
 	}
 }
 
+// downscaleExponentialBucket re-aggregates bucket counts when downscaling to a coarser resolution.
+func downscaleExponentialBucket(bucket metricdata.ExponentialBucket, scaleDelta int32) metricdata.ExponentialBucket {
+	if len(bucket.Counts) == 0 || scaleDelta < 1 {
+		return metricdata.ExponentialBucket{
+			Offset: bucket.Offset >> scaleDelta,
+			Counts: append([]uint64(nil), bucket.Counts...), // copy slice
+		}
+	}
+
+	// The new offset is scaled down
+	newOffset := bucket.Offset >> scaleDelta
+
+	// Pre-calculate the new bucket count to avoid growing slice
+	// Each group of 2^scaleDelta buckets will merge into one bucket
+	//nolint:gosec // Length is bounded by slice allocation
+	lastBucketIdx := bucket.Offset + int32(len(bucket.Counts)) - 1
+	lastNewIdx := lastBucketIdx >> scaleDelta
+	newBucketCount := int(lastNewIdx - newOffset + 1)
+
+	if newBucketCount <= 0 {
+		return metricdata.ExponentialBucket{
+			Offset: newOffset,
+			Counts: []uint64{},
+		}
+	}
+
+	newCounts := make([]uint64, newBucketCount)
+
+	// Merge buckets according to the scale difference
+	for i, count := range bucket.Counts {
+		if count == 0 {
+			continue
+		}
+
+		// Calculate which new bucket this count belongs to
+		//nolint:gosec // Index is bounded by loop iteration
+		originalIdx := bucket.Offset + int32(i)
+		newIdx := originalIdx >> scaleDelta
+
+		// Calculate the position in the new counts array
+		position := newIdx - newOffset
+		//nolint:gosec // Length is bounded by allocation
+		if position >= 0 && position < int32(len(newCounts)) {
+			newCounts[position] += count
+		}
+	}
+
+	return metricdata.ExponentialBucket{
+		Offset: newOffset,
+		Counts: newCounts,
+	}
+}
+
 func addExponentialHistogramMetric[N int64 | float64](
 	ch chan<- prometheus.Metric,
 	histogram metricdata.ExponentialHistogram[N],
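Since the re-aggregation above is easy to misread inside a diff, here is a small self-contained sketch of the same bucket-merging arithmetic on plain slices. The downscale helper is hypothetical and not part of the vendored package: every 2^scaleDelta adjacent source buckets collapse into one destination bucket, and the offset is shifted right by scaleDelta (for example, an OTLP scale of 10 clamped to Prometheus's maximum of 8 gives scaleDelta = 2).

package main

import "fmt"

// downscale merges counts (where counts[i] belongs to bucket index offset+i)
// into a coarser resolution: 2^scaleDelta source buckets per destination bucket.
func downscale(offset int32, counts []uint64, scaleDelta int32) (int32, []uint64) {
	if len(counts) == 0 || scaleDelta < 1 {
		return offset, append([]uint64(nil), counts...)
	}
	newOffset := offset >> scaleDelta
	lastIdx := offset + int32(len(counts)) - 1
	out := make([]uint64, (lastIdx>>scaleDelta)-newOffset+1)
	for i, c := range counts {
		out[((offset+int32(i))>>scaleDelta)-newOffset] += c
	}
	return newOffset, out
}

func main() {
	// scaleDelta = 2: source buckets 4..7 and 8..11 each merge into one bucket.
	off, merged := downscale(4, []uint64{1, 2, 3, 4, 5, 6, 7, 8}, 2)
	fmt.Println(off, merged) // 1 [10 26]
}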
@@ -273,23 +311,43 @@ func addExponentialHistogramMetric[N int64 | float64](
 
 		desc := prometheus.NewDesc(name, m.Description, keys, nil)
 
+		// Prometheus native histograms support scales in the range [-4, 8]
+		scale := dp.Scale
+		if scale < -4 {
+			// Reject scales below -4 as they cannot be represented in Prometheus
+			otel.Handle(fmt.Errorf(
+				"exponential histogram scale %d is below minimum supported scale -4, skipping data point",
+				scale))
+			continue
+		}
+
+		// If scale > 8, we need to downscale the buckets to match the clamped scale
+		positiveBucket := dp.PositiveBucket
+		negativeBucket := dp.NegativeBucket
+		if scale > 8 {
+			scaleDelta := scale - 8
+			positiveBucket = downscaleExponentialBucket(dp.PositiveBucket, scaleDelta)
+			negativeBucket = downscaleExponentialBucket(dp.NegativeBucket, scaleDelta)
+			scale = 8
+		}
+
 		// From spec: note that Prometheus Native Histograms buckets are indexed by upper boundary while Exponential Histograms are indexed by lower boundary, the result being that the Offset fields are different-by-one.
 		positiveBuckets := make(map[int]int64)
-		for i, c := range dp.PositiveBucket.Counts {
+		for i, c := range positiveBucket.Counts {
 			if c > math.MaxInt64 {
 				otel.Handle(fmt.Errorf("positive count %d is too large to be represented as int64", c))
 				continue
 			}
-			positiveBuckets[int(dp.PositiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above.
+			positiveBuckets[int(positiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above.
 		}
 
 		negativeBuckets := make(map[int]int64)
-		for i, c := range dp.NegativeBucket.Counts {
+		for i, c := range negativeBucket.Counts {
 			if c > math.MaxInt64 {
 				otel.Handle(fmt.Errorf("negative count %d is too large to be represented as int64", c))
 				continue
 			}
-			negativeBuckets[int(dp.NegativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above.
+			negativeBuckets[int(negativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above.
 		}
 
 		m, err := prometheus.NewConstNativeHistogram(
@@ -299,7 +357,7 @@ func addExponentialHistogramMetric[N int64 | float64](
 			positiveBuckets,
 			negativeBuckets,
 			dp.ZeroCount,
-			dp.Scale,
+			scale,
 			dp.ZeroThreshold,
 			dp.StartTime,
 			values...)
@@ -440,15 +498,11 @@ func createInfoMetric(name, description string, res *resource.Resource) (prometh
 	return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
 }
 
-func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) {
-	attrs := make([]attribute.KeyValue, 0, scope.Attributes.Len()+2) // resource attrs + scope name + scope version
-	attrs = append(attrs, scope.Attributes.ToSlice()...)
-	attrs = append(attrs, attribute.String(scopeNameLabel, scope.Name))
-	attrs = append(attrs, attribute.String(scopeVersionLabel, scope.Version))
-
-	keys, values := getAttrs(attribute.NewSet(attrs...))
-	desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil)
-	return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
-}
-
+func unitMapGetOrDefault(unit string) string {
+	if promUnit, ok := unitSuffixes[unit]; ok {
+		return promUnit
+	}
+	return unit
+}
+
 var unitSuffixes = map[string]string{
@@ -509,7 +563,7 @@ func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string {
 	if c.namespace != "" {
 		name = c.namespace + name
 	}
-	if suffix, ok := unitSuffixes[m.Unit]; ok && !c.withoutUnits && !strings.HasSuffix(name, suffix) {
+	if suffix := unitMapGetOrDefault(m.Unit); suffix != "" && !c.withoutUnits && !strings.HasSuffix(name, suffix) {
 		name += "_" + suffix
 	}
 	if addCounterSuffix {
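The getName change above swaps a direct map lookup for unitMapGetOrDefault, so units with a known Prometheus translation are still mapped while other non-empty unit strings now pass through as the suffix. A standalone sketch of that behavior follows; the unit table here is a made-up stand-in with illustrative entries, not the exporter's full unitSuffixes map.

package main

import (
	"fmt"
	"strings"
)

// Stand-in for the exporter's unitSuffixes table (illustrative entries only).
var unitSuffixes = map[string]string{"s": "seconds", "By": "bytes"}

func unitMapGetOrDefault(unit string) string {
	if promUnit, ok := unitSuffixes[unit]; ok {
		return promUnit
	}
	return unit // unmapped units now pass through unchanged
}

// suffixed mimics the suffixing step of getName without the counter/namespace logic.
func suffixed(name, unit string) string {
	if suffix := unitMapGetOrDefault(unit); suffix != "" && !strings.HasSuffix(name, suffix) {
		name += "_" + suffix
	}
	return name
}

func main() {
	fmt.Println(suffixed("http_request_duration", "s")) // http_request_duration_seconds
	fmt.Println(suffixed("disk_usage", "By"))           // disk_usage_bytes
	fmt.Println(suffixed("queue_depth", ""))            // queue_depth (no suffix added)
}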
@@ -556,30 +610,6 @@ func (c *collector) createResourceAttributes(res *resource.Resource) {
 	c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues}
 }
 
-func (c *collector) scopeInfo(scope instrumentation.Scope) (prometheus.Metric, error) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	scopeInfo, ok := c.scopeInfos[scope]
-	if ok {
-		return scopeInfo, nil
-	}
-
-	if _, ok := c.scopeInfosInvalid[scope]; ok {
-		return nil, errScopeInvalid
-	}
-
-	scopeInfo, err := createScopeInfoMetric(scope)
-	if err != nil {
-		c.scopeInfosInvalid[scope] = struct{}{}
-		return nil, fmt.Errorf("cannot create scope info metric: %w", err)
-	}
-
-	c.scopeInfos[scope] = scopeInfo
-
-	return scopeInfo, nil
-}
-
 func (c *collector) validateMetrics(name, description string, metricType *dto.MetricType) (drop bool, help string) {
 	c.mu.Lock()
 	defer c.mu.Unlock()