[chore] update go dependencies (#4304)

- github.com/KimMachineGun/automemlimit v0.7.2 => v0.7.3
- github.com/gin-contrib/cors v1.7.5 => v1.7.6
- github.com/minio/minio-go/v7 v7.0.92 => v7.0.94
- github.com/spf13/cast v1.8.0 => v1.9.2
- github.com/uptrace/bun{,/*} v1.2.11 => v1.2.14
- golang.org/x/image v0.27.0 => v0.28.0
- golang.org/x/net v0.40.0 => v0.41.0
- code.superseriousbusiness.org/go-swagger v0.31.0-gts-go1.23-fix => v0.32.3-gts-go1.23-fix

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4304
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
This commit is contained in:
kim 2025-06-30 15:19:09 +02:00 committed by kim
commit 8b0ea56027
294 changed files with 139999 additions and 21873 deletions

View file

@ -730,6 +730,8 @@ type Metrics struct {
Errors TimedErrStats `json:"failed,omitempty"`
// Total number of entries that are queued for replication
QStats InQueueMetric `json:"queued"`
// Total number of entries that have replication in progress
InProgress InProgressMetric `json:"inProgress"`
// Deprecated fields
// Total Pending size in bytes across targets
PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
@ -830,6 +832,9 @@ type InQueueMetric struct {
Max QStat `json:"peak" msg:"pq"`
}
// InProgressMetric holds stats for objects with replication currently in
// progress. It is a defined type over InQueueMetric, so it carries the same
// fields (explicit conversion is required between the two types).
type InProgressMetric InQueueMetric

// MetricName is the name of a replication metric, used as a map key in the
// per-node and cluster-level transfer-stats maps below.
type MetricName string
@ -849,6 +854,14 @@ type WorkerStat struct {
Max int32 `json:"max"`
}
// TgtHealth holds the health status of a single replication target.
type TgtHealth struct {
	// Online reports whether the target is currently reachable.
	Online bool `json:"online"`
	// LastOnline is the last time the target was observed online.
	LastOnline time.Time `json:"lastOnline"`
	// TotalDowntime is presumably the cumulative downtime observed for the
	// target — TODO confirm against the producer of this struct.
	TotalDowntime time.Duration `json:"totalDowntime"`
	// OfflineCount is the number of times the target has gone offline.
	OfflineCount int64 `json:"offlineCount"`
}
// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes
// and number of entries that failed replication after 3 retries
type ReplMRFStats struct {
@ -863,15 +876,18 @@ type ReplMRFStats struct {
type ReplQNodeStats struct {
NodeName string `json:"nodeName"`
Uptime int64 `json:"uptime"`
Workers WorkerStat `json:"activeWorkers"`
Workers WorkerStat `json:"workers"`
XferStats map[MetricName]XferStats `json:"transferSummary"`
TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"`
QStats InQueueMetric `json:"queueStats"`
MRFStats ReplMRFStats `json:"mrfStats"`
Retries CounterSummary `json:"retries"`
Errors CounterSummary `json:"errors"`
QStats InQueueMetric `json:"queueStats"`
InProgressStats InProgressMetric `json:"progressStats"`
MRFStats ReplMRFStats `json:"mrfStats"`
Retries CounterSummary `json:"retries"`
Errors CounterSummary `json:"errors"`
TgtHealth map[string]TgtHealth `json:"tgtHealth,omitempty"`
}
// CounterSummary denotes the stats counter summary
@ -918,6 +934,19 @@ func (q ReplQueueStats) qStatSummary() InQueueMetric {
return m
}
// inProgressSummary aggregates the per-node in-progress replication stats
// into a single cluster-level InProgressMetric: Avg and Curr are summed
// across nodes, while Max is only folded in when a node reports a higher
// entry count than the running maximum.
func (q ReplQueueStats) inProgressSummary() (total InProgressMetric) {
	for _, node := range q.Nodes {
		st := node.InProgressStats
		total.Avg.Add(st.Avg)
		total.Curr.Add(st.Curr)
		if st.Max.Count > total.Max.Count {
			total.Max.Add(st.Max)
		}
	}
	return total
}
// ReplQStats holds stats for objects in replication queue
type ReplQStats struct {
Uptime int64 `json:"uptime"`
@ -926,7 +955,9 @@ type ReplQStats struct {
XferStats map[MetricName]XferStats `json:"xferStats"`
TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`
QStats InQueueMetric `json:"qStats"`
QStats InQueueMetric `json:"qStats"`
InProgressStats InProgressMetric `json:"progressStats"`
MRFStats ReplMRFStats `json:"mrfStats"`
Retries CounterSummary `json:"retries"`
Errors CounterSummary `json:"errors"`
@ -935,10 +966,10 @@ type ReplQStats struct {
// QStats returns cluster level stats for objects in replication queue
func (q ReplQueueStats) QStats() (r ReplQStats) {
r.QStats = q.qStatSummary()
r.InProgressStats = q.inProgressSummary()
r.XferStats = make(map[MetricName]XferStats)
r.TgtXferStats = make(map[string]map[MetricName]XferStats)
r.Workers = q.Workers()
for _, node := range q.Nodes {
for arn := range node.TgtXferStats {
xmap, ok := node.TgtXferStats[arn]

View file

@ -0,0 +1,73 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2025 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import (
"bytes"
"errors"
"io"
)
// PeekReadCloser offers a way to peek a ReadCloser stream and then
// return the exact stream of the underlying ReadCloser
type PeekReadCloser struct {
io.ReadCloser
recordMode bool
recordMaxBuf int
recordBuf *bytes.Buffer
}
// ReplayFromStart ensures next Read() will restart to stream the
// underlying ReadCloser stream from the beginning
func (prc *PeekReadCloser) ReplayFromStart() {
prc.recordMode = false
}
func (prc *PeekReadCloser) Read(p []byte) (int, error) {
if prc.recordMode {
if prc.recordBuf.Len() > prc.recordMaxBuf {
return 0, errors.New("maximum peek buffer exceeded")
}
n, err := prc.ReadCloser.Read(p)
prc.recordBuf.Write(p[:n])
return n, err
}
// Replay mode
if prc.recordBuf.Len() > 0 {
pn, _ := prc.recordBuf.Read(p)
return pn, nil
}
return prc.ReadCloser.Read(p)
}
// Close releases the record buffer memory and close the underlying ReadCloser
func (prc *PeekReadCloser) Close() error {
prc.recordBuf.Reset()
return prc.ReadCloser.Close()
}
// NewPeekReadCloser returns a new peek reader
func NewPeekReadCloser(rc io.ReadCloser, maxBufSize int) *PeekReadCloser {
return &PeekReadCloser{
ReadCloser: rc,
recordMode: true, // recording mode by default
recordBuf: bytes.NewBuffer(make([]byte, 0, 1024)),
recordMaxBuf: maxBufSize,
}
}