[chore] bump dependencies (#4406)

- codeberg.org/gruf/go-ffmpreg: v0.6.9 -> v0.6.10
- github.com/ncruces/go-sqlite3: v0.27.1 -> v0.28.0
- github.com/stretchr/testify: v1.10.0 -> v1.11.1
- github.com/tdewolff/minify/v2: v2.23.11 -> v2.24.2
- go.opentelemetry.io/otel{,/*}: v1.37.0 -> v1.38.0
- go.opentelemetry.io/contrib/*: v0.62.0 -> v0.63.0

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4406
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
commit 78defcd916
kim, 2025-09-04 15:29:27 +02:00 (committed by kim)
274 changed files with 9213 additions and 2368 deletions


@@ -223,15 +223,7 @@ func (gsb *Balancer) ExitIdle() {
// There is no need to protect this read with a mutex, as the write to the
// Balancer field happens in SwitchTo, which completes before this can be
// called.
if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
ei.ExitIdle()
return
}
gsb.mu.Lock()
defer gsb.mu.Unlock()
for sc := range balToUpdate.subconns {
sc.Connect()
}
balToUpdate.ExitIdle()
}
// updateSubConnState forwards the update to the appropriate child.
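
Note on the hunk above: the deleted lines are the optional-interface fallback (assert balancer.ExitIdler, otherwise reconnect each SubConn by hand), and the new code calls balToUpdate.ExitIdle() unconditionally, presumably because ExitIdle is now part of the required child-balancer interface. A minimal standalone sketch of the removed pattern, using hypothetical stand-in types rather than the real grpc interfaces:

package sketch

// Hypothetical stand-ins; the real interfaces live in google.golang.org/grpc/balancer.
type Balancer interface{ Close() }
type ExitIdler interface{ ExitIdle() }
type SubConn interface{ Connect() }

// exitIdle asserts the narrower optional interface and, only when it is
// absent, falls back to reconnecting the subconns directly.
func exitIdle(b Balancer, subconns []SubConn) {
	if ei, ok := b.(ExitIdler); ok {
		ei.ExitIdle()
		return
	}
	for _, sc := range subconns {
		sc.Connect()
	}
}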


@@ -26,30 +26,32 @@ import (
)
var (
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
// EnableTXTServiceConfig is set if the DNS resolver should perform TXT
// lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not
// "false").
EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true)
// TXTErrIgnore is set if TXT errors should be ignored
// ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
// RingHashCap indicates the maximum ring size which defaults to 4096
// entries but may be overridden by setting the environment variable
// "GRPC_RING_HASH_CAP". This does not override the default bounds
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
// LeastRequestLB is set if we should support the least_request_experimental
// LB policy, which can be enabled by setting the environment variable
// "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", true)
// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
// handshakes that can be performed.
ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
// EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
// should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this
// option is present for backward compatibility. This option may be overridden
// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
// or "false".
EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
// XDSFallbackSupport is the env variable that controls whether support for
// xDS fallback is turned on. If this is unset or is false, only the first
// xDS server in the list of server configs will be used.
XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true)
// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
// instead of the exiting pickfirst implementation. This can be disabled by
// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
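
The comments above pin down the parsing convention for these switches: with a default of true the flag stays on unless the variable is set to exactly "false". The boolFromEnv helper itself is not part of this hunk; a minimal equivalent under that assumption (sketch only, the name is illustrative) could look like:

package sketch

import (
	"os"
	"strings"
)

// boolFromEnvSketch mirrors the behaviour described in the comments above:
// with def=true the flag is disabled only by an explicit "false", and with
// def=false it is enabled only by an explicit "true".
func boolFromEnvSketch(name string, def bool) bool {
	v := strings.ToLower(os.Getenv(name))
	if def {
		return v != "false"
	}
	return v == "true"
}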


@@ -182,35 +182,6 @@ var (
// other features, including the CSDS service.
NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error)
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
// variable.
//
// TODO: Remove this function once the RLS env var is removed.
RegisterRLSClusterSpecifierPluginForTesting func()
// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
// Specifier Plugin for testing purposes. This is needed because there is no way
// to unregister the RLS Cluster Specifier Plugin after registering it solely
// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
//
// TODO: Remove this function once the RLS env var is removed.
UnregisterRLSClusterSpecifierPluginForTesting func()
// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
// purposes, regardless of the RBAC environment variable.
//
// TODO: Remove this function once the RBAC env var is removed.
RegisterRBACHTTPFilterForTesting func()
// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
// testing purposes. This is needed because there is no way to unregister the
// HTTP Filter after registering it solely for testing purposes using
// RegisterRBACHTTPFilterForTesting().
//
// TODO: Remove this function once the RBAC env var is removed.
UnregisterRBACHTTPFilterForTesting func()
// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions)


@@ -132,13 +132,13 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
// DNS address (non-IP).
ctx, cancel := context.WithCancel(context.Background())
d := &dnsResolver{
host: host,
port: port,
ctx: ctx,
cancel: cancel,
cc: cc,
rn: make(chan struct{}, 1),
disableServiceConfig: opts.DisableServiceConfig,
host: host,
port: port,
ctx: ctx,
cancel: cancel,
cc: cc,
rn: make(chan struct{}, 1),
enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig,
}
d.resolver, err = internal.NewNetResolver(target.URL.Host)
@@ -181,8 +181,8 @@ type dnsResolver struct {
// finishes, race detector sometimes will warn lookup (READ the lookup
// function pointers) inside watcher() goroutine has data race with
// replaceNetFunc (WRITE the lookup function pointers).
wg sync.WaitGroup
disableServiceConfig bool
wg sync.WaitGroup
enableServiceConfig bool
}
// ResolveNow invoke an immediate resolution of the target that this
@@ -346,7 +346,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
if len(srv) > 0 {
state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
}
if !d.disableServiceConfig {
if d.enableServiceConfig {
state.ServiceConfig = d.lookupTXT(ctx)
}
return &state, nil
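
With this change the TXT service-config lookup runs only when both the new GRPC_ENABLE_TXT_SERVICE_CONFIG switch and the per-dial setting allow it. As an illustration using the public grpc-go dial options (the target here is hypothetical), a single client can still opt out regardless of the environment:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// WithDisableServiceConfig sets opts.DisableServiceConfig, so the
	// resolver's enableServiceConfig above ends up false and no TXT lookup is
	// performed. Setting GRPC_ENABLE_TXT_SERVICE_CONFIG=false disables it
	// process-wide instead.
	conn, err := grpc.NewClient(
		"dns:///example.test:443", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDisableServiceConfig(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}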


@@ -40,6 +40,13 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
e.SetMaxDynamicTableSizeLimit(v)
}
// itemNodePool is used to reduce heap allocations.
var itemNodePool = sync.Pool{
New: func() any {
return &itemNode{}
},
}
type itemNode struct {
it any
next *itemNode
@@ -51,7 +58,9 @@ type itemList struct {
}
func (il *itemList) enqueue(i any) {
n := &itemNode{it: i}
n := itemNodePool.Get().(*itemNode)
n.next = nil
n.it = i
if il.tail == nil {
il.head, il.tail = n, n
return
@@ -71,7 +80,9 @@ func (il *itemList) dequeue() any {
return nil
}
i := il.head.it
temp := il.head
il.head = il.head.next
itemNodePool.Put(temp)
if il.head == nil {
il.tail = nil
}
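
The enqueue/dequeue changes above recycle itemNode values through a sync.Pool instead of allocating a fresh node per queued item; note how the fields are reset on reuse, because a pooled node still carries its previous contents. A standalone sketch of the same pattern with generic names (not the controlbuf types):

package sketch

import "sync"

type node struct {
	val  any
	next *node
}

var nodePool = sync.Pool{New: func() any { return &node{} }}

// getNode hands out a recycled (or new) node with its fields reset.
func getNode(v any) *node {
	n := nodePool.Get().(*node)
	n.val, n.next = v, nil
	return n
}

// putNode clears the node before returning it, so the pool does not keep the
// dequeued value reachable.
func putNode(n *node) {
	n.val, n.next = nil, nil
	nodePool.Put(n)
}
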
@@ -146,10 +157,11 @@ type earlyAbortStream struct {
func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
type dataFrame struct {
streamID uint32
endStream bool
h []byte
reader mem.Reader
streamID uint32
endStream bool
h []byte
data mem.BufferSlice
processing bool
// onEachWrite is called every time
// a part of data is written out.
onEachWrite func()
@@ -234,6 +246,7 @@ type outStream struct {
itl *itemList
bytesOutStanding int
wq *writeQuota
reader mem.Reader
next *outStream
prev *outStream
@@ -461,7 +474,9 @@ func (c *controlBuffer) finish() {
v.onOrphaned(ErrConnClosing)
}
case *dataFrame:
_ = v.reader.Close()
if !v.processing {
v.data.Free()
}
}
}
@@ -650,10 +665,11 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
func (l *loopyWriter) registerStreamHandler(h *registerStream) {
str := &outStream{
id: h.streamID,
state: empty,
itl: &itemList{},
wq: h.wq,
id: h.streamID,
state: empty,
itl: &itemList{},
wq: h.wq,
reader: mem.BufferSlice{}.Reader(),
}
l.estdStreams[h.streamID] = str
}
@@ -685,10 +701,11 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
}
// Case 2: Client wants to originate stream.
str := &outStream{
id: h.streamID,
state: empty,
itl: &itemList{},
wq: h.wq,
id: h.streamID,
state: empty,
itl: &itemList{},
wq: h.wq,
reader: mem.BufferSlice{}.Reader(),
}
return l.originateStream(str, h)
}
@@ -790,10 +807,13 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
// a RST_STREAM before stream initialization thus the stream might
// not be established yet.
delete(l.estdStreams, c.streamID)
str.reader.Close()
str.deleteSelf()
for head := str.itl.dequeueAll(); head != nil; head = head.next {
if df, ok := head.it.(*dataFrame); ok {
_ = df.reader.Close()
if !df.processing {
df.data.Free()
}
}
}
}
@@ -928,7 +948,13 @@ func (l *loopyWriter) processData() (bool, error) {
if str == nil {
return true, nil
}
reader := str.reader
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
if !dataItem.processing {
dataItem.processing = true
str.reader.Reset(dataItem.data)
dataItem.data.Free()
}
// A data item is represented by a dataFrame, since it later translates into
// multiple HTTP2 data frames.
// Every dataFrame has two buffers; h that keeps grpc-message header and data
@@ -936,13 +962,13 @@
// from data is copied to h to make as big as the maximum possible HTTP2 frame
// size.
if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
_ = dataItem.reader.Close()
_ = reader.Close()
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -971,8 +997,8 @@
}
// Compute how much of the header and data we can send within quota and max frame length
hSize := min(maxSize, len(dataItem.h))
dSize := min(maxSize-hSize, dataItem.reader.Remaining())
remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
dSize := min(maxSize-hSize, reader.Remaining())
remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize
size := hSize + dSize
var buf *[]byte
@@ -993,7 +1019,7 @@
defer pool.Put(buf)
copy((*buf)[:hSize], dataItem.h)
_, _ = dataItem.reader.Read((*buf)[hSize:])
_, _ = reader.Read((*buf)[hSize:])
}
// Now that outgoing flow controls are checked we can replenish str's write quota
@@ -1014,7 +1040,7 @@
dataItem.h = dataItem.h[hSize:]
if remainingBytes == 0 { // All the data from that message was written out.
_ = dataItem.reader.Close()
_ = reader.Close()
str.itl.dequeue()
}
if str.itl.isEmpty() {


@@ -309,11 +309,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
scheme = "https"
}
}
dynamicWindow := true
icwz := int32(initialWindowSize)
if opts.InitialConnWindowSize >= defaultWindowSize {
icwz = opts.InitialConnWindowSize
dynamicWindow = false
}
writeBufSize := opts.WriteBufferSize
readBufSize := opts.ReadBufferSize
@@ -381,9 +379,8 @@
t.controlBuf = newControlBuffer(t.ctxDone)
if opts.InitialWindowSize >= defaultWindowSize {
t.initialWindowSize = opts.InitialWindowSize
dynamicWindow = false
}
if dynamicWindow {
if !opts.StaticWindowSize {
t.bdpEst = &bdpEstimator{
bdp: initialWindowSize,
updateFlowControl: t.updateFlowControl,
@@ -1091,32 +1088,29 @@ func (t *http2Client) GracefulClose() {
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
reader := data.Reader()
if opts.Last {
// If it's the last message, update stream state.
if !s.compareAndSwapState(streamActive, streamWriteDone) {
_ = reader.Close()
return errStreamDone
}
} else if s.getState() != streamActive {
_ = reader.Close()
return errStreamDone
}
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
h: hdr,
reader: reader,
data: data,
}
if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota.
if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
_ = reader.Close()
dataLen := data.Len()
if hdr != nil || dataLen != 0 { // If it's not an empty data frame, check quota.
if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil {
return err
}
}
data.Ref()
if err := t.controlBuf.put(df); err != nil {
_ = reader.Close()
data.Free()
return err
}
t.incrMsgSent()
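
The write path above now queues the mem.BufferSlice itself instead of a per-message Reader: the sender takes an extra reference (data.Ref()) before handing the frame to the control buffer and drops it (data.Free()) on any failure path where the writer will never see it, while loopyWriter resets its per-stream reader onto the slice when it starts processing the frame. A rough sketch of that ownership rule using the public google.golang.org/grpc/mem package (the queue and function names here are illustrative, not the transport's):

package sketch

import (
	"errors"

	"google.golang.org/grpc/mem"
)

// enqueue takes a reference for the queued frame and releases it again if the
// frame can never reach the consumer, which would otherwise Free it after use.
func enqueue(queue chan mem.BufferSlice, data mem.BufferSlice) error {
	data.Ref() // the queue co-owns the buffers from here on
	select {
	case queue <- data:
		return nil // consumer is now responsible for calling data.Free()
	default:
		data.Free() // never queued: drop the reference we just took
		return errors.New("queue full")
	}
}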


@@ -132,6 +132,10 @@ type http2Server struct {
maxStreamID uint32 // max stream ID ever seen
logger *grpclog.PrefixLogger
// setResetPingStrikes is stored as a closure instead of making this a
// method on http2Server to avoid a heap allocation when converting a method
// to a closure for passing to frames objects.
setResetPingStrikes func()
}
// NewServerTransport creates a http2 transport with conn and configuration
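
The new comment explains the field added above: converting a method to a plain func() (a method value) materialises a fresh closure, and doing that for every frame would allocate repeatedly, so the transport now builds the closure once at construction time. A small illustration of the idea with hypothetical types (not the real transport):

package sketch

import "sync/atomic"

type server struct {
	resetPingStrikes uint32

	// reset is built once in newServer; handing this field to code that wants
	// a func() reuses the same value, whereas a method value taken at each
	// call site would be a new closure every time.
	reset func()
}

func newServer() *server {
	s := &server{}
	s.reset = func() { atomic.StoreUint32(&s.resetPingStrikes, 1) }
	return s
}
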
@@ -176,16 +180,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
Val: config.MaxStreams,
})
}
dynamicWindow := true
iwz := int32(initialWindowSize)
if config.InitialWindowSize >= defaultWindowSize {
iwz = config.InitialWindowSize
dynamicWindow = false
}
icwz := int32(initialWindowSize)
if config.InitialConnWindowSize >= defaultWindowSize {
icwz = config.InitialConnWindowSize
dynamicWindow = false
}
if iwz != defaultWindowSize {
isettings = append(isettings, http2.Setting{
@@ -266,6 +267,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
initialWindowSize: iwz,
bufferPool: config.BufferPool,
}
t.setResetPingStrikes = func() {
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
var czSecurity credentials.ChannelzSecurityValue
if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
czSecurity = au.GetSecurityValue()
@@ -285,7 +289,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
t.logger = prefixLoggerForServerTransport(t)
t.controlBuf = newControlBuffer(t.done)
if dynamicWindow {
if !config.StaticWindowSize {
t.bdpEst = &bdpEstimator{
bdp: initialWindowSize,
updateFlowControl: t.updateFlowControl,
@@ -596,10 +600,25 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
return nil
}
}
if s.ctx.Err() != nil {
t.mu.Unlock()
// Early abort in case the timeout was zero or so low it already fired.
t.controlBuf.put(&earlyAbortStream{
httpStatus: http.StatusOK,
streamID: s.id,
contentSubtype: s.contentSubtype,
status: status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()),
rst: !frame.StreamEnded(),
})
return nil
}
t.activeStreams[streamID] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
}
// Start a timer to close the stream on reaching the deadline.
if timeoutSet {
// We need to wait for s.cancel to be updated before calling
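
The early-abort block added above covers streams whose grpc-timeout was zero or had already elapsed while the headers were being processed; in Go such a context reports the expiry immediately through Err(). A trivial standalone check of that behaviour:

package main

import (
	"context"
	"fmt"
)

func main() {
	// A zero (or already elapsed) timeout yields a context that is done on
	// arrival, which is what the s.ctx.Err() check above detects.
	ctx, cancel := context.WithTimeout(context.Background(), 0)
	defer cancel()
	if err := ctx.Err(); err != nil {
		fmt.Println(err) // context deadline exceeded
	}
}
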
@@ -1016,10 +1035,6 @@ func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error {
return nil
}
func (t *http2Server) setResetPingStrikes() {
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
// first and create a slice of that exact size.
@@ -1132,17 +1147,13 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
// Write converts the data into HTTP2 data frame and sends it out. Non-nil error
// is returns if it fails (e.g., framing error, transport error).
func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
reader := data.Reader()
if !s.isHeaderSent() { // Headers haven't been written yet.
if err := t.writeHeader(s, nil); err != nil {
_ = reader.Close()
return err
}
} else {
// Writing headers checks for this condition.
if s.getState() == streamDone {
_ = reader.Close()
return t.streamContextErr(s)
}
}
@@ -1150,15 +1161,16 @@ func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _
df := &dataFrame{
streamID: s.id,
h: hdr,
reader: reader,
data: data,
onEachWrite: t.setResetPingStrikes,
}
if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
_ = reader.Close()
dataLen := data.Len()
if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil {
return t.streamContextErr(s)
}
data.Ref()
if err := t.controlBuf.put(df); err != nil {
_ = reader.Close()
data.Free()
return err
}
t.incrMsgSent()


@@ -200,9 +200,6 @@ func decodeTimeout(s string) (time.Duration, error) {
if err != nil {
return 0, err
}
if t == 0 {
return 0, fmt.Errorf("transport: timeout must be positive: %q", s)
}
const maxHours = math.MaxInt64 / uint64(time.Hour)
if d == time.Hour && t > maxHours {
// This timeout would overflow math.MaxInt64; clamp it.
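
For context on the clamp above: a grpc-timeout arrives as a count plus a unit, and once multiplied out the count can overflow time.Duration (int64 nanoseconds). With the hour unit the cutoff is math.MaxInt64 / time.Hour; a quick standalone check of that bound:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// 9223372036854775807 ns / 3600000000000 ns per hour = 2562047 whole hours.
	const maxHours = math.MaxInt64 / uint64(time.Hour)
	fmt.Println(maxHours) // larger hour counts are clamped rather than rejected
}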


@@ -466,6 +466,7 @@ type ServerConfig struct {
MaxHeaderListSize *uint32
HeaderTableSize *uint32
BufferPool mem.BufferPool
StaticWindowSize bool
}
// ConnectOptions covers all relevant options for communicating with the server.
@@ -504,6 +505,8 @@ type ConnectOptions struct {
MaxHeaderListSize *uint32
// The mem.BufferPool to use when reading/writing to the wire.
BufferPool mem.BufferPool
// StaticWindowSize controls whether dynamic window sizing is enabled.
StaticWindowSize bool
}
// WriteOptions provides additional hints and information for message