Mirror of https://github.com/superseriousbusiness/gotosocial.git, synced 2025-10-31 14:42:26 -05:00
	[chore] update go dependencies (#4304)
- github.com/KimMachineGun/automemlimit v0.7.2 => v0.7.3
- github.com/gin-contrib/cors v1.7.5 => v1.7.6
- github.com/minio/minio-go/v7 v7.0.92 => v7.0.94
- github.com/spf13/cast v1.8.0 => v1.9.2
- github.com/uptrace/bun{,/*} v1.2.11 => v1.2.14
- golang.org/x/image v0.27.0 => v0.28.0
- golang.org/x/net v0.40.0 => v0.41.0
- code.superseriousbusiness.org/go-swagger v0.31.0-gts-go1.23-fix => v0.32.3-gts-go1.23-fix
Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4304
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
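Bumps like these are typically applied with go get followed by go mod tidy and go mod vendor, which is what produces the large vendor/ churn further down. Purely as a hedged illustration (not the actual layout of this repository's go.mod), the updated require entries for the modules named in the commit message would end up looking roughly like this; the uptrace/bun sub-modules written as bun{,/*} above move in lockstep and are omitted here:

require (
	code.superseriousbusiness.org/go-swagger v0.32.3-gts-go1.23-fix
	github.com/KimMachineGun/automemlimit v0.7.3
	github.com/gin-contrib/cors v1.7.6
	github.com/minio/minio-go/v7 v7.0.94
	github.com/spf13/cast v1.9.2
	github.com/uptrace/bun v1.2.14
	golang.org/x/image v0.28.0
	golang.org/x/net v0.41.0
)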
	
	
This commit is contained in:
parent 7712885038
commit 8b0ea56027
294 changed files with 139999 additions and 21873 deletions

55  vendor/google.golang.org/grpc/stream.go  generated  vendored
@@ -101,9 +101,9 @@ type ClientStream interface {
 	// It must only be called after stream.CloseAndRecv has returned, or
 	// stream.Recv has returned a non-nil error (including io.EOF).
 	Trailer() metadata.MD
-	// CloseSend closes the send direction of the stream. It closes the stream
-	// when non-nil error is met. It is also not safe to call CloseSend
-	// concurrently with SendMsg.
+	// CloseSend closes the send direction of the stream. This method always
+	// returns a nil error. The status of the stream may be discovered using
+	// RecvMsg. It is also not safe to call CloseSend concurrently with SendMsg.
 	CloseSend() error
 	// Context returns the context for this stream.
 	//
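The reworded contract in the hunk above means a caller should treat CloseSend as a pure half-close and pick up the final RPC status from RecvMsg. A minimal sketch of that pattern with the raw grpc.ClientStream API follows; the method name "/example.Uploader/Upload" and the msgs/reply values are hypothetical and not taken from this repository.

package main

import (
	"context"

	"google.golang.org/grpc"
)

// uploadAll is a sketch only: it illustrates the CloseSend contract described
// in the hunk above for a client-streaming RPC made without generated stubs.
func uploadAll(ctx context.Context, cc *grpc.ClientConn, msgs []any, reply any) error {
	desc := &grpc.StreamDesc{StreamName: "Upload", ClientStreams: true}
	stream, err := cc.NewStream(ctx, desc, "/example.Uploader/Upload")
	if err != nil {
		return err
	}
	for _, m := range msgs {
		// SendMsg reports errors so the caller knows to stop sending early;
		// the definitive RPC status is still read from RecvMsg below.
		if err := stream.SendMsg(m); err != nil {
			break
		}
	}
	// Per the updated documentation, CloseSend always returns nil: it only
	// half-closes the send direction and never carries the RPC status.
	_ = stream.CloseSend()
	// For a client-streaming RPC there is a single response; the error from
	// RecvMsg is the final status of the stream (nil means the RPC succeeded).
	return stream.RecvMsg(reply)
}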
@@ -212,14 +212,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 	}
 	// Provide an opportunity for the first RPC to see the first service config
 	// provided by the resolver.
-	if err := cc.waitForResolvedAddrs(ctx); err != nil {
+	nameResolutionDelayed, err := cc.waitForResolvedAddrs(ctx)
+	if err != nil {
 		return nil, err
 	}
 
 	var mc serviceconfig.MethodConfig
 	var onCommit func()
 	newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
-		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
+		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...)
 	}
 
 	rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
@@ -257,7 +258,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 	return newStream(ctx, func() {})
 }
 
-func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
+func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
 	callInfo := defaultCallInfo()
 	if mc.WaitForReady != nil {
 		callInfo.failFast = !*mc.WaitForReady
@@ -296,6 +297,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
 		Method:         method,
 		ContentSubtype: callInfo.contentSubtype,
 		DoneFunc:       doneFunc,
+		Authority:      callInfo.authority,
 	}
 
 	// Set our outgoing compression according to the UseCompressor CallOption, if
@@ -321,19 +323,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
 	}
 
 	cs := &clientStream{
-		callHdr:      callHdr,
-		ctx:          ctx,
-		methodConfig: &mc,
-		opts:         opts,
-		callInfo:     callInfo,
-		cc:           cc,
-		desc:         desc,
-		codec:        callInfo.codec,
-		compressorV0: compressorV0,
-		compressorV1: compressorV1,
-		cancel:       cancel,
-		firstAttempt: true,
-		onCommit:     onCommit,
+		callHdr:             callHdr,
+		ctx:                 ctx,
+		methodConfig:        &mc,
+		opts:                opts,
+		callInfo:            callInfo,
+		cc:                  cc,
+		desc:                desc,
+		codec:               callInfo.codec,
+		compressorV0:        compressorV0,
+		compressorV1:        compressorV1,
+		cancel:              cancel,
+		firstAttempt:        true,
+		onCommit:            onCommit,
+		nameResolutionDelay: nameResolutionDelayed,
 	}
 	if !cc.dopts.disableRetry {
 		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
@@ -417,7 +420,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
 	var beginTime time.Time
 	shs := cs.cc.dopts.copts.StatsHandlers
 	for _, sh := range shs {
-		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
+		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast, NameResolutionDelay: cs.nameResolutionDelay})
 		beginTime = time.Now()
 		begin := &stats.Begin{
 			Client:                    true,
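Because TagRPC now receives NameResolutionDelay in stats.RPCTagInfo, a custom stats handler can observe whether an RPC's first attempt had to wait for the resolver. A rough sketch, assuming this vendored grpc version exposes that field as the hunk above indicates; the handler type and log message are illustrative only:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// delayLoggingHandler is an illustrative stats.Handler (the name is made up)
// that logs RPCs which were delayed waiting for name resolution.
type delayLoggingHandler struct{}

func (delayLoggingHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	if info.NameResolutionDelay {
		log.Printf("RPC %s was delayed by name resolution", info.FullMethodName)
	}
	return ctx
}

func (delayLoggingHandler) HandleRPC(context.Context, stats.RPCStats) {}

func (delayLoggingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}

func (delayLoggingHandler) HandleConn(context.Context, stats.ConnStats) {}

Such a handler would be attached when dialing, for example with grpc.WithStatsHandler(delayLoggingHandler{}).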
@@ -573,6 +576,9 @@ type clientStream struct {
 	onCommit         func()
 	replayBuffer     []replayOp // operations to replay on retry
 	replayBufferSize int        // current size of replayBuffer
+	// nameResolutionDelay indicates if there was a delay in the name resolution.
+	// This field is only valid on client side, it's always false on server side.
+	nameResolutionDelay bool
 }
 
 type replayOp struct {
@@ -987,7 +993,7 @@ func (cs *clientStream) RecvMsg(m any) error {
 
 func (cs *clientStream) CloseSend() error {
 	if cs.sentLast {
-		// TODO: return an error and finish the stream instead, due to API misuse?
+		// Return a nil error on repeated calls to this method.
 		return nil
 	}
 	cs.sentLast = true
@@ -1008,7 +1014,10 @@ func (cs *clientStream) CloseSend() error {
 			binlog.Log(cs.ctx, chc)
 		}
 	}
-	// We never returned an error here for reasons.
+	// We don't return an error here as we expect users to read all messages
+	// from the stream and get the RPC status from RecvMsg().  Note that
+	// SendMsg() must return an error when one occurs so the application
+	// knows to stop sending messages, but that does not apply here.
 	return nil
 }
 
@@ -1372,7 +1381,7 @@ func (as *addrConnStream) Trailer() metadata.MD {
 
 func (as *addrConnStream) CloseSend() error {
 	if as.sentLast {
-		// TODO: return an error and finish the stream instead, due to API misuse?
+		// Return a nil error on repeated calls to this method.
 		return nil
 	}
 	as.sentLast = true