[chore] update dependencies, bump to Go 1.19.1 (#826)

* update dependencies, bump Go version to 1.19

* bump test image Go version

* update golangci-lint

* update gotosocial-drone-build

* sign

* linting, go fmt

* update swagger docs

* update swagger docs

* whitespace

* update contributing.md

* fuckin whoopsie doopsie

* linterino, linteroni

* fix followrequest test not starting processor

* fix other api/client tests not starting processor

* fix remaining tests where processor not started

* bump go-runners version

* don't check last-webfingered-at, processor may have updated this

* update swagger command

* update bun to latest version

* fix embed to work the same as before with new bun

Signed-off-by: kim <grufwub@gmail.com>
Co-authored-by: tsmethurst <tobi.smethurst@protonmail.com>
kim 2022-09-28 18:30:40 +01:00 committed by GitHub
commit a156188b3e
1135 changed files with 258905 additions and 137146 deletions

View file

@@ -45,9 +45,7 @@ func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, o
// Detect if snowball is server location we are talking to.
var snowball bool
if location, ok := c.bucketLocCache.Get(bucketName); ok {
if location == "snowball" {
snowball = true
}
snowball = location == "snowball"
}
var (
@@ -64,161 +62,46 @@ func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, o
// This routine feeds partial object data as and when the caller reads.
go func() {
defer close(reqCh)
defer close(resCh)
defer func() {
// Close the http response body before returning.
// This ends the connection with the server.
if httpReader != nil {
httpReader.Close()
}
}()
defer cancel()
// Used to verify if etag of object has changed since last read.
var etag string
// Loop through the incoming control messages and read data.
for {
select {
// When context is closed exit our routine.
case <-gctx.Done():
// Close the http response body before returning.
// This ends the connection with the server.
if httpReader != nil {
httpReader.Close()
}
return
// Gather incoming request.
case req := <-reqCh:
// If this is the first request we may not need to do a getObject request yet.
if req.isFirstReq {
// First request is a Read/ReadAt.
if req.isReadOp {
// Differentiate between wanting the whole object and just a range.
if req.isReadAt {
// If this is a ReadAt request only get the specified range.
// Range is set with respect to the offset and length of the buffer requested.
// Do not set objectInfo from the first readAt request because it will not get
// the whole object.
opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
} else if req.Offset > 0 {
opts.SetRange(req.Offset, 0)
}
httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts)
if err != nil {
resCh <- getResponse{Error: err}
return
}
etag = objectInfo.ETag
// Read at least firstReq.Buffer bytes, if not we have
// reached our EOF.
size, err := readFull(httpReader, req.Buffer)
totalRead += size
if size > 0 && err == io.ErrUnexpectedEOF {
if int64(size) < objectInfo.Size {
// In situations when returned size
// is less than the expected content
// length set by the server, make sure
// we return io.ErrUnexpectedEOF
err = io.ErrUnexpectedEOF
} else {
// If an EOF happens after reading some but not
// all the bytes ReadFull returns ErrUnexpectedEOF
err = io.EOF
}
} else if size == 0 && err == io.EOF && objectInfo.Size > 0 {
// Special cases when server writes more data
// than the content-length, net/http response
// body returns an error, instead of converting
// it to io.EOF - return unexpected EOF.
err = io.ErrUnexpectedEOF
}
// Send back the first response.
resCh <- getResponse{
objectInfo: objectInfo,
Size: size,
Error: err,
didRead: true,
}
} else {
// First request is a Stat or Seek call.
// Only need to run a StatObject until an actual Read or ReadAt request comes through.
// Remove range header if already set, for stat Operations to get original file size.
delete(opts.headers, "Range")
objectInfo, err = c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts))
if err != nil {
resCh <- getResponse{
Error: err,
}
// Exit the go-routine.
return
}
etag = objectInfo.ETag
// Send back the first response.
resCh <- getResponse{
objectInfo: objectInfo,
}
for req := range reqCh {
// If this is the first request we may not need to do a getObject request yet.
if req.isFirstReq {
// First request is a Read/ReadAt.
if req.isReadOp {
// Differentiate between wanting the whole object and just a range.
if req.isReadAt {
// If this is a ReadAt request only get the specified range.
// Range is set with respect to the offset and length of the buffer requested.
// Do not set objectInfo from the first readAt request because it will not get
// the whole object.
opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
} else if req.Offset > 0 {
opts.SetRange(req.Offset, 0)
}
} else if req.settingObjectInfo { // Request is just to get objectInfo.
// Remove range header if already set, for stat Operations to get original file size.
delete(opts.headers, "Range")
// Check whether this is snowball
// if yes do not use If-Match feature
// it doesn't work.
if etag != "" && !snowball {
opts.SetMatchETag(etag)
}
objectInfo, err := c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts))
httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts)
if err != nil {
resCh <- getResponse{
Error: err,
}
// Exit the goroutine.
resCh <- getResponse{Error: err}
return
}
// Send back the objectInfo.
resCh <- getResponse{
objectInfo: objectInfo,
}
} else {
// Offset changes fetch the new object at an Offset.
// Because the httpReader may not be set by the first
// request if it was a stat or seek it must be checked
// if the object has been read or not to only initialize
// new ones when they haven't been already.
// All readAt requests are new requests.
if req.DidOffsetChange || !req.beenRead {
// Check whether this is snowball
// if yes do not use If-Match feature
// it doesn't work.
if etag != "" && !snowball {
opts.SetMatchETag(etag)
}
if httpReader != nil {
// Close previously opened http reader.
httpReader.Close()
}
// If this request is a readAt only get the specified range.
if req.isReadAt {
// Range is set with respect to the offset and length of the buffer requested.
opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
} else if req.Offset > 0 { // Range is set with respect to the offset.
opts.SetRange(req.Offset, 0)
} else {
// Remove range header if already set
delete(opts.headers, "Range")
}
httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts)
if err != nil {
resCh <- getResponse{
Error: err,
}
return
}
totalRead = 0
}
// Read at least req.Buffer bytes, if not we have
etag = objectInfo.ETag
// Read at least firstReq.Buffer bytes, if not we have
// reached our EOF.
size, err := readFull(httpReader, req.Buffer)
totalRead += size
if size > 0 && err == io.ErrUnexpectedEOF {
if int64(totalRead) < objectInfo.Size {
if int64(size) < objectInfo.Size {
// In situations when returned size
// is less than the expected content
// length set by the server, make sure
@@ -236,15 +119,123 @@ func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, o
// it to io.EOF - return unexpected EOF.
err = io.ErrUnexpectedEOF
}
// Reply back how much was read.
// Send back the first response.
resCh <- getResponse{
objectInfo: objectInfo,
Size: size,
Error: err,
didRead: true,
}
} else {
// First request is a Stat or Seek call.
// Only need to run a StatObject until an actual Read or ReadAt request comes through.
// Remove range header if already set, for stat Operations to get original file size.
delete(opts.headers, "Range")
objectInfo, err = c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts))
if err != nil {
resCh <- getResponse{
Error: err,
}
// Exit the go-routine.
return
}
etag = objectInfo.ETag
// Send back the first response.
resCh <- getResponse{
objectInfo: objectInfo,
}
}
} else if req.settingObjectInfo { // Request is just to get objectInfo.
// Remove range header if already set, for stat Operations to get original file size.
delete(opts.headers, "Range")
// Check whether this is snowball
// if yes do not use If-Match feature
// it doesn't work.
if etag != "" && !snowball {
opts.SetMatchETag(etag)
}
objectInfo, err := c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts))
if err != nil {
resCh <- getResponse{
Error: err,
}
// Exit the goroutine.
return
}
// Send back the objectInfo.
resCh <- getResponse{
objectInfo: objectInfo,
}
} else {
// Offset changes fetch the new object at an Offset.
// Because the httpReader may not be set by the first
// request if it was a stat or seek it must be checked
// if the object has been read or not to only initialize
// new ones when they haven't been already.
// All readAt requests are new requests.
if req.DidOffsetChange || !req.beenRead {
// Check whether this is snowball
// if yes do not use If-Match feature
// it doesn't work.
if etag != "" && !snowball {
opts.SetMatchETag(etag)
}
if httpReader != nil {
// Close previously opened http reader.
httpReader.Close()
}
// If this request is a readAt only get the specified range.
if req.isReadAt {
// Range is set with respect to the offset and length of the buffer requested.
opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
} else if req.Offset > 0 { // Range is set with respect to the offset.
opts.SetRange(req.Offset, 0)
} else {
// Remove range header if already set
delete(opts.headers, "Range")
}
httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts)
if err != nil {
resCh <- getResponse{
Error: err,
}
return
}
totalRead = 0
}
// Read at least req.Buffer bytes, if not we have
// reached our EOF.
size, err := readFull(httpReader, req.Buffer)
totalRead += size
if size > 0 && err == io.ErrUnexpectedEOF {
if int64(totalRead) < objectInfo.Size {
// In situations when returned size
// is less than the expected content
// length set by the server, make sure
// we return io.ErrUnexpectedEOF
err = io.ErrUnexpectedEOF
} else {
// If an EOF happens after reading some but not
// all the bytes ReadFull returns ErrUnexpectedEOF
err = io.EOF
}
} else if size == 0 && err == io.EOF && objectInfo.Size > 0 {
// Special cases when server writes more data
// than the content-length, net/http response
// body returns an error, instead of converting
// it to io.EOF - return unexpected EOF.
err = io.ErrUnexpectedEOF
}
// Reply back how much was read.
resCh <- getResponse{
Size: size,
Error: err,
didRead: true,
objectInfo: objectInfo,
}
}
}
}()
@@ -611,6 +602,7 @@ func (o *Object) Close() (err error) {
if o == nil {
return errInvalidArgument("Object is nil")
}
// Locking.
o.mutex.Lock()
defer o.mutex.Unlock()
@@ -623,6 +615,9 @@
// Close successfully.
o.cancel()
// Close the request channel to indicate the internal go-routine to exit.
close(o.reqCh)
// Save for future operations.
errMsg := "Object is already closed. Bad file descriptor."
o.prevErr = errors.New(errMsg)
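
For context, the demand-driven request loop above is what backs the public GetObject API. Below is a minimal consumer-side sketch; the endpoint, credentials, and bucket/object names are hypothetical placeholders, not values from this commit:

package main

import (
    "context"
    "io"
    "log"

    "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
    // Hypothetical endpoint and credentials, for illustration only.
    client, err := minio.New("s3.example.com", &minio.Options{
        Creds: credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
    })
    if err != nil {
        log.Fatal(err)
    }

    // GetObject returns immediately; the goroutine shown in the diff above
    // services each Stat/Read call as a request arriving on reqCh.
    obj, err := client.GetObject(context.Background(), "my-bucket", "my-object", minio.GetObjectOptions{})
    if err != nil {
        log.Fatal(err)
    }
    defer obj.Close() // cancels the context and closes reqCh, ending the goroutine

    info, err := obj.Stat() // first request: served by the StatObject branch, no range header
    if err != nil {
        log.Fatal(err)
    }

    buf := make([]byte, info.Size)
    if _, err := io.ReadFull(obj, buf); err != nil {
        log.Fatal(err)
    }
    log.Printf("read %d bytes", len(buf))
}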

View file

@@ -104,7 +104,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
// Choose hash algorithms to be calculated by hashCopyN,
// avoid sha256 with non-v4 signature request or
// HTTPS connection.
hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5)
hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256)
length, rErr := readFull(reader, buf)
if rErr == io.EOF && partNumber > 1 {
@@ -140,7 +140,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
// Proceed to upload the part.
objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption)
md5Base64, sha256Hex, int64(length),
opts.ServerSideEncryption,
!opts.DisableContentSha256)
if uerr != nil {
return UploadInfo{}, uerr
}
@@ -241,7 +243,7 @@ func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, object
// uploadPart - Uploads a part in a multipart upload.
func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide,
partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide, streamSha256 bool,
) (ObjectPart, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
@@ -289,6 +291,7 @@ func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadI
contentLength: size,
contentMD5Base64: md5Base64,
contentSHA256Hex: sha256Hex,
streamSha256: streamSha256,
}
// Execute PUT on each part.

View file

@@ -130,34 +130,44 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
var complMultipartUpload completeMultipartUpload
// Declare a channel that sends the next part number to be uploaded.
// Buffered to 10000 because thats the maximum number of parts allowed
// by S3.
uploadPartsCh := make(chan uploadPartReq, 10000)
uploadPartsCh := make(chan uploadPartReq)
// Declare a channel that sends back the response of a part upload.
// Buffered to 10000 because thats the maximum number of parts allowed
// by S3.
uploadedPartsCh := make(chan uploadedPartRes, 10000)
uploadedPartsCh := make(chan uploadedPartRes)
// Used for readability, lastPartNumber is always totalPartsCount.
lastPartNumber := totalPartsCount
partitionCtx, partitionCancel := context.WithCancel(ctx)
defer partitionCancel()
// Send each part number to the channel to be processed.
for p := 1; p <= totalPartsCount; p++ {
uploadPartsCh <- uploadPartReq{PartNum: p}
}
close(uploadPartsCh)
go func() {
defer close(uploadPartsCh)
partsBuf := make([][]byte, opts.getNumThreads())
for i := range partsBuf {
partsBuf[i] = make([]byte, 0, partSize)
}
for p := 1; p <= totalPartsCount; p++ {
select {
case <-partitionCtx.Done():
return
case uploadPartsCh <- uploadPartReq{PartNum: p}:
}
}
}()
// Receive each part number from the channel allowing three parallel uploads.
for w := 1; w <= opts.getNumThreads(); w++ {
go func(w int, partSize int64) {
// Each worker will draw from the part channel and upload in parallel.
for uploadReq := range uploadPartsCh {
go func(partSize int64) {
for {
var uploadReq uploadPartReq
var ok bool
select {
case <-ctx.Done():
return
case uploadReq, ok = <-uploadPartsCh:
if !ok {
return
}
// Each worker will draw from the part channel and upload in parallel.
}
// If partNumber was not uploaded we calculate the missing
// part offset and size. For all other part numbers we
@@ -171,22 +181,15 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
partSize = lastPartSize
}
n, rerr := readFull(io.NewSectionReader(reader, readOffset, partSize), partsBuf[w-1][:partSize])
if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
uploadedPartsCh <- uploadedPartRes{
Error: rerr,
}
// Exit the goroutine.
return
}
// Get a section reader on a particular offset.
hookReader := newHook(bytes.NewReader(partsBuf[w-1][:n]), opts.Progress)
sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
// Proceed to upload the part.
objPart, err := c.uploadPart(ctx, bucketName, objectName,
uploadID, hookReader, uploadReq.PartNum,
"", "", partSize, opts.ServerSideEncryption)
uploadID, sectionReader, uploadReq.PartNum,
"", "", partSize,
opts.ServerSideEncryption,
!opts.DisableContentSha256,
)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Error: err,
@@ -205,23 +208,28 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
Part: uploadReq.Part,
}
}
}(w, partSize)
}(partSize)
}
// Gather the responses as they occur and update any
// progress bar.
for u := 1; u <= totalPartsCount; u++ {
uploadRes := <-uploadedPartsCh
if uploadRes.Error != nil {
return UploadInfo{}, uploadRes.Error
select {
case <-ctx.Done():
return UploadInfo{}, ctx.Err()
case uploadRes := <-uploadedPartsCh:
if uploadRes.Error != nil {
return UploadInfo{}, uploadRes.Error
}
// Update the totalUploadedSize.
totalUploadedSize += uploadRes.Size
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: uploadRes.Part.ETag,
PartNumber: uploadRes.Part.PartNumber,
})
}
// Update the totalUploadedSize.
totalUploadedSize += uploadRes.Size
// Store the parts to be completed in order.
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: uploadRes.Part.ETag,
PartNumber: uploadRes.Part.PartNumber,
})
}
// Verify if we uploaded all the data.
@@ -322,7 +330,10 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID,
io.LimitReader(hookReader, partSize),
partNumber, md5Base64, "", partSize, opts.ServerSideEncryption)
partNumber, md5Base64, "", partSize,
opts.ServerSideEncryption,
!opts.DisableContentSha256,
)
if uerr != nil {
return UploadInfo{}, uerr
}
@@ -452,6 +463,7 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
contentLength: size,
contentMD5Base64: md5Base64,
contentSHA256Hex: sha256Hex,
streamSha256: !opts.DisableContentSha256,
}
if opts.Internal.SourceVersionID != "" {
if opts.Internal.SourceVersionID != nullVersionID {

View file

@@ -84,6 +84,7 @@ type PutObjectOptions struct {
PartSize uint64
LegalHold LegalHoldStatus
SendContentMd5 bool
DisableContentSha256 bool
DisableMultipart bool
Internal AdvancedPutOptions
}
@@ -344,7 +345,10 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
// Proceed to upload the part.
objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
md5Base64, "", int64(length), opts.ServerSideEncryption)
md5Base64, "", int64(length),
opts.ServerSideEncryption,
!opts.DisableContentSha256,
)
if uerr != nil {
return UploadInfo{}, uerr
}
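
The new DisableContentSha256 option added above is threaded into uploadPart as streamSha256 = !opts.DisableContentSha256, so streaming SHA-256 signing can be skipped per upload. A minimal usage sketch; endpoint, credentials, file, and bucket/object names are hypothetical placeholders:

package main

import (
    "context"
    "log"
    "os"

    "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
    // Hypothetical endpoint and credentials, for illustration only.
    client, err := minio.New("s3.example.com", &minio.Options{
        Creds: credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
    })
    if err != nil {
        log.Fatal(err)
    }

    f, err := os.Open("payload.bin") // hypothetical local file
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    st, err := f.Stat()
    if err != nil {
        log.Fatal(err)
    }

    // DisableContentSha256 is the field added in this diff; with it set,
    // uploadPart runs with streamSha256=false and the streaming signature
    // path in newRequest is skipped.
    _, err = client.PutObject(context.Background(), "my-bucket", "my-object", f, st.Size(),
        minio.PutObjectOptions{DisableContentSha256: true})
    if err != nil {
        log.Fatal(err)
    }
}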

View file

@@ -111,7 +111,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.29"
libraryVersion = "v7.0.36"
)
// User Agent should always following the below style.
@@ -315,14 +315,16 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
// - For signature v4 request if the connection is insecure compute only sha256.
// - For signature v4 request if the connection is secure compute only md5.
// - For anonymous request compute md5.
func (c *Client) hashMaterials(isMd5Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) {
func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) {
hashSums = make(map[string][]byte)
hashAlgos = make(map[string]md5simd.Hasher)
if c.overrideSignerType.IsV4() {
if c.secure {
hashAlgos["md5"] = c.md5Hasher()
} else {
hashAlgos["sha256"] = c.sha256Hasher()
if isSha256Requested {
hashAlgos["sha256"] = c.sha256Hasher()
}
}
} else {
if c.overrideSignerType.IsAnonymous() {
@@ -377,21 +379,20 @@ func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, erro
atomic.StoreInt32(&c.healthStatus, unknown)
return
case <-timer.C:
timer.Reset(duration)
// Do health check the first time and ONLY if the connection is marked offline
if c.IsOffline() {
gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second)
_, err := c.getBucketLocation(gctx, probeBucketName)
gcancel()
if IsNetworkOrHostDown(err, false) {
// Still network errors do not need to do anything.
continue
}
switch ToErrorResponse(err).Code {
case "NoSuchBucket", "AccessDenied", "":
atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
if !IsNetworkOrHostDown(err, false) {
switch ToErrorResponse(err).Code {
case "NoSuchBucket", "AccessDenied", "":
atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
}
}
}
timer.Reset(duration)
}
}
}(hcDuration)
@@ -417,6 +418,7 @@ type requestMetadata struct {
contentLength int64
contentMD5Base64 string // carries base64 encoded md5sum
contentSHA256Hex string // carries hex encoded sha256sum
streamSha256 bool
}
// dumpHTTP - dump HTTP request and response.
@@ -593,12 +595,11 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
// Initiate the request.
res, err = c.do(req)
if err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return nil, err
if isRequestErrorRetryable(err) {
// Retry the request
continue
}
// Retry the request
continue
return nil, err
}
// For any known successful http status, return quickly.
@@ -812,7 +813,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
case signerType.IsV2():
// Add signature version '2' authorization header.
req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
case metadata.objectName != "" && metadata.queryValues == nil && method == http.MethodPut && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure:
case metadata.streamSha256 && !c.secure:
// Streaming signature is used by default for a PUT object request. Additionally we also
// look if the initialized client is secure, if yes then we don't need to perform
// streaming signature.

View file

@@ -88,7 +88,8 @@ func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarke
// PutObjectPart - Upload an object part.
func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse)
streamSha256 := true
return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse, streamSha256)
}
// ListObjectParts - List uploaded parts of an incomplete upload.x

View file

@@ -20,6 +20,7 @@ package minio
import (
"fmt"
"io"
"sync"
)
// hookReader hooks additional reader in the source stream. It is
@@ -27,6 +28,7 @@ import (
// notified about the exact number of bytes read from the primary
// source on each Read operation.
type hookReader struct {
mu sync.RWMutex
source io.Reader
hook io.Reader
}
@@ -34,6 +36,9 @@ type hookReader struct {
// Seek implements io.Seeker. Seeks source first, and if necessary
// seeks hook if Seek method is appropriately found.
func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
hr.mu.Lock()
defer hr.mu.Unlock()
// Verify for source has embedded Seeker, use it.
sourceSeeker, ok := hr.source.(io.Seeker)
if ok {
@@ -43,18 +48,21 @@ func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
}
}
// Verify if hook has embedded Seeker, use it.
hookSeeker, ok := hr.hook.(io.Seeker)
if ok {
var m int64
m, err = hookSeeker.Seek(offset, whence)
if err != nil {
return 0, err
}
if n != m {
return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n)
if hr.hook != nil {
// Verify if hook has embedded Seeker, use it.
hookSeeker, ok := hr.hook.(io.Seeker)
if ok {
var m int64
m, err = hookSeeker.Seek(offset, whence)
if err != nil {
return 0, err
}
if n != m {
return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n)
}
}
}
return n, nil
}
@@ -62,14 +70,19 @@ func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
// value 'n' number of bytes are reported through the hook. Returns
// error for all non io.EOF conditions.
func (hr *hookReader) Read(b []byte) (n int, err error) {
hr.mu.RLock()
defer hr.mu.RUnlock()
n, err = hr.source.Read(b)
if err != nil && err != io.EOF {
return n, err
}
// Progress the hook with the total read bytes from the source.
if _, herr := hr.hook.Read(b[:n]); herr != nil {
if herr != io.EOF {
return n, herr
if hr.hook != nil {
// Progress the hook with the total read bytes from the source.
if _, herr := hr.hook.Read(b[:n]); herr != nil {
if herr != io.EOF {
return n, herr
}
}
}
return n, err
@@ -79,7 +92,10 @@ func (hr *hookReader) Read(b []byte) (n int, err error) {
// reports the data read from the source to the hook.
func newHook(source, hook io.Reader) io.Reader {
if hook == nil {
return source
return &hookReader{source: source}
}
return &hookReader{
source: source,
hook: hook,
}
return &hookReader{source, hook}
}

View file

@@ -21,7 +21,6 @@ import (
"os"
"path/filepath"
homedir "github.com/mitchellh/go-homedir"
ini "gopkg.in/ini.v1"
)
@@ -62,7 +61,7 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) {
if p.Filename == "" {
p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
if p.Filename == "" {
homeDir, err := homedir.Dir()
homeDir, err := os.UserHomeDir()
if err != nil {
return Value{}, err
}

View file

@@ -24,7 +24,6 @@ import (
"runtime"
jsoniter "github.com/json-iterator/go"
homedir "github.com/mitchellh/go-homedir"
)
// A FileMinioClient retrieves credentials from the current user's home
@@ -65,7 +64,7 @@ func (p *FileMinioClient) Retrieve() (Value, error) {
if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
p.Filename = value
} else {
homeDir, err := homedir.Dir()
homeDir, err := os.UserHomeDir()
if err != nil {
return Value{}, err
}

View file

@@ -289,7 +289,10 @@ func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody,
}
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
token, _ := fetchIMDSToken(client, endpoint)
token, err := fetchIMDSToken(client, endpoint)
if err != nil {
return ec2RoleCredRespBody{}, err
}
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
u, err := getIAMRoleURL(endpoint)

View file

@@ -1,6 +1,6 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
* Copyright 2019-2022 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,6 +25,7 @@ import (
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
)
@@ -122,12 +123,14 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
if err != nil {
return AssumeRoleWithClientGrantsResponse{}, err
}
u.RawQuery = v.Encode()
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
if err != nil {
return AssumeRoleWithClientGrantsResponse{}, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := clnt.Do(req)
if err != nil {
return AssumeRoleWithClientGrantsResponse{}, err

View file

@@ -89,12 +89,12 @@ func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
if err != nil {
return value, stripPassword(err)
return value, err
}
resp, err := c.Client.Do(req)
if err != nil {
return value, stripPassword(err)
return value, err
}
defer resp.Body.Close()

View file

@@ -1,6 +1,6 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019-2021 MinIO, Inc.
* Copyright 2019-2022 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import (
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
)
@@ -105,22 +106,6 @@ func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt {
}
}
func stripPassword(err error) error {
urlErr, ok := err.(*url.Error)
if ok {
u, _ := url.Parse(urlErr.URL)
if u == nil {
return urlErr
}
values := u.Query()
values.Set("LDAPPassword", "xxxxx")
u.RawQuery = values.Encode()
urlErr.URL = u.String()
return urlErr
}
return err
}
// NewLDAPIdentityWithSessionPolicy returns new credentials object that uses
// LDAP Identity with a specified session policy. The `policy` parameter must be
// a JSON string specifying the policy document.
@@ -156,16 +141,16 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds())))
}
u.RawQuery = v.Encode()
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
if err != nil {
return value, stripPassword(err)
return value, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := k.Client.Do(req)
if err != nil {
return value, stripPassword(err)
return value, err
}
defer resp.Body.Close()

View file

@@ -1,6 +1,6 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
* Copyright 2019-2022 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -26,6 +26,7 @@ import (
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
@@ -139,13 +140,13 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
return AssumeRoleWithWebIdentityResponse{}, err
}
u.RawQuery = v.Encode()
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
if err != nil {
return AssumeRoleWithWebIdentityResponse{}, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := clnt.Do(req)
if err != nil {
return AssumeRoleWithWebIdentityResponse{}, err

View file

@@ -329,15 +329,15 @@ type Expiration struct {
XMLName xml.Name `xml:"Expiration,omitempty" json:"-"`
Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"`
DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"`
}
// MarshalJSON customizes json encoding by removing empty day/date specification.
func (e Expiration) MarshalJSON() ([]byte, error) {
type expiration struct {
Date *ExpirationDate `json:"Date,omitempty"`
Days *ExpirationDays `json:"Days,omitempty"`
DeleteMarker ExpireDeleteMarker
Date *ExpirationDate `json:"Date,omitempty"`
Days *ExpirationDays `json:"Days,omitempty"`
DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"`
}
newexp := expiration{

View file

@@ -197,8 +197,8 @@ func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
if min < 0 {
return errInvalidArgument("Minimum limit cannot be negative.")
}
if max < 0 {
return errInvalidArgument("Maximum limit cannot be negative.")
if max <= 0 {
return errInvalidArgument("Maximum limit cannot be non-positive.")
}
p.contentLengthRange.min = min
p.contentLengthRange.max = max
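
SetContentLengthRange now rejects a non-positive maximum instead of only a negative one. A minimal presigned-POST sketch showing where the check applies; endpoint, credentials, and bucket/object names are hypothetical placeholders:

package main

import (
    "context"
    "log"
    "time"

    "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
    // Hypothetical endpoint and credentials, for illustration only.
    client, err := minio.New("s3.example.com", &minio.Options{
        Creds: credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
    })
    if err != nil {
        log.Fatal(err)
    }

    policy := minio.NewPostPolicy()
    if err := policy.SetBucket("my-bucket"); err != nil {
        log.Fatal(err)
    }
    if err := policy.SetKey("my-object"); err != nil {
        log.Fatal(err)
    }
    if err := policy.SetExpires(time.Now().UTC().Add(15 * time.Minute)); err != nil {
        log.Fatal(err)
    }
    // After this change the maximum must be strictly positive, so a
    // zero-valued max is rejected rather than silently accepted.
    if err := policy.SetContentLengthRange(1024, 10*1024*1024); err != nil {
        log.Fatal(err)
    }

    u, formData, err := client.PresignedPostPolicy(context.Background(), policy)
    if err != nil {
        log.Fatal(err)
    }
    log.Println(u, formData)
}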

View file

@@ -19,7 +19,10 @@ package minio
import (
"context"
"crypto/x509"
"errors"
"net/http"
"net/url"
"time"
)
@@ -123,3 +126,23 @@ func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
_, ok = retryableHTTPStatusCodes[httpStatusCode]
return ok
}
// For now, all http Do() requests are retriable except some well defined errors
func isRequestErrorRetryable(err error) bool {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return false
}
if ue, ok := err.(*url.Error); ok {
e := ue.Unwrap()
switch e.(type) {
// x509: certificate signed by unknown authority
case x509.UnknownAuthorityError:
return false
}
switch e.Error() {
case "http: server gave HTTP response to HTTPS client":
return false
}
}
return true
}