	[chore]: Bump github.com/minio/minio-go/v7 from 7.0.62 to 7.0.63 (#2180)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
parent ddd3c2e44b
commit c0bddd272f
8 changed files with 183 additions and 108 deletions

vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go (generated, vendored) | 10
@@ -48,6 +48,10 @@ type SnowballOptions struct {
	// Compression will typically reduce memory and network usage,
	// Compression can safely be enabled with MinIO hosts.
	Compress bool

	// SkipErrs if enabled will skip any errors while reading the
	// object content while creating the snowball archive
	SkipErrs bool
}

// SnowballObject contains information about a single object to be added to the snowball.
@@ -184,10 +188,16 @@ objectLoop:
			n, err := io.Copy(t, obj.Content)
			if err != nil {
				closeObj()
				if opts.SkipErrs {
					continue
				}
				return err
			}
			if n != obj.Size {
				closeObj()
				if opts.SkipErrs {
					continue
				}
				return io.ErrUnexpectedEOF
			}
			closeObj()
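
The new SkipErrs option sits alongside Compress in SnowballOptions and, per the hunk above, makes the snowball upload skip an object whose content cannot be read (or comes up short) instead of aborting the whole archive. A minimal usage sketch follows; the endpoint, credentials, bucket and object names are placeholders, not part of this commit:

// Sketch: batch upload via the snowball API with SkipErrs enabled.
package main

import (
	"context"
	"log"
	"strings"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	objects := make(chan minio.SnowballObject, 1)
	go func() {
		defer close(objects)
		body := "hello"
		objects <- minio.SnowballObject{
			Key:     "docs/hello.txt",
			Size:    int64(len(body)),
			ModTime: time.Now(),
			Content: strings.NewReader(body),
		}
	}()

	opts := minio.SnowballOptions{
		Compress: true,
		SkipErrs: true, // new in v7.0.63: unreadable objects are skipped, not fatal
	}
	if err := client.PutObjectsSnowball(context.Background(), "my-bucket", opts, objects); err != nil {
		log.Fatal(err)
	}
}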
							
								
								
									
vendor/github.com/minio/minio-go/v7/api.go (generated, vendored) | 2
@@ -127,7 +127,7 @@ type Options struct {
// Global constants.
const (
	libraryName    = "minio-go"
	libraryVersion = "v7.0.62"
	libraryVersion = "v7.0.63"
)

// User Agent should always following the below style.
							
								
								
									
vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go (generated, vendored) | 8
@@ -291,7 +291,13 @@ func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody,
	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
	token, err := fetchIMDSToken(client, endpoint)
	if err != nil {
		return ec2RoleCredRespBody{}, err
		// Return only errors for valid situations, if the IMDSv2 is not enabled
		// we will not be able to get the token, in such a situation we have
		// to rely on IMDSv1 behavior as a fallback, this check ensures that.
		// Refer https://github.com/minio/minio-go/issues/1866
		if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
			return ec2RoleCredRespBody{}, err
		}
	}

	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
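
For callers this change is transparent: clients built with IAM role credentials simply stop failing on instances where the IMDSv2 token request times out and only IMDSv1 is reachable. A minimal sketch of the public entry point that exercises this code path; the S3 endpoint here is a placeholder:

// Sketch: EC2 IAM role credentials using the default instance metadata service.
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Empty endpoint: use the default EC2 instance metadata service.
	iamCreds := credentials.NewIAM("")

	client, err := minio.New("s3.amazonaws.com", &minio.Options{
		Creds:  iamCreds,
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	buckets, err := client.ListBuckets(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range buckets {
		log.Println(b.Name)
	}
}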
							
								
								
									
vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go (generated, vendored) | 194
@@ -689,10 +689,19 @@ func (e ExistingObjectReplication) Validate() error {
// TargetMetrics represents inline replication metrics
// such as pending, failed and completed bytes in total for a bucket remote target
type TargetMetrics struct {
	// Pending size in bytes
	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
	// Completed count
	ReplicatedCount uint64 `json:"replicationCount,omitempty"`
	// Completed size in bytes
	ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
	// Bandwidth limit in bytes/sec for this target
	BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"`
	// Current bandwidth used in bytes/sec for this target
	CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"`
	// errors seen in replication in last minute, hour and total
	Failed TimedErrStats `json:"failed,omitempty"`
	// Deprecated fields
	// Pending size in bytes
	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
	// Total Replica size in bytes
	ReplicaSize uint64 `json:"replicaSize,omitempty"`
	// Failed size in bytes
@@ -701,37 +710,62 @@ type TargetMetrics struct {
	PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
	// Total number of failed operations including metadata updates
	FailedCount uint64 `json:"failedReplicationCount,omitempty"`
	// Bandwidth limit in bytes/sec for this target
	BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"`
	// Current bandwidth used in bytes/sec for this target
	CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"`
	// Completed count
	ReplicatedCount uint64 `json:"replicationCount,omitempty"`
	// transfer rate for large uploads
	XferRateLrg XferStats `json:"largeTransferRate"`
	// transfer rate for small uploads
	XferRateSml XferStats `json:"smallTransferRate"`
}

// Metrics represents inline replication metrics for a bucket.
type Metrics struct {
	Stats map[string]TargetMetrics
	// Total Pending size in bytes across targets
	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
	// Completed size in bytes across targets
	ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
	// Total Replica size in bytes across targets
	ReplicaSize uint64 `json:"replicaSize,omitempty"`
	// Total Replica counts
	ReplicaCount int64 `json:"replicaCount,omitempty"`
	// Total Replicated count
	ReplicatedCount int64 `json:"replicationCount,omitempty"`
	// errors seen in replication in last minute, hour and total
	Errors TimedErrStats `json:"failed,omitempty"`
	// Total number of entries that are queued for replication
	QStats InQueueMetric `json:"queued"`
	// Deprecated fields
	// Total Pending size in bytes across targets
	PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
	// Failed size in bytes across targets
	FailedSize uint64 `json:"failedReplicationSize,omitempty"`
	// Total number of pending operations including metadata updates across targets
	PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
	// Total number of failed operations including metadata updates across targets
	FailedCount uint64 `json:"failedReplicationCount,omitempty"`
	// Total Replica counts
	ReplicaCount int64 `json:"replicaCount,omitempty"`
	// Total Replicated count
	ReplicatedCount int64 `json:"replicationCount,omitempty"`
}

// RStat - has count and bytes for replication metrics
type RStat struct {
	Count float64 `json:"count"`
	Bytes int64   `json:"bytes"`
}

// Add two RStat
func (r RStat) Add(r1 RStat) RStat {
	return RStat{
		Count: r.Count + r1.Count,
		Bytes: r.Bytes + r1.Bytes,
	}
}

// TimedErrStats holds error stats for a time period
type TimedErrStats struct {
	LastMinute RStat `json:"lastMinute"`
	LastHour   RStat `json:"lastHour"`
	Totals     RStat `json:"totals"`
}

// Add two TimedErrStats
func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats {
	return TimedErrStats{
		LastMinute: te.LastMinute.Add(o.LastMinute),
		LastHour:   te.LastHour.Add(o.LastHour),
		Totals:     te.Totals.Add(o.Totals),
	}
}

// ResyncTargetsInfo provides replication target information to resync replicated data.
@@ -767,10 +801,30 @@ type XferStats struct {
	CurrRate float64 `json:"currRate"`
}

// InQueueStats holds stats for objects in replication queue
type InQueueStats struct {
	Count int32 `json:"count"`
	Bytes int64 `json:"bytes"`
// Merge two XferStats
func (x *XferStats) Merge(x1 XferStats) {
	x.AvgRate += x1.AvgRate
	x.PeakRate += x1.PeakRate
	x.CurrRate += x1.CurrRate
}

// QStat holds count and bytes for objects in replication queue
type QStat struct {
	Count float64 `json:"count"`
	Bytes float64 `json:"bytes"`
}

// Add 2 QStat entries
func (q *QStat) Add(q1 QStat) {
	q.Count += q1.Count
	q.Bytes += q1.Bytes
}

// InQueueMetric holds stats for objects in replication queue
type InQueueMetric struct {
	Curr QStat `json:"curr" msg:"cq"`
	Avg  QStat `json:"avg" msg:"aq"`
	Max  QStat `json:"peak" msg:"pq"`
}

// MetricName name of replication metric
@@ -785,16 +839,34 @@ const (
	Total MetricName = "Total"
)

// WorkerStat has stats on number of replication workers
type WorkerStat struct {
	Curr int32   `json:"curr"`
	Avg  float32 `json:"avg"`
	Max  int32   `json:"max"`
}

// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes
// and number of entries that failed replication after 3 retries
type ReplMRFStats struct {
	LastFailedCount uint64 `json:"failedCount_last5min"`
	// Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
	TotalDroppedCount uint64 `json:"droppedCount_since_uptime"`
	// Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
	TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"`
}

// ReplQNodeStats holds stats for a node in replication queue
type ReplQNodeStats struct {
	NodeName      string `json:"nodeName"`
	Uptime        int64  `json:"uptime"`
	ActiveWorkers int32  `json:"activeWorkers"`
	NodeName string     `json:"nodeName"`
	Uptime   int64      `json:"uptime"`
	Workers  WorkerStat `json:"activeWorkers"`

	XferStats    map[MetricName]XferStats            `json:"xferStats"`
	TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`
	XferStats    map[MetricName]XferStats            `json:"transferSummary"`
	TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"`

	QStats map[MetricName]InQueueStats `json:"qStats"`
	QStats   InQueueMetric `json:"queueStats"`
	MRFStats ReplMRFStats  `json:"mrfStats"`
}

// ReplQueueStats holds stats for replication queue across nodes
@@ -803,33 +875,54 @@ type ReplQueueStats struct {
}

// Workers returns number of workers across all nodes
func (q ReplQueueStats) Workers() int64 {
	var workers int64
func (q ReplQueueStats) Workers() (tot WorkerStat) {
	for _, node := range q.Nodes {
		workers += int64(node.ActiveWorkers)
		tot.Avg += node.Workers.Avg
		tot.Curr += node.Workers.Curr
		if tot.Max < node.Workers.Max {
			tot.Max = node.Workers.Max
		}
	}
	return workers
	if len(q.Nodes) > 0 {
		tot.Avg /= float32(len(q.Nodes))
		tot.Curr /= int32(len(q.Nodes))
	}
	return tot
}

// qStatSummary returns cluster level stats for objects in replication queue
func (q ReplQueueStats) qStatSummary() InQueueMetric {
	m := InQueueMetric{}
	for _, v := range q.Nodes {
		m.Avg.Add(v.QStats.Avg)
		m.Curr.Add(v.QStats.Curr)
		if m.Max.Count < v.QStats.Max.Count {
			m.Max.Add(v.QStats.Max)
		}
	}
	return m
}

// ReplQStats holds stats for objects in replication queue
type ReplQStats struct {
	Uptime  int64 `json:"uptime"`
	Workers int64 `json:"workers"`
	Uptime  int64      `json:"uptime"`
	Workers WorkerStat `json:"workers"`

	XferStats    map[MetricName]XferStats            `json:"xferStats"`
	TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`

	QStats map[MetricName]InQueueStats `json:"qStats"`
	QStats   InQueueMetric `json:"qStats"`
	MRFStats ReplMRFStats  `json:"mrfStats"`
}

// QStats returns cluster level stats for objects in replication queue
func (q ReplQueueStats) QStats() (r ReplQStats) {
	r.QStats = make(map[MetricName]InQueueStats)
	r.QStats = q.qStatSummary()
	r.XferStats = make(map[MetricName]XferStats)
	r.TgtXferStats = make(map[string]map[MetricName]XferStats)
	r.Workers = q.Workers()

	for _, node := range q.Nodes {
		r.Workers += int64(node.ActiveWorkers)
		for arn := range node.TgtXferStats {
			xmap, ok := node.TgtXferStats[arn]
			if !ok {
@@ -859,39 +952,20 @@ func (q ReplQueueStats) QStats() (r ReplQStats) {
			st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
			r.XferStats[k] = st
		}
		for k, v := range node.QStats {
			st, ok := r.QStats[k]
			if !ok {
				st = InQueueStats{}
			}
			st.Count += v.Count
			st.Bytes += v.Bytes
			r.QStats[k] = st
		}
		r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount
		r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount
		r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes
		r.Uptime += node.Uptime
	}
	if len(q.Nodes) > 0 {
		for k := range r.XferStats {
			st := r.XferStats[k]
			st.AvgRate /= float64(len(q.Nodes))
			st.CurrRate /= float64(len(q.Nodes))
			r.XferStats[k] = st
		}
		for arn := range r.TgtXferStats {
			for m, v := range r.TgtXferStats[arn] {
				v.AvgRate /= float64(len(q.Nodes))
				v.CurrRate /= float64(len(q.Nodes))
				r.TgtXferStats[arn][m] = v
			}
		}
		r.Uptime /= int64(len(q.Nodes)) // average uptime
	}

	return
}

// MetricsV2 represents replication metrics for a bucket.
type MetricsV2 struct {
	Uptime       int64          `json:"uptime"`
	CurrentStats Metrics        `json:"currStats"`
	QueueStats   ReplQueueStats `json:"queueStats"`
}
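
The new QStat, TimedErrStats and WorkerStat helpers introduced above are plain value types; their Add methods do the per-node summation that the rewritten QStats() relies on. A small sketch of that aggregation pattern with made-up numbers:

// Sketch: aggregating the new replication metric types across two nodes.
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	// Error stats reported by two hypothetical nodes.
	nodeA := replication.TimedErrStats{LastMinute: replication.RStat{Count: 2, Bytes: 1024}}
	nodeB := replication.TimedErrStats{LastMinute: replication.RStat{Count: 1, Bytes: 512}}
	totalErrs := nodeA.Add(nodeB)
	fmt.Printf("errors last minute: count=%v bytes=%v\n",
		totalErrs.LastMinute.Count, totalErrs.LastMinute.Bytes)

	// Queue-depth aggregation, mirroring what qStatSummary does per node.
	var curr replication.QStat
	curr.Add(replication.QStat{Count: 5, Bytes: 4096})
	curr.Add(replication.QStat{Count: 3, Bytes: 2048})
	fmt.Printf("queued now: count=%v bytes=%v\n", curr.Count, curr.Bytes)
}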
							
								
								
									
vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go (generated, vendored) | 69
@@ -121,49 +121,54 @@ func GetRegionFromURL(endpointURL url.URL) string {
	if endpointURL.Host == "s3-external-1.amazonaws.com" {
		return ""
	}
	if IsAmazonGovCloudEndpoint(endpointURL) {
		return "us-gov-west-1"
	}

	// if elb's are used we cannot calculate which region it may be, just return empty.
	if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) {
		return ""
	}
	parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)

	// We check for FIPS dualstack matching first to avoid the non-greedy
	// regex for FIPS non-dualstack matching a dualstack URL
	parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
	if len(parts) > 1 {
		return parts[1]
	}
	if IsAmazonFIPSUSEastWestEndpoint(endpointURL) {
		// We check for FIPS dualstack matching first to avoid the non-greedy
		// regex for FIPS non-dualstack matching a dualstack URL
		parts = amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
		if len(parts) > 1 {
			return parts[1]
		}
		parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
		if len(parts) > 1 {
			return parts[1]
		}

	parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
	if len(parts) > 1 {
		return parts[1]
	}

	parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
	if len(parts) > 1 {
		return parts[1]
	}

	parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
	if len(parts) > 1 {
		return parts[1]
	}

	parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
	if len(parts) > 1 {
		return parts[1]
	}

	parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host)
	if len(parts) > 1 {
		return parts[1]
	}

	parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
	if len(parts) > 1 {
		return parts[1]
	}

	parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host)
	if len(parts) > 1 {
		return parts[1]
	}

	return ""
}

@@ -186,45 +191,25 @@ func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
		return false
	}
	return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
		endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" ||
		IsAmazonFIPSGovCloudEndpoint(endpointURL))
}

// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
// See https://aws.amazon.com/compliance/fips.
// IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud.
func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
	if endpointURL == sentinelURL {
		return false
	}
	return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" ||
		endpointURL.Host == "s3-fips.us-gov-west-1.amazonaws.com" ||
		endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com"
}

// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint.
// See https://aws.amazon.com/compliance/fips.
func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool {
	if endpointURL == sentinelURL {
		return false
	}
	switch endpointURL.Host {
	case "s3-fips.us-east-2.amazonaws.com":
	case "s3-fips.dualstack.us-west-1.amazonaws.com":
	case "s3-fips.dualstack.us-west-2.amazonaws.com":
	case "s3-fips.dualstack.us-east-2.amazonaws.com":
	case "s3-fips.dualstack.us-east-1.amazonaws.com":
	case "s3-fips.us-west-1.amazonaws.com":
	case "s3-fips.us-west-2.amazonaws.com":
	case "s3-fips.us-east-1.amazonaws.com":
	default:
		return false
	}
	return true
	return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Host, "us-gov-")
}

// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint.
// See https://aws.amazon.com/compliance/fips.
func IsAmazonFIPSEndpoint(endpointURL url.URL) bool {
	return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL)
	if endpointURL == sentinelURL {
		return false
	}
	return strings.HasPrefix(endpointURL.Host, "s3-fips") && strings.HasSuffix(endpointURL.Host, ".amazonaws.com")
}

// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint
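
With the rewrite above, FIPS (and GovCloud FIPS) endpoints are recognized by host prefix/suffix and regex matching rather than a fixed host list, and GetRegionFromURL now walks through a broader set of host patterns (FIPS, dualstack, China, PrivateLink). A small sketch; the hosts below are just illustrative inputs:

// Sketch: classifying endpoints with the updated s3utils helpers.
package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	hosts := []string{
		"https://s3-fips.dualstack.us-east-2.amazonaws.com",
		"https://s3-fips.us-gov-west-1.amazonaws.com",
		"https://s3.cn-north-1.amazonaws.com.cn",
	}
	for _, h := range hosts {
		u, err := url.Parse(h)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-45s region=%q fips=%v\n",
			u.Host, s3utils.GetRegionFromURL(*u), s3utils.IsAmazonFIPSEndpoint(*u))
	}
}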