Mirror of https://github.com/superseriousbusiness/gotosocial.git
[performance]: make s3 urls cacheable (#1194)

Implements #864 and should speed up S3-based installations by a lot. With more static URLs, we can then also implement #1026 for even better performance when used in conjunction with CDNs.
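The idea behind the change: rather than presigning a fresh GET URL for every media request (each with its own signature and short expiry), the S3 driver now presigns once with a 24-hour expiry and keeps the result in a TTL cache, so the same object key keeps resolving to the same URL and browsers or CDNs can cache it. A minimal sketch of that pattern is below, assuming only minio-go; the URLCache type, the mutex-guarded map, and the grace window are illustrative stand-ins for the codeberg.org/gruf/go-cache/v3 cache the commit actually wires in.

// Package storagesketch is an illustrative sketch, not GoToSocial code:
// it shows the "presign once, cache for (almost) the TTL" pattern that
// this commit applies to media URLs.
package storagesketch

import (
    "context"
    "net/url"
    "sync"
    "time"

    "github.com/minio/minio-go/v7"
)

// cachedURL pairs a presigned URL with the time it was generated.
type cachedURL struct {
    u       *url.URL
    created time.Time
}

// URLCache hands out presigned GET URLs for objects in one bucket,
// reusing each URL until shortly before it expires so that clients and
// CDNs keep seeing a stable, cacheable address.
type URLCache struct {
    client *minio.Client
    bucket string
    ttl    time.Duration // presign expiry, e.g. 24h
    grace  time.Duration // stop reusing this long before expiry, e.g. 5m

    mu      sync.Mutex
    entries map[string]cachedURL
}

// NewURLCache builds a cache around an existing minio client.
func NewURLCache(client *minio.Client, bucket string, ttl, grace time.Duration) *URLCache {
    return &URLCache{
        client:  client,
        bucket:  bucket,
        ttl:     ttl,
        grace:   grace,
        entries: make(map[string]cachedURL),
    }
}

// Get returns a presigned GET URL for key, generating a new one only when
// the cached entry is missing or close to expiring.
func (c *URLCache) Get(ctx context.Context, key string) (*url.URL, error) {
    c.mu.Lock()
    if e, ok := c.entries[key]; ok && time.Since(e.created) < c.ttl-c.grace {
        c.mu.Unlock()
        return e.u, nil // still valid: return the exact same URL as before
    }
    c.mu.Unlock()

    // Cache miss (or nearly expired): ask S3 for a fresh presigned URL.
    u, err := c.client.PresignedGetObject(ctx, c.bucket, key, c.ttl, url.Values{})
    if err != nil {
        return nil, err
    }

    c.mu.Lock()
    c.entries[key] = cachedURL{u: u, created: time.Now()}
    c.mu.Unlock()
    return u, nil
}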
commit bc917a4085
parent 281f712477

3 changed files with 82 additions and 94 deletions
@@ -73,7 +73,7 @@ var Start action.GTSAction = func(ctx context.Context) error {
     router := testrig.NewTestRouter(dbService)
     var storageBackend *storage.Driver
     if os.Getenv("GTS_STORAGE_BACKEND") == "s3" {
-        storageBackend = testrig.NewS3Storage()
+        storageBackend, _ = storage.NewS3Storage()
     } else {
         storageBackend = testrig.NewInMemoryStorage()
     }

@@ -26,6 +26,7 @@ import (
     "path"
     "time"

+    "codeberg.org/gruf/go-cache/v3"
     "codeberg.org/gruf/go-store/v2/kv"
     "codeberg.org/gruf/go-store/v2/storage"
     "github.com/minio/minio-go/v7"
@@ -33,6 +34,11 @@ import (
     "github.com/superseriousbusiness/gotosocial/internal/config"
 )

+const (
+    urlCacheTTL             = time.Hour * 24
+    urlCacheExpiryFrequency = time.Minute * 5
+)
+
 // ErrAlreadyExists is a ptr to underlying storage.ErrAlreadyExists,
 // to put the related errors in the same package as our storage wrapper.
 var ErrAlreadyExists = storage.ErrAlreadyExists
@@ -44,8 +50,9 @@ type Driver struct {
     Storage storage.Storage

     // S3-only parameters
-    Proxy  bool
-    Bucket string
+    Proxy          bool
+    Bucket         string
+    PresignedCache cache.Cache[string, *url.URL]
 }

 // URL will return a presigned GET object URL, but only if running on S3 storage with proxying disabled.
@@ -56,74 +63,90 @@ func (d *Driver) URL(ctx context.Context, key string) *url.URL {
         return nil
     }

-    // If URL request fails, fallback is to fetch the file. So ignore the error here
-    url, _ := s3.Client().PresignedGetObject(ctx, d.Bucket, key, time.Hour, url.Values{
+    if u, ok := d.PresignedCache.Get(key); ok {
+        return u
+    }
+
+    u, err := s3.Client().PresignedGetObject(ctx, d.Bucket, key, urlCacheTTL, url.Values{
         "response-content-type": []string{mime.TypeByExtension(path.Ext(key))},
     })
-
-    return url
+    if err != nil {
+        // If URL request fails, fallback is to fetch the file. So ignore the error here
+        return nil
+    }
+    d.PresignedCache.Set(key, u)
+    return u
 }

 func AutoConfig() (*Driver, error) {
-    var st storage.Storage
-
     switch backend := config.GetStorageBackend(); backend {
     case "s3":
-        // Load runtime configuration
-        endpoint := config.GetStorageS3Endpoint()
-        access := config.GetStorageS3AccessKey()
-        secret := config.GetStorageS3SecretKey()
-        secure := config.GetStorageS3UseSSL()
-        bucket := config.GetStorageS3BucketName()
-
-        // Open the s3 storage implementation
-        s3, err := storage.OpenS3(endpoint, bucket, &storage.S3Config{
-            CoreOpts: minio.Options{
-                Creds:  credentials.NewStaticV4(access, secret, ""),
-                Secure: secure,
-            },
-            GetOpts:      minio.GetObjectOptions{},
-            PutOpts:      minio.PutObjectOptions{},
-            PutChunkSize: 5 * 1024 * 1024, // 5MiB
-            StatOpts:     minio.StatObjectOptions{},
-            RemoveOpts:   minio.RemoveObjectOptions{},
-            ListSize:     200,
-        })
-        if err != nil {
-            return nil, fmt.Errorf("error opening s3 storage: %w", err)
-        }
-
-        // Set storage impl
-        st = s3
-
+        return NewS3Storage()
     case "local":
-        // Load runtime configuration
-        basePath := config.GetStorageLocalBasePath()
-
-        // Open the disk storage implementation
-        disk, err := storage.OpenDisk(basePath, &storage.DiskConfig{
-            // Put the store lockfile in the storage dir itself.
-            // Normally this would not be safe, since we could end up
-            // overwriting the lockfile if we store a file called 'store.lock'.
-            // However, in this case it's OK because the keys are set by
-            // GtS and not the user, so we know we're never going to overwrite it.
-            LockFile: path.Join(basePath, "store.lock"),
-        })
-        if err != nil {
-            return nil, fmt.Errorf("error opening disk storage: %w", err)
-        }
-
-        // Set storage impl
-        st = disk
-
+        return NewFileStorage()
     default:
         return nil, fmt.Errorf("invalid storage backend: %s", backend)
     }

+}
+
+func NewFileStorage() (*Driver, error) {
+    // Load runtime configuration
+    basePath := config.GetStorageLocalBasePath()
+
+    // Open the disk storage implementation
+    disk, err := storage.OpenDisk(basePath, &storage.DiskConfig{
+        // Put the store lockfile in the storage dir itself.
+        // Normally this would not be safe, since we could end up
+        // overwriting the lockfile if we store a file called 'store.lock'.
+        // However, in this case it's OK because the keys are set by
+        // GtS and not the user, so we know we're never going to overwrite it.
+        LockFile: path.Join(basePath, "store.lock"),
+    })
+    if err != nil {
+        return nil, fmt.Errorf("error opening disk storage: %w", err)
+    }
+
     return &Driver{
-        KVStore: kv.New(st),
-        Proxy:   config.GetStorageS3Proxy(),
-        Bucket:  config.GetStorageS3BucketName(),
-        Storage: st,
+        KVStore: kv.New(disk),
+        Storage: disk,
+    }, nil
+}
+
+func NewS3Storage() (*Driver, error) {
+    // Load runtime configuration
+    endpoint := config.GetStorageS3Endpoint()
+    access := config.GetStorageS3AccessKey()
+    secret := config.GetStorageS3SecretKey()
+    secure := config.GetStorageS3UseSSL()
+    bucket := config.GetStorageS3BucketName()
+
+    // Open the s3 storage implementation
+    s3, err := storage.OpenS3(endpoint, bucket, &storage.S3Config{
+        CoreOpts: minio.Options{
+            Creds:  credentials.NewStaticV4(access, secret, ""),
+            Secure: secure,
+        },
+        GetOpts:      minio.GetObjectOptions{},
+        PutOpts:      minio.PutObjectOptions{},
+        PutChunkSize: 5 * 1024 * 1024, // 5MiB
+        StatOpts:     minio.StatObjectOptions{},
+        RemoveOpts:   minio.RemoveObjectOptions{},
+        ListSize:     200,
+    })
+    if err != nil {
+        return nil, fmt.Errorf("error opening s3 storage: %w", err)
+    }
+
+    // ttl should be lower than the expiry used by S3 to avoid serving invalid URLs
+    presignedCache := cache.New[string, *url.URL](0, 1000, urlCacheTTL-urlCacheExpiryFrequency)
+    presignedCache.Start(urlCacheExpiryFrequency)
+
+    return &Driver{
+        KVStore:        kv.New(s3),
+        Proxy:          config.GetStorageS3Proxy(),
+        Bucket:         config.GetStorageS3BucketName(),
+        Storage:        s3,
+        PresignedCache: presignedCache,
     }, nil
 }

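The URL() doc comment above defines the contract callers rely on: a non-nil result is a presigned (and now cached) URL, while nil means proxying is enabled, the backend is local, or presigning failed, and the caller should fetch the file itself. A hedged sketch of a caller acting on that contract follows; the mediaStore interface, its GetStream method, and the serveMedia handler are assumptions for illustration, not GoToSocial's actual media-serving code.

// An illustrative handler, not GoToSocial's real media code: it shows how
// a caller acts on the Driver.URL contract above (non-nil URL: redirect;
// nil: serve the bytes yourself).
package storagesketch

import (
    "context"
    "io"
    "net/http"
    "net/url"
)

// mediaStore is the small interface this sketch assumes the storage
// driver satisfies: a cached presigned URL (or nil), plus a byte stream.
type mediaStore interface {
    URL(ctx context.Context, key string) *url.URL
    GetStream(ctx context.Context, key string) (io.ReadCloser, error)
}

// serveMedia redirects clients straight to S3 when a presigned URL is
// available, and proxies the file through the server otherwise.
func serveMedia(store mediaStore, key string) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if u := store.URL(r.Context(), key); u != nil {
            // Cacheable presigned URL available: let the client or a CDN
            // fetch the object directly from the S3 backend.
            http.Redirect(w, r, u.String(), http.StatusFound)
            return
        }

        // Local storage, proxying enabled, or presigning failed:
        // fall back to streaming the object through this process.
        rc, err := store.GetStream(r.Context(), key)
        if err != nil {
            http.Error(w, "media not found", http.StatusNotFound)
            return
        }
        defer rc.Close()

        if _, err := io.Copy(w, rc); err != nil {
            // Client went away or the write failed; nothing more to do.
            return
        }
    }
}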
@@ -26,9 +26,6 @@ import (

     "codeberg.org/gruf/go-store/v2/kv"
     "codeberg.org/gruf/go-store/v2/storage"
-    "github.com/minio/minio-go/v7"
-    "github.com/minio/minio-go/v7/pkg/credentials"
-    "github.com/superseriousbusiness/gotosocial/internal/config"
     gtsstorage "github.com/superseriousbusiness/gotosocial/internal/storage"
 )

@@ -41,38 +38,6 @@ func NewInMemoryStorage() *gtsstorage.Driver {
     }
 }

-func NewS3Storage() *gtsstorage.Driver {
-    endpoint := config.GetStorageS3Endpoint()
-    access := config.GetStorageS3AccessKey()
-    secret := config.GetStorageS3SecretKey()
-    secure := config.GetStorageS3UseSSL()
-    bucket := config.GetStorageS3BucketName()
-    proxy := config.GetStorageS3Proxy()
-
-    s3, err := storage.OpenS3(endpoint, bucket, &storage.S3Config{
-        CoreOpts: minio.Options{
-            Creds:  credentials.NewStaticV4(access, secret, ""),
-            Secure: secure,
-        },
-        GetOpts:      minio.GetObjectOptions{},
-        PutOpts:      minio.PutObjectOptions{},
-        PutChunkSize: 5 * 1024 * 1024, // 2MiB
-        StatOpts:     minio.StatObjectOptions{},
-        RemoveOpts:   minio.RemoveObjectOptions{},
-        ListSize:     200,
-    })
-    if err != nil {
-        panic(fmt.Errorf("error opening s3 storage: %w", err))
-    }
-
-    return &gtsstorage.Driver{
-        KVStore: kv.New(s3),
-        Storage: s3,
-        Proxy:   proxy,
-        Bucket:  bucket,
-    }
-}
-
 // StandardStorageSetup populates the storage with standard test entries from the given directory.
 func StandardStorageSetup(storage *gtsstorage.Driver, relativePath string) {
     storedA := newTestStoredAttachments()