mirror of
				https://github.com/superseriousbusiness/gotosocial.git
				synced 2025-10-31 10:02:26 -05:00 
			
		
		
		
	Merge pull request #214 from NyaaaWhatsUpDoc/improvement/update-storage-library
add git.iim.gay/grufwub/go-store for storage backend, replacing blob.Storage
This commit is contained in:
		
				commit
				
					
						f6492d12d9
					
				
			
		
					 89 changed files with 9362 additions and 230 deletions
				
			
		
							
								
								
									
										9
									
								
								go.mod
									
										
									
									
									
								
							
							
						
						
									
										9
									
								
								go.mod
									
										
									
									
									
								
							|  | @ -3,6 +3,7 @@ module github.com/superseriousbusiness/gotosocial | ||||||
| go 1.17 | go 1.17 | ||||||
| 
 | 
 | ||||||
| require ( | require ( | ||||||
|  | 	git.iim.gay/grufwub/go-store v0.4.1 | ||||||
| 	github.com/ReneKroon/ttlcache v1.7.0 | 	github.com/ReneKroon/ttlcache v1.7.0 | ||||||
| 	github.com/buckket/go-blurhash v1.1.0 | 	github.com/buckket/go-blurhash v1.1.0 | ||||||
| 	github.com/coreos/go-oidc/v3 v3.0.0 | 	github.com/coreos/go-oidc/v3 v3.0.0 | ||||||
|  | @ -41,6 +42,13 @@ require ( | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| require ( | require ( | ||||||
|  | 	git.iim.gay/grufwub/fastpath v0.2.2 // indirect | ||||||
|  | 	git.iim.gay/grufwub/go-bufpool v0.2.1 // indirect | ||||||
|  | 	git.iim.gay/grufwub/go-bytes v0.7.0 // indirect | ||||||
|  | 	git.iim.gay/grufwub/go-errors v0.2.3 // indirect | ||||||
|  | 	git.iim.gay/grufwub/go-hashenc v0.3.0 // indirect | ||||||
|  | 	git.iim.gay/grufwub/go-mutexes v0.5.0 // indirect | ||||||
|  | 	git.iim.gay/grufwub/go-nowish v0.3.4 // indirect | ||||||
| 	github.com/aymerick/douceur v0.2.0 // indirect | 	github.com/aymerick/douceur v0.2.0 // indirect | ||||||
| 	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect | 	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect | ||||||
| 	github.com/davecgh/go-spew v1.1.1 // indirect | 	github.com/davecgh/go-spew v1.1.1 // indirect | ||||||
|  | @ -61,6 +69,7 @@ require ( | ||||||
| 	github.com/golang-jwt/jwt v3.2.2+incompatible // indirect | 	github.com/golang-jwt/jwt v3.2.2+incompatible // indirect | ||||||
| 	github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect | 	github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect | ||||||
| 	github.com/golang/protobuf v1.5.2 // indirect | 	github.com/golang/protobuf v1.5.2 // indirect | ||||||
|  | 	github.com/golang/snappy v0.0.3 // indirect | ||||||
| 	github.com/gorilla/context v1.1.1 // indirect | 	github.com/gorilla/context v1.1.1 // indirect | ||||||
| 	github.com/gorilla/css v1.0.0 // indirect | 	github.com/gorilla/css v1.0.0 // indirect | ||||||
| 	github.com/gorilla/securecookie v1.1.1 // indirect | 	github.com/gorilla/securecookie v1.1.1 // indirect | ||||||
|  |  | ||||||
							
								
								
									
										19
									
								
								go.sum
									
										
									
									
									
								
							
							
						
						
									
										19
									
								
								go.sum
									
										
									
									
									
								
							|  | @ -31,6 +31,23 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl | ||||||
| cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= | cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= | ||||||
| cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= | cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= | ||||||
| dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= | dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= | ||||||
|  | git.iim.gay/grufwub/fastpath v0.2.2 h1:ST89k6XukDE2xN5zZ0VGi7Jo2A/DPhik9dr3/ny4QGg= | ||||||
|  | git.iim.gay/grufwub/fastpath v0.2.2/go.mod h1:HYwrf4ku2CfP/1D3ItBqBbFHayuWkiV6BOwYQQ86dbo= | ||||||
|  | git.iim.gay/grufwub/go-bufpool v0.2.1 h1:TRwEzLtdvutp3+W/5wQYxmW6lAz9twQKmsfiUO0XfSU= | ||||||
|  | git.iim.gay/grufwub/go-bufpool v0.2.1/go.mod h1:X/QI5K0hxyhWPRtu+D+ZRzLqEOqD1Ns/rpNNWq2R1DU= | ||||||
|  | git.iim.gay/grufwub/go-bytes v0.7.0 h1:tGwIrVOANiaifx7702yDdkp5uMVYGk+rheZd6ZuSvEo= | ||||||
|  | git.iim.gay/grufwub/go-bytes v0.7.0/go.mod h1:ADc2p+R74ZjQf0FzIaIUnLyuHdbGCk7ldlaiHauAs+U= | ||||||
|  | git.iim.gay/grufwub/go-errors v0.2.3 h1:MSvzN/tW5Vj5Rm4EmBfdVpzyYjAY92ANE+ESNpZnjvw= | ||||||
|  | git.iim.gay/grufwub/go-errors v0.2.3/go.mod h1:rfKZpjI7A67zJfzpt5zfwAUMA7gec0EHXZEuYv/A9UI= | ||||||
|  | git.iim.gay/grufwub/go-fixedmap v0.1.3/go.mod h1:KB4nV2+NeBMVFvFwpdgsP74AsMiDeX68oD20wiC2S3I= | ||||||
|  | git.iim.gay/grufwub/go-hashenc v0.3.0 h1:2etpzwoUTPTLvlWZ9u9a+FUCGBcVU37x5zM5XZ0kahQ= | ||||||
|  | git.iim.gay/grufwub/go-hashenc v0.3.0/go.mod h1:wjztiGUzaZsEw5kKE6gz/UOFN2cbcDnGiUSUjOLXi4o= | ||||||
|  | git.iim.gay/grufwub/go-mutexes v0.5.0 h1:HojjhBWI1ry1TTvijczhm2oQ5CxLbxzihawqYk2Umto= | ||||||
|  | git.iim.gay/grufwub/go-mutexes v0.5.0/go.mod h1:xMhjpEP5UsCuFQD4qCIcq4pJLf7vMXZ56TD/u+wWJ4Y= | ||||||
|  | git.iim.gay/grufwub/go-nowish v0.3.4 h1:VgUzSEO7xJsJFN2HPbPYHT79s3pUkd5Z8hQOPecZzFU= | ||||||
|  | git.iim.gay/grufwub/go-nowish v0.3.4/go.mod h1:oII7zlMQMFclFzgmI1qRd7DdQXKNHWcJYnwHFgdgiRI= | ||||||
|  | git.iim.gay/grufwub/go-store v0.4.1 h1:orSqupN2iTm8P0sUpGWlCl6qckM2phkFnsB3TFTo6aQ= | ||||||
|  | git.iim.gay/grufwub/go-store v0.4.1/go.mod h1:NaSfOLKNzjj9lUQ0MA/gLGEeRiXhwOx26zR+l/SC9VM= | ||||||
| github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= | ||||||
| github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= | github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= | ||||||
| github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= | github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= | ||||||
|  | @ -194,6 +211,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw | ||||||
| github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= | ||||||
| github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= | github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= | ||||||
| github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= | github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= | ||||||
|  | github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= | ||||||
|  | github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= | ||||||
| github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= | github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= | ||||||
| github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= | github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= | ||||||
| github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= | github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= | ||||||
|  |  | ||||||
|  | @ -6,11 +6,11 @@ import ( | ||||||
| 	"net/http" | 	"net/http" | ||||||
| 	"net/http/httptest" | 	"net/http/httptest" | ||||||
| 
 | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/gin-gonic/gin" | 	"github.com/gin-gonic/gin" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	"github.com/stretchr/testify/suite" | 	"github.com/stretchr/testify/suite" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/api/client/account" | 	"github.com/superseriousbusiness/gotosocial/internal/api/client/account" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/federation" | 	"github.com/superseriousbusiness/gotosocial/internal/federation" | ||||||
|  | @ -28,7 +28,7 @@ type AccountStandardTestSuite struct { | ||||||
| 	db        db.DB | 	db        db.DB | ||||||
| 	log       *logrus.Logger | 	log       *logrus.Logger | ||||||
| 	tc        typeutils.TypeConverter | 	tc        typeutils.TypeConverter | ||||||
| 	storage   blob.Storage | 	storage   *kv.KVStore | ||||||
| 	federator federation.Federator | 	federator federation.Federator | ||||||
| 	processor processing.Processor | 	processor processing.Processor | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -26,12 +26,12 @@ import ( | ||||||
| 	"net/http/httptest" | 	"net/http/httptest" | ||||||
| 	"testing" | 	"testing" | ||||||
| 
 | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/gin-gonic/gin" | 	"github.com/gin-gonic/gin" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	"github.com/stretchr/testify/assert" | 	"github.com/stretchr/testify/assert" | ||||||
| 	"github.com/stretchr/testify/suite" | 	"github.com/stretchr/testify/suite" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/api/client/fileserver" | 	"github.com/superseriousbusiness/gotosocial/internal/api/client/fileserver" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/federation" | 	"github.com/superseriousbusiness/gotosocial/internal/federation" | ||||||
|  | @ -49,7 +49,7 @@ type ServeFileTestSuite struct { | ||||||
| 	config       *config.Config | 	config       *config.Config | ||||||
| 	db           db.DB | 	db           db.DB | ||||||
| 	log          *logrus.Logger | 	log          *logrus.Logger | ||||||
| 	storage      blob.Storage | 	storage      *kv.KVStore | ||||||
| 	federator    federation.Federator | 	federator    federation.Federator | ||||||
| 	tc           typeutils.TypeConverter | 	tc           typeutils.TypeConverter | ||||||
| 	processor    processing.Processor | 	processor    processing.Processor | ||||||
|  | @ -152,7 +152,7 @@ func (suite *ServeFileTestSuite) TestServeOriginalFileSuccessful() { | ||||||
| 	assert.NoError(suite.T(), err) | 	assert.NoError(suite.T(), err) | ||||||
| 	assert.NotNil(suite.T(), b) | 	assert.NotNil(suite.T(), b) | ||||||
| 
 | 
 | ||||||
| 	fileInStorage, err := suite.storage.RetrieveFileFrom(targetAttachment.File.Path) | 	fileInStorage, err := suite.storage.Get(targetAttachment.File.Path) | ||||||
| 	assert.NoError(suite.T(), err) | 	assert.NoError(suite.T(), err) | ||||||
| 	assert.NotNil(suite.T(), fileInStorage) | 	assert.NotNil(suite.T(), fileInStorage) | ||||||
| 	assert.Equal(suite.T(), b, fileInStorage) | 	assert.Equal(suite.T(), b, fileInStorage) | ||||||
|  |  | ||||||
|  | @ -28,13 +28,13 @@ import ( | ||||||
| 	"net/http/httptest" | 	"net/http/httptest" | ||||||
| 	"testing" | 	"testing" | ||||||
| 
 | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/gin-gonic/gin" | 	"github.com/gin-gonic/gin" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	"github.com/stretchr/testify/assert" | 	"github.com/stretchr/testify/assert" | ||||||
| 	"github.com/stretchr/testify/suite" | 	"github.com/stretchr/testify/suite" | ||||||
| 	mediamodule "github.com/superseriousbusiness/gotosocial/internal/api/client/media" | 	mediamodule "github.com/superseriousbusiness/gotosocial/internal/api/client/media" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/api/model" | 	"github.com/superseriousbusiness/gotosocial/internal/api/model" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/federation" | 	"github.com/superseriousbusiness/gotosocial/internal/federation" | ||||||
|  | @ -52,7 +52,7 @@ type MediaCreateTestSuite struct { | ||||||
| 	config       *config.Config | 	config       *config.Config | ||||||
| 	db           db.DB | 	db           db.DB | ||||||
| 	log          *logrus.Logger | 	log          *logrus.Logger | ||||||
| 	storage      blob.Storage | 	storage      *kv.KVStore | ||||||
| 	federator    federation.Federator | 	federator    federation.Federator | ||||||
| 	tc           typeutils.TypeConverter | 	tc           typeutils.TypeConverter | ||||||
| 	mediaHandler media.Handler | 	mediaHandler media.Handler | ||||||
|  | @ -118,7 +118,6 @@ func (suite *MediaCreateTestSuite) TearDownTest() { | ||||||
| */ | */ | ||||||
| 
 | 
 | ||||||
| func (suite *MediaCreateTestSuite) TestStatusCreatePOSTImageHandlerSuccessful() { | func (suite *MediaCreateTestSuite) TestStatusCreatePOSTImageHandlerSuccessful() { | ||||||
| 
 |  | ||||||
| 	// set up the context for the request | 	// set up the context for the request | ||||||
| 	t := suite.testTokens["local_account_1"] | 	t := suite.testTokens["local_account_1"] | ||||||
| 	oauthToken := oauth.DBTokenToToken(t) | 	oauthToken := oauth.DBTokenToToken(t) | ||||||
|  | @ -130,10 +129,15 @@ func (suite *MediaCreateTestSuite) TestStatusCreatePOSTImageHandlerSuccessful() | ||||||
| 	ctx.Set(oauth.SessionAuthorizedAccount, suite.testAccounts["local_account_1"]) | 	ctx.Set(oauth.SessionAuthorizedAccount, suite.testAccounts["local_account_1"]) | ||||||
| 
 | 
 | ||||||
| 	// see what's in storage *before* the request | 	// see what's in storage *before* the request | ||||||
| 	storageKeysBeforeRequest, err := suite.storage.ListKeys() | 	storageKeysBeforeRequest := []string{} | ||||||
|  | 	iter, err := suite.storage.Iterator(nil) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		panic(err) | 		panic(err) | ||||||
| 	} | 	} | ||||||
|  | 	for iter.Next() { | ||||||
|  | 		storageKeysBeforeRequest = append(storageKeysBeforeRequest, iter.Key()) | ||||||
|  | 	} | ||||||
|  | 	iter.Release() | ||||||
| 
 | 
 | ||||||
| 	// create the request | 	// create the request | ||||||
| 	buf, w, err := testrig.CreateMultipartFormData("file", "../../../../testrig/media/test-jpeg.jpg", map[string]string{ | 	buf, w, err := testrig.CreateMultipartFormData("file", "../../../../testrig/media/test-jpeg.jpg", map[string]string{ | ||||||
|  | @ -150,10 +154,15 @@ func (suite *MediaCreateTestSuite) TestStatusCreatePOSTImageHandlerSuccessful() | ||||||
| 	suite.mediaModule.MediaCreatePOSTHandler(ctx) | 	suite.mediaModule.MediaCreatePOSTHandler(ctx) | ||||||
| 
 | 
 | ||||||
| 	// check what's in storage *after* the request | 	// check what's in storage *after* the request | ||||||
| 	storageKeysAfterRequest, err := suite.storage.ListKeys() | 	storageKeysAfterRequest := []string{} | ||||||
|  | 	iter, err = suite.storage.Iterator(nil) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		panic(err) | 		panic(err) | ||||||
| 	} | 	} | ||||||
|  | 	for iter.Next() { | ||||||
|  | 		storageKeysAfterRequest = append(storageKeysAfterRequest, iter.Key()) | ||||||
|  | 	} | ||||||
|  | 	iter.Release() | ||||||
| 
 | 
 | ||||||
| 	// check response | 	// check response | ||||||
| 	suite.EqualValues(http.StatusOK, recorder.Code) | 	suite.EqualValues(http.StatusOK, recorder.Code) | ||||||
|  |  | ||||||
|  | @ -19,10 +19,10 @@ | ||||||
| package status_test | package status_test | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	"github.com/stretchr/testify/suite" | 	"github.com/stretchr/testify/suite" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/api/client/status" | 	"github.com/superseriousbusiness/gotosocial/internal/api/client/status" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/federation" | 	"github.com/superseriousbusiness/gotosocial/internal/federation" | ||||||
|  | @ -41,7 +41,7 @@ type StatusStandardTestSuite struct { | ||||||
| 	tc        typeutils.TypeConverter | 	tc        typeutils.TypeConverter | ||||||
| 	federator federation.Federator | 	federator federation.Federator | ||||||
| 	processor processing.Processor | 	processor processing.Processor | ||||||
| 	storage   blob.Storage | 	storage   *kv.KVStore | ||||||
| 
 | 
 | ||||||
| 	// standard suite models | 	// standard suite models | ||||||
| 	testTokens       map[string]*gtsmodel.Token | 	testTokens       map[string]*gtsmodel.Token | ||||||
|  |  | ||||||
|  | @ -1,11 +1,11 @@ | ||||||
| package user_test | package user_test | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	"github.com/stretchr/testify/suite" | 	"github.com/stretchr/testify/suite" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/api/s2s/user" | 	"github.com/superseriousbusiness/gotosocial/internal/api/s2s/user" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/api/security" | 	"github.com/superseriousbusiness/gotosocial/internal/api/security" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/federation" | 	"github.com/superseriousbusiness/gotosocial/internal/federation" | ||||||
|  | @ -24,7 +24,7 @@ type UserStandardTestSuite struct { | ||||||
| 	tc             typeutils.TypeConverter | 	tc             typeutils.TypeConverter | ||||||
| 	federator      federation.Federator | 	federator      federation.Federator | ||||||
| 	processor      processing.Processor | 	processor      processing.Processor | ||||||
| 	storage        blob.Storage | 	storage        *kv.KVStore | ||||||
| 	securityModule *security.Module | 	securityModule *security.Module | ||||||
| 
 | 
 | ||||||
| 	// standard suite models | 	// standard suite models | ||||||
|  |  | ||||||
|  | @ -1,55 +0,0 @@ | ||||||
| package blob |  | ||||||
| 
 |  | ||||||
| import ( |  | ||||||
| 	"fmt" |  | ||||||
| 
 |  | ||||||
| 	"github.com/sirupsen/logrus" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| // NewInMem returns an in-memory implementation of the Storage interface. |  | ||||||
| // This is good for testing and whatnot but ***SHOULD ABSOLUTELY NOT EVER |  | ||||||
| // BE USED IN A PRODUCTION SETTING***, because A) everything will be wiped out |  | ||||||
| // if you restart the server and B) if you store lots of images your RAM use |  | ||||||
| // will absolutely go through the roof. |  | ||||||
| func NewInMem(c *config.Config, log *logrus.Logger) (Storage, error) { |  | ||||||
| 	return &inMemStorage{ |  | ||||||
| 		stored: make(map[string][]byte), |  | ||||||
| 		log:    log, |  | ||||||
| 	}, nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| type inMemStorage struct { |  | ||||||
| 	stored map[string][]byte |  | ||||||
| 	log    *logrus.Logger |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (s *inMemStorage) StoreFileAt(path string, data []byte) error { |  | ||||||
| 	l := s.log.WithField("func", "StoreFileAt") |  | ||||||
| 	l.Debugf("storing at path %s", path) |  | ||||||
| 	s.stored[path] = data |  | ||||||
| 	return nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (s *inMemStorage) RetrieveFileFrom(path string) ([]byte, error) { |  | ||||||
| 	l := s.log.WithField("func", "RetrieveFileFrom") |  | ||||||
| 	l.Debugf("retrieving from path %s", path) |  | ||||||
| 	d, ok := s.stored[path] |  | ||||||
| 	if !ok || len(d) == 0 { |  | ||||||
| 		return nil, fmt.Errorf("no data found at path %s", path) |  | ||||||
| 	} |  | ||||||
| 	return d, nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (s *inMemStorage) ListKeys() ([]string, error) { |  | ||||||
| 	keys := []string{} |  | ||||||
| 	for k := range s.stored { |  | ||||||
| 		keys = append(keys, k) |  | ||||||
| 	} |  | ||||||
| 	return keys, nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (s *inMemStorage) RemoveFileAt(path string) error { |  | ||||||
| 	delete(s.stored, path) |  | ||||||
| 	return nil |  | ||||||
| } |  | ||||||
|  | @ -1,70 +0,0 @@ | ||||||
| package blob |  | ||||||
| 
 |  | ||||||
| import ( |  | ||||||
| 	"fmt" |  | ||||||
| 	"os" |  | ||||||
| 	"path/filepath" |  | ||||||
| 	"strings" |  | ||||||
| 
 |  | ||||||
| 	"github.com/sirupsen/logrus" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| // NewLocal returns an implementation of the Storage interface that uses |  | ||||||
| // the local filesystem for storing and retrieving files, attachments, etc. |  | ||||||
| func NewLocal(c *config.Config, log *logrus.Logger) (Storage, error) { |  | ||||||
| 	return &localStorage{ |  | ||||||
| 		config: c, |  | ||||||
| 		log:    log, |  | ||||||
| 	}, nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| type localStorage struct { |  | ||||||
| 	config *config.Config |  | ||||||
| 	log    *logrus.Logger |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (s *localStorage) StoreFileAt(path string, data []byte) error { |  | ||||||
| 	l := s.log.WithField("func", "StoreFileAt") |  | ||||||
| 	l.Debugf("storing at path %s", path) |  | ||||||
| 	components := strings.Split(path, "/") |  | ||||||
| 	dir := strings.Join(components[0:len(components)-1], "/") |  | ||||||
| 	if err := os.MkdirAll(dir, 0777); err != nil { |  | ||||||
| 		return fmt.Errorf("error writing file at %s: %s", path, err) |  | ||||||
| 	} |  | ||||||
| 	if err := os.WriteFile(path, data, 0777); err != nil { |  | ||||||
| 		return fmt.Errorf("error writing file at %s: %s", path, err) |  | ||||||
| 	} |  | ||||||
| 	return nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (s *localStorage) RetrieveFileFrom(path string) ([]byte, error) { |  | ||||||
| 	l := s.log.WithField("func", "RetrieveFileFrom") |  | ||||||
| 	l.Debugf("retrieving from path %s", path) |  | ||||||
| 	b, err := os.ReadFile(path) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return nil, fmt.Errorf("error reading file at %s: %s", path, err) |  | ||||||
| 	} |  | ||||||
| 	return b, nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (s *localStorage) ListKeys() ([]string, error) { |  | ||||||
| 	keys := []string{} |  | ||||||
| 	err := filepath.Walk(s.config.StorageConfig.BasePath, func(path string, info os.FileInfo, err error) error { |  | ||||||
| 		if err != nil { |  | ||||||
| 			return err |  | ||||||
| 		} |  | ||||||
| 		if !info.IsDir() { |  | ||||||
| 			keys = append(keys, path) |  | ||||||
| 		} |  | ||||||
| 		return nil |  | ||||||
| 	}) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return nil, err |  | ||||||
| 	} |  | ||||||
| 	return keys, nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (s *localStorage) RemoveFileAt(path string) error { |  | ||||||
| 	return os.Remove(path) |  | ||||||
| } |  | ||||||
|  | @ -1,29 +0,0 @@ | ||||||
| /* |  | ||||||
|    GoToSocial |  | ||||||
|    Copyright (C) 2021 GoToSocial Authors admin@gotosocial.org |  | ||||||
| 
 |  | ||||||
|    This program is free software: you can redistribute it and/or modify |  | ||||||
|    it under the terms of the GNU Affero General Public License as published by |  | ||||||
|    the Free Software Foundation, either version 3 of the License, or |  | ||||||
|    (at your option) any later version. |  | ||||||
| 
 |  | ||||||
|    This program is distributed in the hope that it will be useful, |  | ||||||
|    but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
|    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
|    GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
|    You should have received a copy of the GNU Affero General Public License |  | ||||||
|    along with this program.  If not, see <http://www.gnu.org/licenses/>. |  | ||||||
| */ |  | ||||||
| 
 |  | ||||||
| package blob |  | ||||||
| 
 |  | ||||||
| // Storage is an interface for storing and retrieving blobs |  | ||||||
| // such as images, videos, and any other attachments/documents |  | ||||||
| // that shouldn't be stored in a database. |  | ||||||
| type Storage interface { |  | ||||||
| 	StoreFileAt(path string, data []byte) error |  | ||||||
| 	RetrieveFileFrom(path string) ([]byte, error) |  | ||||||
| 	ListKeys() ([]string, error) |  | ||||||
| 	RemoveFileAt(path string) error |  | ||||||
| } |  | ||||||
|  | @ -8,6 +8,7 @@ import ( | ||||||
| 	"os/signal" | 	"os/signal" | ||||||
| 	"syscall" | 	"syscall" | ||||||
| 
 | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/api" | 	"github.com/superseriousbusiness/gotosocial/internal/api" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/api/client/account" | 	"github.com/superseriousbusiness/gotosocial/internal/api/client/account" | ||||||
|  | @ -32,7 +33,6 @@ import ( | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/api/s2s/user" | 	"github.com/superseriousbusiness/gotosocial/internal/api/s2s/user" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/api/s2s/webfinger" | 	"github.com/superseriousbusiness/gotosocial/internal/api/s2s/webfinger" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/api/security" | 	"github.com/superseriousbusiness/gotosocial/internal/api/security" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/cliactions" | 	"github.com/superseriousbusiness/gotosocial/internal/cliactions" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db/bundb" | 	"github.com/superseriousbusiness/gotosocial/internal/db/bundb" | ||||||
|  | @ -76,7 +76,8 @@ var Start cliactions.GTSAction = func(ctx context.Context, c *config.Config, log | ||||||
| 		return fmt.Errorf("error creating router: %s", err) | 		return fmt.Errorf("error creating router: %s", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	storageBackend, err := blob.NewLocal(c, log) | 	// Open the storage backend | ||||||
|  | 	storage, err := kv.OpenFile(c.StorageConfig.BasePath, nil) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return fmt.Errorf("error creating storage backend: %s", err) | 		return fmt.Errorf("error creating storage backend: %s", err) | ||||||
| 	} | 	} | ||||||
|  | @ -86,11 +87,11 @@ var Start cliactions.GTSAction = func(ctx context.Context, c *config.Config, log | ||||||
| 	timelineManager := timelineprocessing.NewManager(dbService, typeConverter, c, log) | 	timelineManager := timelineprocessing.NewManager(dbService, typeConverter, c, log) | ||||||
| 
 | 
 | ||||||
| 	// build backend handlers | 	// build backend handlers | ||||||
| 	mediaHandler := media.New(c, dbService, storageBackend, log) | 	mediaHandler := media.New(c, dbService, storage, log) | ||||||
| 	oauthServer := oauth.New(dbService, log) | 	oauthServer := oauth.New(dbService, log) | ||||||
| 	transportController := transport.NewController(c, dbService, &federation.Clock{}, http.DefaultClient, log) | 	transportController := transport.NewController(c, dbService, &federation.Clock{}, http.DefaultClient, log) | ||||||
| 	federator := federation.NewFederator(dbService, federatingDB, transportController, c, log, typeConverter, mediaHandler) | 	federator := federation.NewFederator(dbService, federatingDB, transportController, c, log, typeConverter, mediaHandler) | ||||||
| 	processor := processing.NewProcessor(c, typeConverter, federator, oauthServer, mediaHandler, storageBackend, timelineManager, dbService, log) | 	processor := processing.NewProcessor(c, typeConverter, federator, oauthServer, mediaHandler, storage, timelineManager, dbService, log) | ||||||
| 	if err := processor.Start(ctx); err != nil { | 	if err := processor.Start(ctx); err != nil { | ||||||
| 		return fmt.Errorf("error starting processor: %s", err) | 		return fmt.Errorf("error starting processor: %s", err) | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | @ -24,11 +24,11 @@ import ( | ||||||
| 	"io" | 	"io" | ||||||
| 	"net/http" | 	"net/http" | ||||||
| 
 | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/go-fed/activity/streams" | 	"github.com/go-fed/activity/streams" | ||||||
| 	"github.com/go-fed/activity/streams/vocab" | 	"github.com/go-fed/activity/streams/vocab" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	"github.com/stretchr/testify/suite" | 	"github.com/stretchr/testify/suite" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/federation/dereferencing" | 	"github.com/superseriousbusiness/gotosocial/internal/federation/dereferencing" | ||||||
|  | @ -42,7 +42,7 @@ type DereferencerStandardTestSuite struct { | ||||||
| 	config  *config.Config | 	config  *config.Config | ||||||
| 	db      db.DB | 	db      db.DB | ||||||
| 	log     *logrus.Logger | 	log     *logrus.Logger | ||||||
| 	storage blob.Storage | 	storage *kv.KVStore | ||||||
| 
 | 
 | ||||||
| 	testRemoteStatuses    map[string]vocab.ActivityStreamsNote | 	testRemoteStatuses    map[string]vocab.ActivityStreamsNote | ||||||
| 	testRemoteAccounts    map[string]vocab.ActivityStreamsPerson | 	testRemoteAccounts    map[string]vocab.ActivityStreamsPerson | ||||||
|  |  | ||||||
|  | @ -24,13 +24,13 @@ import ( | ||||||
| 	"net/http/httptest" | 	"net/http/httptest" | ||||||
| 	"testing" | 	"testing" | ||||||
| 
 | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/go-fed/activity/pub" | 	"github.com/go-fed/activity/pub" | ||||||
| 	"github.com/go-fed/httpsig" | 	"github.com/go-fed/httpsig" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	"github.com/stretchr/testify/assert" | 	"github.com/stretchr/testify/assert" | ||||||
| 	"github.com/stretchr/testify/suite" | 	"github.com/stretchr/testify/suite" | ||||||
| 
 | 
 | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/federation" | 	"github.com/superseriousbusiness/gotosocial/internal/federation" | ||||||
|  | @ -45,7 +45,7 @@ type ProtocolTestSuite struct { | ||||||
| 	config        *config.Config | 	config        *config.Config | ||||||
| 	db            db.DB | 	db            db.DB | ||||||
| 	log           *logrus.Logger | 	log           *logrus.Logger | ||||||
| 	storage       blob.Storage | 	storage       *kv.KVStore | ||||||
| 	typeConverter typeutils.TypeConverter | 	typeConverter typeutils.TypeConverter | ||||||
| 	accounts      map[string]*gtsmodel.Account | 	accounts      map[string]*gtsmodel.Account | ||||||
| 	activities    map[string]testrig.ActivityWithSignature | 	activities    map[string]testrig.ActivityWithSignature | ||||||
|  | @ -65,7 +65,6 @@ func (suite *ProtocolTestSuite) SetupSuite() { | ||||||
| 
 | 
 | ||||||
| func (suite *ProtocolTestSuite) SetupTest() { | func (suite *ProtocolTestSuite) SetupTest() { | ||||||
| 	testrig.StandardDBSetup(suite.db, suite.accounts) | 	testrig.StandardDBSetup(suite.db, suite.accounts) | ||||||
| 
 |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // TearDownTest drops tables to make sure there's no data in the db | // TearDownTest drops tables to make sure there's no data in the db | ||||||
|  | @ -75,7 +74,6 @@ func (suite *ProtocolTestSuite) TearDownTest() { | ||||||
| 
 | 
 | ||||||
| // make sure PostInboxRequestBodyHook properly sets the inbox username and activity on the context | // make sure PostInboxRequestBodyHook properly sets the inbox username and activity on the context | ||||||
| func (suite *ProtocolTestSuite) TestPostInboxRequestBodyHook() { | func (suite *ProtocolTestSuite) TestPostInboxRequestBodyHook() { | ||||||
| 
 |  | ||||||
| 	// the activity we're gonna use | 	// the activity we're gonna use | ||||||
| 	activity := suite.activities["dm_for_zork"] | 	activity := suite.activities["dm_for_zork"] | ||||||
| 
 | 
 | ||||||
|  | @ -106,7 +104,6 @@ func (suite *ProtocolTestSuite) TestPostInboxRequestBodyHook() { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (suite *ProtocolTestSuite) TestAuthenticatePostInbox() { | func (suite *ProtocolTestSuite) TestAuthenticatePostInbox() { | ||||||
| 
 |  | ||||||
| 	// the activity we're gonna use | 	// the activity we're gonna use | ||||||
| 	activity := suite.activities["dm_for_zork"] | 	activity := suite.activities["dm_for_zork"] | ||||||
| 	sendingAccount := suite.accounts["remote_account_1"] | 	sendingAccount := suite.accounts["remote_account_1"] | ||||||
|  |  | ||||||
|  | @ -26,8 +26,8 @@ import ( | ||||||
| 	"strings" | 	"strings" | ||||||
| 	"time" | 	"time" | ||||||
| 
 | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | ||||||
|  | @ -86,12 +86,12 @@ type Handler interface { | ||||||
| type mediaHandler struct { | type mediaHandler struct { | ||||||
| 	config  *config.Config | 	config  *config.Config | ||||||
| 	db      db.DB | 	db      db.DB | ||||||
| 	storage blob.Storage | 	storage *kv.KVStore | ||||||
| 	log     *logrus.Logger | 	log     *logrus.Logger | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // New returns a new handler with the given config, db, storage, and logger | // New returns a new handler with the given config, db, storage, and logger | ||||||
| func New(config *config.Config, database db.DB, storage blob.Storage, log *logrus.Logger) Handler { | func New(config *config.Config, database db.DB, storage *kv.KVStore, log *logrus.Logger) Handler { | ||||||
| 	return &mediaHandler{ | 	return &mediaHandler{ | ||||||
| 		config:  config, | 		config:  config, | ||||||
| 		db:      database, | 		db:      database, | ||||||
|  | @ -250,19 +250,19 @@ func (mh *mediaHandler) ProcessLocalEmoji(ctx context.Context, emojiBytes []byte | ||||||
| 
 | 
 | ||||||
| 	// serve url and storage path for the original emoji -- can be png or gif | 	// serve url and storage path for the original emoji -- can be png or gif | ||||||
| 	emojiURL := fmt.Sprintf("%s/%s/%s/%s/%s.%s", URLbase, instanceAccount.ID, Emoji, Original, newEmojiID, extension) | 	emojiURL := fmt.Sprintf("%s/%s/%s/%s/%s.%s", URLbase, instanceAccount.ID, Emoji, Original, newEmojiID, extension) | ||||||
| 	emojiPath := fmt.Sprintf("%s/%s/%s/%s/%s.%s", mh.config.StorageConfig.BasePath, instanceAccount.ID, Emoji, Original, newEmojiID, extension) | 	emojiPath := fmt.Sprintf("%s/%s/%s/%s.%s", instanceAccount.ID, Emoji, Original, newEmojiID, extension) | ||||||
| 
 | 
 | ||||||
| 	// serve url and storage path for the static version -- will always be png | 	// serve url and storage path for the static version -- will always be png | ||||||
| 	emojiStaticURL := fmt.Sprintf("%s/%s/%s/%s/%s.png", URLbase, instanceAccount.ID, Emoji, Static, newEmojiID) | 	emojiStaticURL := fmt.Sprintf("%s/%s/%s/%s/%s.png", URLbase, instanceAccount.ID, Emoji, Static, newEmojiID) | ||||||
| 	emojiStaticPath := fmt.Sprintf("%s/%s/%s/%s/%s.png", mh.config.StorageConfig.BasePath, instanceAccount.ID, Emoji, Static, newEmojiID) | 	emojiStaticPath := fmt.Sprintf("%s/%s/%s/%s.png", instanceAccount.ID, Emoji, Static, newEmojiID) | ||||||
| 
 | 
 | ||||||
| 	// store the original | 	// Store the original emoji | ||||||
| 	if err := mh.storage.StoreFileAt(emojiPath, original.image); err != nil { | 	if err := mh.storage.Put(emojiPath, original.image); err != nil { | ||||||
| 		return nil, fmt.Errorf("storage error: %s", err) | 		return nil, fmt.Errorf("storage error: %s", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// store the static | 	// Store the static emoji | ||||||
| 	if err := mh.storage.StoreFileAt(emojiStaticPath, static.image); err != nil { | 	if err := mh.storage.Put(emojiStaticPath, static.image); err != nil { | ||||||
| 		return nil, fmt.Errorf("storage error: %s", err) | 		return nil, fmt.Errorf("storage error: %s", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -293,7 +293,6 @@ func (mh *mediaHandler) ProcessLocalEmoji(ctx context.Context, emojiBytes []byte | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (mh *mediaHandler) ProcessRemoteHeaderOrAvatar(ctx context.Context, t transport.Transport, currentAttachment *gtsmodel.MediaAttachment, accountID string) (*gtsmodel.MediaAttachment, error) { | func (mh *mediaHandler) ProcessRemoteHeaderOrAvatar(ctx context.Context, t transport.Transport, currentAttachment *gtsmodel.MediaAttachment, accountID string) (*gtsmodel.MediaAttachment, error) { | ||||||
| 
 |  | ||||||
| 	if !currentAttachment.Header && !currentAttachment.Avatar { | 	if !currentAttachment.Header && !currentAttachment.Avatar { | ||||||
| 		return nil, errors.New("provided attachment was set to neither header nor avatar") | 		return nil, errors.New("provided attachment was set to neither header nor avatar") | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | @ -84,14 +84,14 @@ func (mh *mediaHandler) processHeaderOrAvi(imageBytes []byte, contentType string | ||||||
| 	smallURL := fmt.Sprintf("%s/%s/%s/small/%s.%s", URLbase, accountID, mediaType, newMediaID, extension) | 	smallURL := fmt.Sprintf("%s/%s/%s/small/%s.%s", URLbase, accountID, mediaType, newMediaID, extension) | ||||||
| 
 | 
 | ||||||
| 	// we store the original... | 	// we store the original... | ||||||
| 	originalPath := fmt.Sprintf("%s/%s/%s/%s/%s.%s", mh.config.StorageConfig.BasePath, accountID, mediaType, Original, newMediaID, extension) | 	originalPath := fmt.Sprintf("%s/%s/%s/%s.%s", accountID, mediaType, Original, newMediaID, extension) | ||||||
| 	if err := mh.storage.StoreFileAt(originalPath, original.image); err != nil { | 	if err := mh.storage.Put(originalPath, original.image); err != nil { | ||||||
| 		return nil, fmt.Errorf("storage error: %s", err) | 		return nil, fmt.Errorf("storage error: %s", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// and a thumbnail... | 	// and a thumbnail... | ||||||
| 	smallPath := fmt.Sprintf("%s/%s/%s/%s/%s.%s", mh.config.StorageConfig.BasePath, accountID, mediaType, Small, newMediaID, extension) | 	smallPath := fmt.Sprintf("%s/%s/%s/%s.%s", accountID, mediaType, Small, newMediaID, extension) | ||||||
| 	if err := mh.storage.StoreFileAt(smallPath, small.image); err != nil { | 	if err := mh.storage.Put(smallPath, small.image); err != nil { | ||||||
| 		return nil, fmt.Errorf("storage error: %s", err) | 		return nil, fmt.Errorf("storage error: %s", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -72,14 +72,14 @@ func (mh *mediaHandler) processImageAttachment(data []byte, minAttachment *gtsmo | ||||||
| 	smallURL := fmt.Sprintf("%s/%s/attachment/small/%s.jpeg", URLbase, minAttachment.AccountID, newMediaID) // all thumbnails/smalls are encoded as jpeg | 	smallURL := fmt.Sprintf("%s/%s/attachment/small/%s.jpeg", URLbase, minAttachment.AccountID, newMediaID) // all thumbnails/smalls are encoded as jpeg | ||||||
| 
 | 
 | ||||||
| 	// we store the original... | 	// we store the original... | ||||||
| 	originalPath := fmt.Sprintf("%s/%s/%s/%s/%s.%s", mh.config.StorageConfig.BasePath, minAttachment.AccountID, Attachment, Original, newMediaID, extension) | 	originalPath := fmt.Sprintf("%s/%s/%s/%s.%s", minAttachment.AccountID, Attachment, Original, newMediaID, extension) | ||||||
| 	if err := mh.storage.StoreFileAt(originalPath, original.image); err != nil { | 	if err := mh.storage.Put(originalPath, original.image); err != nil { | ||||||
| 		return nil, fmt.Errorf("storage error: %s", err) | 		return nil, fmt.Errorf("storage error: %s", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// and a thumbnail... | 	// and a thumbnail... | ||||||
| 	smallPath := fmt.Sprintf("%s/%s/%s/%s/%s.jpeg", mh.config.StorageConfig.BasePath, minAttachment.AccountID, Attachment, Small, newMediaID) // all thumbnails/smalls are encoded as jpeg | 	smallPath := fmt.Sprintf("%s/%s/%s/%s.jpeg", minAttachment.AccountID, Attachment, Small, newMediaID) // all thumbnails/smalls are encoded as jpeg | ||||||
| 	if err := mh.storage.StoreFileAt(smallPath, small.image); err != nil { | 	if err := mh.storage.Put(smallPath, small.image); err != nil { | ||||||
| 		return nil, fmt.Errorf("storage error: %s", err) | 		return nil, fmt.Errorf("storage error: %s", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -130,5 +130,4 @@ func (mh *mediaHandler) processImageAttachment(data []byte, minAttachment *gtsmo | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return attachment, nil | 	return attachment, nil | ||||||
| 
 |  | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -19,10 +19,10 @@ | ||||||
| package account_test | package account_test | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/go-fed/activity/pub" | 	"github.com/go-fed/activity/pub" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	"github.com/stretchr/testify/suite" | 	"github.com/stretchr/testify/suite" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/federation" | 	"github.com/superseriousbusiness/gotosocial/internal/federation" | ||||||
|  | @ -43,7 +43,7 @@ type AccountStandardTestSuite struct { | ||||||
| 	db                  db.DB | 	db                  db.DB | ||||||
| 	log                 *logrus.Logger | 	log                 *logrus.Logger | ||||||
| 	tc                  typeutils.TypeConverter | 	tc                  typeutils.TypeConverter | ||||||
| 	storage             blob.Storage | 	storage             *kv.KVStore | ||||||
| 	mediaHandler        media.Handler | 	mediaHandler        media.Handler | ||||||
| 	oauthServer         oauth.Server | 	oauthServer         oauth.Server | ||||||
| 	fromClientAPIChan   chan messages.FromClientAPI | 	fromClientAPIChan   chan messages.FromClientAPI | ||||||
|  |  | ||||||
|  | @ -24,14 +24,14 @@ func (p *processor) Delete(ctx context.Context, mediaAttachmentID string) gtserr | ||||||
| 
 | 
 | ||||||
| 	// delete the thumbnail from storage | 	// delete the thumbnail from storage | ||||||
| 	if attachment.Thumbnail.Path != "" { | 	if attachment.Thumbnail.Path != "" { | ||||||
| 		if err := p.storage.RemoveFileAt(attachment.Thumbnail.Path); err != nil { | 		if err := p.storage.Delete(attachment.Thumbnail.Path); err != nil { | ||||||
| 			errs = append(errs, fmt.Sprintf("remove thumbnail at path %s: %s", attachment.Thumbnail.Path, err)) | 			errs = append(errs, fmt.Sprintf("remove thumbnail at path %s: %s", attachment.Thumbnail.Path, err)) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// delete the file from storage | 	// delete the file from storage | ||||||
| 	if attachment.File.Path != "" { | 	if attachment.File.Path != "" { | ||||||
| 		if err := p.storage.RemoveFileAt(attachment.File.Path); err != nil { | 		if err := p.storage.Delete(attachment.File.Path); err != nil { | ||||||
| 			errs = append(errs, fmt.Sprintf("remove file at path %s: %s", attachment.File.Path, err)) | 			errs = append(errs, fmt.Sprintf("remove file at path %s: %s", attachment.File.Path, err)) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | @ -110,7 +110,7 @@ func (p *processor) GetFile(ctx context.Context, account *gtsmodel.Account, form | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	bytes, err := p.storage.RetrieveFileFrom(storagePath) | 	bytes, err := p.storage.Get(storagePath) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, gtserror.NewErrorNotFound(fmt.Errorf("error retrieving from storage: %s", err)) | 		return nil, gtserror.NewErrorNotFound(fmt.Errorf("error retrieving from storage: %s", err)) | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | @ -21,9 +21,9 @@ package media | ||||||
| import ( | import ( | ||||||
| 	"context" | 	"context" | ||||||
| 
 | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtserror" | 	"github.com/superseriousbusiness/gotosocial/internal/gtserror" | ||||||
|  | @ -47,13 +47,13 @@ type processor struct { | ||||||
| 	tc           typeutils.TypeConverter | 	tc           typeutils.TypeConverter | ||||||
| 	config       *config.Config | 	config       *config.Config | ||||||
| 	mediaHandler media.Handler | 	mediaHandler media.Handler | ||||||
| 	storage      blob.Storage | 	storage      *kv.KVStore | ||||||
| 	db           db.DB | 	db           db.DB | ||||||
| 	log          *logrus.Logger | 	log          *logrus.Logger | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // New returns a new media processor. | // New returns a new media processor. | ||||||
| func New(db db.DB, tc typeutils.TypeConverter, mediaHandler media.Handler, storage blob.Storage, config *config.Config, log *logrus.Logger) Processor { | func New(db db.DB, tc typeutils.TypeConverter, mediaHandler media.Handler, storage *kv.KVStore, config *config.Config, log *logrus.Logger) Processor { | ||||||
| 	return &processor{ | 	return &processor{ | ||||||
| 		tc:           tc, | 		tc:           tc, | ||||||
| 		config:       config, | 		config:       config, | ||||||
|  |  | ||||||
|  | @ -23,9 +23,9 @@ import ( | ||||||
| 	"net/http" | 	"net/http" | ||||||
| 	"net/url" | 	"net/url" | ||||||
| 
 | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/federation" | 	"github.com/superseriousbusiness/gotosocial/internal/federation" | ||||||
|  | @ -234,7 +234,7 @@ type processor struct { | ||||||
| 	tc              typeutils.TypeConverter | 	tc              typeutils.TypeConverter | ||||||
| 	oauthServer     oauth.Server | 	oauthServer     oauth.Server | ||||||
| 	mediaHandler    media.Handler | 	mediaHandler    media.Handler | ||||||
| 	storage         blob.Storage | 	storage         *kv.KVStore | ||||||
| 	timelineManager timeline.Manager | 	timelineManager timeline.Manager | ||||||
| 	db              db.DB | 	db              db.DB | ||||||
| 	filter          visibility.Filter | 	filter          visibility.Filter | ||||||
|  | @ -251,8 +251,7 @@ type processor struct { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // NewProcessor returns a new Processor that uses the given federator and logger | // NewProcessor returns a new Processor that uses the given federator and logger | ||||||
| func NewProcessor(config *config.Config, tc typeutils.TypeConverter, federator federation.Federator, oauthServer oauth.Server, mediaHandler media.Handler, storage blob.Storage, timelineManager timeline.Manager, db db.DB, log *logrus.Logger) Processor { | func NewProcessor(config *config.Config, tc typeutils.TypeConverter, federator federation.Federator, oauthServer oauth.Server, mediaHandler media.Handler, storage *kv.KVStore, timelineManager timeline.Manager, db db.DB, log *logrus.Logger) Processor { | ||||||
| 
 |  | ||||||
| 	fromClientAPI := make(chan messages.FromClientAPI, 1000) | 	fromClientAPI := make(chan messages.FromClientAPI, 1000) | ||||||
| 	fromFederator := make(chan messages.FromFederator, 1000) | 	fromFederator := make(chan messages.FromFederator, 1000) | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -21,9 +21,9 @@ package processing_test | ||||||
| import ( | import ( | ||||||
| 	"context" | 	"context" | ||||||
| 
 | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	"github.com/stretchr/testify/suite" | 	"github.com/stretchr/testify/suite" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" |  | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/config" | 	"github.com/superseriousbusiness/gotosocial/internal/config" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/federation" | 	"github.com/superseriousbusiness/gotosocial/internal/federation" | ||||||
|  | @ -43,7 +43,7 @@ type ProcessingStandardTestSuite struct { | ||||||
| 	config              *config.Config | 	config              *config.Config | ||||||
| 	db                  db.DB | 	db                  db.DB | ||||||
| 	log                 *logrus.Logger | 	log                 *logrus.Logger | ||||||
| 	storage             blob.Storage | 	storage             *kv.KVStore | ||||||
| 	typeconverter       typeutils.TypeConverter | 	typeconverter       typeutils.TypeConverter | ||||||
| 	transportController transport.Controller | 	transportController transport.Controller | ||||||
| 	federator           federation.Federator | 	federator           federation.Federator | ||||||
|  |  | ||||||
|  | @ -19,13 +19,13 @@ | ||||||
| package testrig | package testrig | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/federation" | 	"github.com/superseriousbusiness/gotosocial/internal/federation" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/transport" | 	"github.com/superseriousbusiness/gotosocial/internal/transport" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // NewTestFederator returns a federator with the given database and (mock!!) transport controller. | // NewTestFederator returns a federator with the given database and (mock!!) transport controller. | ||||||
| func NewTestFederator(db db.DB, tc transport.Controller, storage blob.Storage) federation.Federator { | func NewTestFederator(db db.DB, tc transport.Controller, storage *kv.KVStore) federation.Federator { | ||||||
| 	return federation.NewFederator(db, NewTestFederatingDB(db), tc, NewTestConfig(), NewTestLog(), NewTestTypeConverter(db), NewTestMediaHandler(db, storage)) | 	return federation.NewFederator(db, NewTestFederatingDB(db), tc, NewTestConfig(), NewTestLog(), NewTestTypeConverter(db), NewTestMediaHandler(db, storage)) | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -19,13 +19,13 @@ | ||||||
| package testrig | package testrig | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/media" | 	"github.com/superseriousbusiness/gotosocial/internal/media" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // NewTestMediaHandler returns a media handler with the default test config, the default test logger, | // NewTestMediaHandler returns a media handler with the default test config, the default test logger, | ||||||
| // and the given db and storage. | // and the given db and storage. | ||||||
| func NewTestMediaHandler(db db.DB, storage blob.Storage) media.Handler { | func NewTestMediaHandler(db db.DB, storage *kv.KVStore) media.Handler { | ||||||
| 	return media.New(NewTestConfig(), db, storage, NewTestLog()) | 	return media.New(NewTestConfig(), db, storage, NewTestLog()) | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -19,13 +19,13 @@ | ||||||
| package testrig | package testrig | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/db" | 	"github.com/superseriousbusiness/gotosocial/internal/db" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/federation" | 	"github.com/superseriousbusiness/gotosocial/internal/federation" | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/processing" | 	"github.com/superseriousbusiness/gotosocial/internal/processing" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // NewTestProcessor returns a Processor suitable for testing purposes | // NewTestProcessor returns a Processor suitable for testing purposes | ||||||
| func NewTestProcessor(db db.DB, storage blob.Storage, federator federation.Federator) processing.Processor { | func NewTestProcessor(db db.DB, storage *kv.KVStore, federator federation.Federator) processing.Processor { | ||||||
| 	return processing.NewProcessor(NewTestConfig(), NewTestTypeConverter(db), federator, NewTestOauthServer(db), NewTestMediaHandler(db, storage), storage, NewTestTimelineManager(db), db, NewTestLog()) | 	return processing.NewProcessor(NewTestConfig(), NewTestTypeConverter(db), federator, NewTestOauthServer(db), NewTestMediaHandler(db, storage), storage, NewTestTimelineManager(db), db, NewTestLog()) | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -19,23 +19,28 @@ | ||||||
| package testrig | package testrig | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"errors" | ||||||
| 	"fmt" | 	"fmt" | ||||||
|  | 	"io" | ||||||
| 	"os" | 	"os" | ||||||
| 
 | 
 | ||||||
| 	"github.com/superseriousbusiness/gotosocial/internal/blob" | 	"git.iim.gay/grufwub/go-store/kv" | ||||||
|  | 	"git.iim.gay/grufwub/go-store/storage" | ||||||
|  | 	"git.iim.gay/grufwub/go-store/util" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // NewTestStorage returns a new in memory storage with the default test config | // NewTestStorage returns a new in memory storage with the default test config | ||||||
| func NewTestStorage() blob.Storage { | func NewTestStorage() *kv.KVStore { | ||||||
| 	s, err := blob.NewInMem(NewTestConfig(), NewTestLog()) | 	storage, err := kv.OpenStorage(&inMemStorage{storage: map[string][]byte{}, overwrite: false}) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		panic(err) | 		panic(err) | ||||||
| 	} | 	} | ||||||
| 	return s | 	return storage | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // StandardStorageSetup populates the storage with standard test entries from the given directory. | // StandardStorageSetup populates the storage with standard test entries from the given directory. | ||||||
| func StandardStorageSetup(s blob.Storage, relativePath string) { | func StandardStorageSetup(s *kv.KVStore, relativePath string) { | ||||||
| 	storedA := newTestStoredAttachments() | 	storedA := newTestStoredAttachments() | ||||||
| 	a := NewTestAttachments() | 	a := NewTestAttachments() | ||||||
| 	for k, paths := range storedA { | 	for k, paths := range storedA { | ||||||
|  | @ -51,14 +56,14 @@ func StandardStorageSetup(s blob.Storage, relativePath string) { | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			panic(err) | 			panic(err) | ||||||
| 		} | 		} | ||||||
| 		if err := s.StoreFileAt(pathOriginal, bOriginal); err != nil { | 		if err := s.Put(pathOriginal, bOriginal); err != nil { | ||||||
| 			panic(err) | 			panic(err) | ||||||
| 		} | 		} | ||||||
| 		bSmall, err := os.ReadFile(fmt.Sprintf("%s/%s", relativePath, filenameSmall)) | 		bSmall, err := os.ReadFile(fmt.Sprintf("%s/%s", relativePath, filenameSmall)) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			panic(err) | 			panic(err) | ||||||
| 		} | 		} | ||||||
| 		if err := s.StoreFileAt(pathSmall, bSmall); err != nil { | 		if err := s.Put(pathSmall, bSmall); err != nil { | ||||||
| 			panic(err) | 			panic(err) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  | @ -78,28 +83,109 @@ func StandardStorageSetup(s blob.Storage, relativePath string) { | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			panic(err) | 			panic(err) | ||||||
| 		} | 		} | ||||||
| 		if err := s.StoreFileAt(pathOriginal, bOriginal); err != nil { | 		if err := s.Put(pathOriginal, bOriginal); err != nil { | ||||||
| 			panic(err) | 			panic(err) | ||||||
| 		} | 		} | ||||||
| 		bStatic, err := os.ReadFile(fmt.Sprintf("%s/%s", relativePath, filenameStatic)) | 		bStatic, err := os.ReadFile(fmt.Sprintf("%s/%s", relativePath, filenameStatic)) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			panic(err) | 			panic(err) | ||||||
| 		} | 		} | ||||||
| 		if err := s.StoreFileAt(pathStatic, bStatic); err != nil { | 		if err := s.Put(pathStatic, bStatic); err != nil { | ||||||
| 			panic(err) | 			panic(err) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // StandardStorageTeardown deletes everything in storage so that it's clean for the next test | // StandardStorageTeardown deletes everything in storage so that it's clean for the next test | ||||||
| func StandardStorageTeardown(s blob.Storage) { | func StandardStorageTeardown(s *kv.KVStore) { | ||||||
| 	keys, err := s.ListKeys() | 	iter, err := s.Iterator(nil) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		panic(err) | 		panic(err) | ||||||
| 	} | 	} | ||||||
|  | 	keys := []string{} | ||||||
|  | 	for iter.Next() { | ||||||
|  | 		keys = append(keys, iter.Key()) | ||||||
|  | 	} | ||||||
|  | 	iter.Release() | ||||||
| 	for _, k := range keys { | 	for _, k := range keys { | ||||||
| 		if err := s.RemoveFileAt(k); err != nil { | 		if err := s.Delete(k); err != nil { | ||||||
| 			panic(err) | 			panic(err) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | type inMemStorage struct { | ||||||
|  | 	storage   map[string][]byte | ||||||
|  | 	overwrite bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (s *inMemStorage) Clean() error { | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (s *inMemStorage) ReadBytes(key string) ([]byte, error) { | ||||||
|  | 	b, ok := s.storage[key] | ||||||
|  | 	if !ok { | ||||||
|  | 		return nil, errors.New("key not found") | ||||||
|  | 	} | ||||||
|  | 	return b, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (s *inMemStorage) ReadStream(key string) (io.ReadCloser, error) { | ||||||
|  | 	b, err := s.ReadBytes(key) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return util.NopReadCloser(bytes.NewReader(b)), nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (s *inMemStorage) WriteBytes(key string, value []byte) error { | ||||||
|  | 	if _, ok := s.storage[key]; ok && !s.overwrite { | ||||||
|  | 		return errors.New("key already in storage") | ||||||
|  | 	} | ||||||
|  | 	s.storage[key] = copyBytes(value) | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (s *inMemStorage) WriteStream(key string, r io.Reader) error { | ||||||
|  | 	b, err := io.ReadAll(r) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	return s.WriteBytes(key, b) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (s *inMemStorage) Stat(key string) (bool, error) { | ||||||
|  | 	_, ok := s.storage[key] | ||||||
|  | 	return ok, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (s *inMemStorage) Remove(key string) error { | ||||||
|  | 	if _, ok := s.storage[key]; !ok { | ||||||
|  | 		return errors.New("key not found") | ||||||
|  | 	} | ||||||
|  | 	delete(s.storage, key) | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (s *inMemStorage) WalkKeys(opts *storage.WalkKeysOptions) error { | ||||||
|  | 	if opts == nil || opts.WalkFn == nil { | ||||||
|  | 		return errors.New("invalid walkfn") | ||||||
|  | 	} | ||||||
|  | 	for key := range s.storage { | ||||||
|  | 		opts.WalkFn(entry(key)) | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
// entry is a minimal storage entry implementation wrapping a bare
// key string, as handed to WalkKeys callbacks.
type entry string

// Key returns the wrapped key string.
func (e entry) Key() string {
	return string(e)
}
|  | 
 | ||||||
// copyBytes returns a fresh slice holding the same bytes as b, so
// stored values stay isolated from later caller mutation.
func copyBytes(b []byte) []byte {
	return append(make([]byte, 0, len(b)), b...)
}
|  |  | ||||||
							
								
								
									
										7
									
								
								vendor/git.iim.gay/grufwub/fastpath/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										7
									
								
								vendor/git.iim.gay/grufwub/fastpath/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,7 @@ | ||||||
|  | Alternative path library with a `strings.Builder` like path builder. | ||||||
|  | 
 | ||||||
|  | Benchmarks compared to `"path"`: | ||||||
|  | 
 | ||||||
|  |  | ||||||
|  | 
 | ||||||
|  | Better documentation + README in progress... | ||||||
							
								
								
									
										
											BIN
										
									
								
								vendor/git.iim.gay/grufwub/fastpath/benchmarks.png
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										
											BIN
										
									
								
								vendor/git.iim.gay/grufwub/fastpath/benchmarks.png
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
										
											Binary file not shown.
										
									
								
							| After Width: | Height: | Size: 106 KiB | 
							
								
								
									
										379
									
								
								vendor/git.iim.gay/grufwub/fastpath/path.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										379
									
								
								vendor/git.iim.gay/grufwub/fastpath/path.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,379 @@ | ||||||
|  | package fastpath | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"unsafe" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// dot is the canonical "." returned for an empty path; allocated
// just once at package init so Bytes() never reallocates it.
var dot = []byte(".")

// Builder is an allocation-conscious path builder in the spirit of
// strings.Builder: paths appended to it are cleaned in place into
// the underlying byte buffer.
type Builder struct {
	noCopy noCopy // flags value copies via `go vet` copylocks

	b  []byte // b is the underlying byte buffer
	dd int    // pos of last '..' appended to builder

	abs bool // abs stores whether path passed to first .Append() is absolute
	set bool // set stores whether b.abs has been set i.e. not first call to .Append()

	// lp int // pos of beginning of previous path segment
	// cp int // pos of beginning of current path segment
}
|  | 
 | ||||||
|  | // NewBuilder returns a new Builder object using the supplied byte | ||||||
|  | // slice as the underlying buffer | ||||||
|  | func NewBuilder(b []byte) Builder { | ||||||
|  | 	if b != nil { | ||||||
|  | 		b = b[:0] | ||||||
|  | 	} | ||||||
|  | 	return Builder{ | ||||||
|  | 		noCopy: noCopy{}, | ||||||
|  | 
 | ||||||
|  | 		b:  b, | ||||||
|  | 		dd: 0, | ||||||
|  | 
 | ||||||
|  | 		abs: false, | ||||||
|  | 		set: false, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Reset resets the Builder object | ||||||
|  | func (b *Builder) Reset() { | ||||||
|  | 	b.b = b.b[:0] | ||||||
|  | 	b.dd = 0 | ||||||
|  | 	b.abs = false | ||||||
|  | 	b.set = false | ||||||
|  | 	// b.lp = 0 | ||||||
|  | 	// b.cp = 0 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Len returns the number of accumulated bytes in the Builder | ||||||
|  | func (b *Builder) Len() int { | ||||||
|  | 	return len(b.b) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Cap returns the capacity of the underlying Builder buffer | ||||||
|  | func (b *Builder) Cap() int { | ||||||
|  | 	return cap(b.b) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Bytes returns the accumulated path bytes. | ||||||
|  | func (b *Builder) Bytes() []byte { | ||||||
|  | 	if b.Len() < 1 { | ||||||
|  | 		return dot | ||||||
|  | 	} | ||||||
|  | 	return b.b | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String returns the accumulated path string. | ||||||
|  | func (b *Builder) String() string { | ||||||
|  | 	if b.Len() < 1 { | ||||||
|  | 		return string(dot) | ||||||
|  | 	} | ||||||
|  | 	return string(b.b) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StringPtr returns a ptr to the accumulated path string. | ||||||
|  | // | ||||||
|  | // Please note the underlying byte slice for this string is | ||||||
|  | // tied to the builder, so any changes will result in the | ||||||
|  | // returned string changing. Consider using .String() if | ||||||
|  | // this is undesired behaviour. | ||||||
|  | func (b *Builder) StringPtr() string { | ||||||
|  | 	if b.Len() < 1 { | ||||||
|  | 		return *(*string)(unsafe.Pointer(&dot)) | ||||||
|  | 	} | ||||||
|  | 	return *(*string)(unsafe.Pointer(&b.b)) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Basename returns the base name of the accumulated path string | ||||||
|  | // func (b *Builder) Basename() string { | ||||||
|  | // 	if b.cp >= b.Len() { | ||||||
|  | // 		return dot | ||||||
|  | // 	} | ||||||
|  | // 	return deepcopy(b.string()[b.cp:]) | ||||||
|  | // } | ||||||
|  | 
 | ||||||
|  | // BasenamePtr returns a ptr to the base name of the accumulated | ||||||
|  | // path string. | ||||||
|  | // | ||||||
|  | // Please note the underlying byte slice for this string is | ||||||
|  | // tied to the builder, so any changes will result in the | ||||||
|  | // returned string changing. Consider using .NewString() if | ||||||
|  | // this is undesired behaviour. | ||||||
|  | // func (b *Builder) BasenamePtr() string { | ||||||
|  | // 	if b.cp >= b.Len() { | ||||||
|  | // 		return dot | ||||||
|  | // 	} | ||||||
|  | // 	return b.string()[b.cp:] | ||||||
|  | // } | ||||||
|  | 
 | ||||||
|  | // Dirname returns the dir path of the accumulated path string | ||||||
|  | // func (b *Builder) Dirname() string { | ||||||
|  | // 	if b.cp < 1 || b.cp-1 >= b.Len() { | ||||||
|  | // 		return dot | ||||||
|  | // 	} | ||||||
|  | // 	return deepcopy(b.string()[:b.cp-1]) | ||||||
|  | // } | ||||||
|  | 
 | ||||||
|  | // DirnamePtr returns a ptr to the dir path of the accumulated | ||||||
|  | // path string. | ||||||
|  | // | ||||||
|  | // Please note the underlying byte slice for this string is | ||||||
|  | // tied to the builder, so any changes will result in the | ||||||
|  | // returned string changing. Consider using .NewString() if | ||||||
|  | // this is undesired behaviour. | ||||||
|  | // func (b *Builder) DirnamePtr() string { | ||||||
|  | // 	if b.cp < 1 || b.cp-1 >= b.Len() { | ||||||
|  | // 		return dot | ||||||
|  | // 	} | ||||||
|  | // 	return b.String()[:b.cp-1] | ||||||
|  | // } | ||||||
|  | 
 | ||||||
// Absolute reports whether the accumulated path is absolute.
func (b *Builder) Absolute() bool {
	return b.abs
}

// SetAbsolute switches the builder between absolute and relative
// form, inserting or removing the leading '/' in place.
func (b *Builder) SetAbsolute(val bool) {
	if !b.set {
		if val {
			// .Append() has not been called yet:
			// add a '/' and mark absolute
			b.guarantee(1)
			b.appendByte('/')
			b.abs = true
		}

		// Mark the absolute flag as decided
		b.set = true
		return
	}

	if !val && b.abs {
		// Already set and absolute. Update
		b.abs = false

		// If not empty (i.e. not just '/'),
		// then shift bytes 1 left
		if b.Len() > 1 {
			copy(b.b, b.b[1:])
		}

		// Truncate 1 byte. In the case of empty,
		// i.e. just '/' then it will drop this
		b.truncate(1)
	} else if val && !b.abs {
		// Already set but NOT abs. Update
		b.abs = true

		// Guarantee 1 byte available
		b.guarantee(1)

		// If empty, just append '/'
		if b.Len() < 1 {
			b.appendByte('/')
			return
		}

		// Increase length
		l := b.Len()
		b.b = b.b[:l+1]

		// Shift bytes 1 right
		copy(b.b[1:], b.b[:l])

		// Set first byte '/'
		b.b[0] = '/'
	}
}
|  | 
 | ||||||
// Append adds and cleans the supplied path bytes to the builder's
// internal buffer, growing the buffer if necessary to accommodate
// the extra path length. The bytes are aliased as a string via
// unsafe to avoid a copy; p must not be mutated during the call.
func (b *Builder) Append(p []byte) {
	b.AppendString(*(*string)(unsafe.Pointer(&p)))
}
|  | 
 | ||||||
// AppendString adds and cleans the supplied path string to the
// builder's internal buffer, growing the buffer if necessary
// to accommodate the extra path length.
func (b *Builder) AppendString(path string) {
	defer func() {
		// If buffer is empty, and an absolute path,
		// ensure it starts with a '/'
		if b.Len() < 1 && b.abs {
			b.appendByte('/')
		}
	}()

	// Empty path, nothing to do
	if len(path) == 0 {
		return
	}

	// Guarantee at least the total length
	// of supplied path available in the buffer
	b.guarantee(len(path))

	// On the very first append, record whether the
	// overall path is absolute (leading '/')
	if !b.set {
		b.abs = len(path) > 0 && path[0] == '/'
		b.set = true
	}

	i := 0
	for i < len(path) {
		switch {
		// Empty path segment: skip the separator
		case path[i] == '/':
			i++

		// Singular '.' path segment, treat as empty
		case path[i] == '.' && (i+1 == len(path) || path[i+1] == '/'):
			i++

		// Backtrack ('..') segment. Reading path[i+1] is safe:
		// the previous case guarantees i+1 < len(path) here
		case path[i] == '.' && path[i+1] == '.' && (i+2 == len(path) || path[i+2] == '/'):
			i += 2

			switch {
			// Check if it's possible to backtrack with
			// our current state of the buffer. i.e. is
			// our buffer length longer than the last
			// '..' we placed?
			case b.Len() > b.dd:
				b.backtrack()

			// If we reached here, need to check if
			// we can append '..' to the path buffer,
			// which is ONLY when path is NOT absolute
			case !b.abs:
				if b.Len() > 0 {
					b.appendByte('/')
				}
				b.appendByte('.')
				b.appendByte('.')
				b.dd = b.Len()
			}

		default:
			// Regular segment: separate from the previous one
			// with '/' unless at the start (or just after the
			// root '/' of an absolute path)
			if (b.abs && b.Len() != 1) || (!b.abs && b.Len() > 0) {
				b.appendByte('/')
			}
			i += b.appendSlice(path[i:])
		}
	}
}
|  | 
 | ||||||
|  | // Clean creates the shortest possible functional equivalent | ||||||
|  | // to the supplied path, resetting the builder before performing | ||||||
|  | // this operation. The builder object is NOT reset after return | ||||||
|  | func (b *Builder) Clean(path string) string { | ||||||
|  | 	b.Reset() | ||||||
|  | 	b.AppendString(path) | ||||||
|  | 	return b.String() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Join connects and cleans multiple paths, resetting the builder before | ||||||
|  | // performing this operation and returning the shortest possible combination | ||||||
|  | // of all the supplied paths. The builder object is NOT reset after return | ||||||
|  | func (b *Builder) Join(base string, paths ...string) string { | ||||||
|  | 	empty := (len(base) < 1) | ||||||
|  | 	b.Reset() | ||||||
|  | 	b.AppendString(base) | ||||||
|  | 	for _, path := range paths { | ||||||
|  | 		b.AppendString(path) | ||||||
|  | 		empty = empty && (len(path) < 1) | ||||||
|  | 	} | ||||||
|  | 	if empty { | ||||||
|  | 		return "" | ||||||
|  | 	} | ||||||
|  | 	return b.String() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Guarantee ensures there is at least the requested size | ||||||
|  | // free bytes available in the buffer, reallocating if | ||||||
|  | // necessary | ||||||
|  | func (b *Builder) Guarantee(size int) { | ||||||
|  | 	b.guarantee(size) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Truncate reduces the length of the buffer by the requested | ||||||
|  | // number of bytes. If the builder is set to absolute, the first | ||||||
|  | // byte (i.e. '/') will never be truncated | ||||||
|  | func (b *Builder) Truncate(size int) { | ||||||
|  | 	// If absolute and just '/', do nothing | ||||||
|  | 	if b.abs && b.Len() == 1 { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Truncate requested bytes | ||||||
|  | 	b.truncate(size) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // truncate reduces the length of the buffer by the requested size, | ||||||
|  | // no sanity checks are performed | ||||||
|  | func (b *Builder) truncate(size int) { | ||||||
|  | 	b.b = b.b[:b.Len()-size] | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // guarantee ensures there is at least the requested size | ||||||
|  | // free bytes available in the buffer, reallocating if necessary. | ||||||
|  | // no sanity checks are performed | ||||||
|  | func (b *Builder) guarantee(size int) { | ||||||
|  | 	if size > b.Cap()-b.Len() { | ||||||
|  | 		nb := make([]byte, 2*b.Cap()+size) | ||||||
|  | 		copy(nb, b.b) | ||||||
|  | 		b.b = nb[:b.Len()] | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // appendByte appends the supplied byte to the end of | ||||||
|  | // the buffer. appending is achieved by continually reslicing the | ||||||
|  | // buffer and setting the next byte-at-index, this is safe as guarantee() | ||||||
|  | // will have been called beforehand | ||||||
|  | func (b *Builder) appendByte(c byte) { | ||||||
|  | 	b.b = b.b[:b.Len()+1] | ||||||
|  | 	b.b[b.Len()-1] = c | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // appendSlice appends the supplied string slice to | ||||||
|  | // the end of the buffer and returns the number of indices | ||||||
|  | // we were able to iterate before hitting a path separator '/'. | ||||||
|  | // appending is achieved by continually reslicing the buffer | ||||||
|  | // and setting the next byte-at-index, this is safe as guarantee() | ||||||
|  | // will have been called beforehand | ||||||
|  | func (b *Builder) appendSlice(slice string) int { | ||||||
|  | 	i := 0 | ||||||
|  | 	for i < len(slice) && slice[i] != '/' { | ||||||
|  | 		b.b = b.b[:b.Len()+1] | ||||||
|  | 		b.b[b.Len()-1] = slice[i] | ||||||
|  | 		i++ | ||||||
|  | 	} | ||||||
|  | 	return i | ||||||
|  | } | ||||||
|  | 
 | ||||||
// backtrack removes the last path segment: it trims the final byte,
// walks back towards the previous separating '/' (never crossing
// the last appended ".." recorded at b.dd), then drops that
// separator too if anything remains.
func (b *Builder) backtrack() {
	b.b = b.b[:b.Len()-1]

	for b.Len()-1 > b.dd && b.b[b.Len()-1] != '/' {
		b.b = b.b[:b.Len()-1]
	}

	if b.Len() > 0 {
		b.b = b.b[:b.Len()-1]
	}
}
|  | 
 | ||||||
// noCopy is embedded so `go vet`'s copylocks check flags accidental
// value copies of Builder.
type noCopy struct{}

// Lock is a no-op used only by vet's copylocks analysis.
func (n *noCopy) Lock()   {}

// Unlock is a no-op used only by vet's copylocks analysis.
func (n *noCopy) Unlock() {}
							
								
								
									
										32
									
								
								vendor/git.iim.gay/grufwub/fastpath/pool.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										32
									
								
								vendor/git.iim.gay/grufwub/fastpath/pool.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,32 @@ | ||||||
|  | package fastpath | ||||||
|  | 
 | ||||||
|  | import "sync" | ||||||
|  | 
 | ||||||
|  | // 1/8 max unix path length | ||||||
|  | const defaultBufSize = 512 | ||||||
|  | 
 | ||||||
|  | var ( | ||||||
|  | 	builderPool sync.Pool | ||||||
|  | 	once        = sync.Once{} | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func pool() *sync.Pool { | ||||||
|  | 	once.Do(func() { | ||||||
|  | 		builderPool = sync.Pool{ | ||||||
|  | 			New: func() interface{} { | ||||||
|  | 				builder := NewBuilder(make([]byte, defaultBufSize)) | ||||||
|  | 				return &builder | ||||||
|  | 			}, | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | 	return &builderPool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func AcquireBuilder() *Builder { | ||||||
|  | 	return pool().Get().(*Builder) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func ReleaseBuilder(b *Builder) { | ||||||
|  | 	b.Reset() | ||||||
|  | 	pool().Put(b) | ||||||
|  | } | ||||||
							
								
								
									
										6
									
								
								vendor/git.iim.gay/grufwub/go-bufpool/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										6
									
								
								vendor/git.iim.gay/grufwub/go-bufpool/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,6 @@ | ||||||
|  | Variable size bufferpool that supports storing buffers up to 512kb in size | ||||||
|  | 
 | ||||||
|  | See documentation for more information: https://godocs.io/git.iim.gay/grufwub/go-bufpool | ||||||
|  | 
 | ||||||
|  | Please note, the test here is a worst-case scenario for allocations (the size | ||||||
|  | requests always increase so a new slice is always required) | ||||||
							
								
								
									
										12
									
								
								vendor/git.iim.gay/grufwub/go-bufpool/log2_table.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										12
									
								
								vendor/git.iim.gay/grufwub/go-bufpool/log2_table.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because one or more lines are too long
											
										
									
								
							
							
								
								
									
										96
									
								
								vendor/git.iim.gay/grufwub/go-bufpool/pool.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										96
									
								
								vendor/git.iim.gay/grufwub/go-bufpool/pool.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,96 @@ | ||||||
|  | package bufpool | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"sync" | ||||||
|  | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-bytes" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// MAX returns the maximum capacity (in bytes) a slice may have and
// still be stored in a BufferPool; larger buffers are dropped on Put.
func MAX() int {
	return log2Max
}
|  | 
 | ||||||
// BufferPool is a variable sized buffer pool, separated into memory pages increasing
// by powers of 2. This can offer large improvements over a sync.Pool designed to allocate
// buffers of single sizes, or multiple buffer pools of differing allocation sizes
type BufferPool struct {
	noCopy noCopy //nolint

	// pools is a predefined-length array of sync.Pools, handling
	// ranges in capacity of 2**(n) --> 2**(n+1)
	pools [log2MaxPower + 1]sync.Pool
	once  sync.Once // guards the lazy one-time init() of the pools
}

// init sets the allocator func for each of the pools; invoked
// exactly once via p.once before first Get/Put use
func (p *BufferPool) init() {
	for i := range p.pools {
		p.pools[i].New = func() interface{} {
			return &bytes.Buffer{}
		}
	}
}
|  | 
 | ||||||
// Get retrieves a Buffer of at least the supplied capacity from the
// pool, allocating only if strictly necessary. If a capacity above
// the maximum supported (see .MAX()) is requested, a slice is
// allocated with the expectation that it will just be dropped on
// the later call to .Put()
func (p *BufferPool) Get(cap int) *bytes.Buffer {
	// If cap out of bounds, just alloc
	if cap < 2 || cap > log2Max {
		buf := bytes.NewBuffer(make([]byte, 0, cap))
		return &buf
	}

	// Ensure initialized
	p.once.Do(p.init)

	// Calculate page idx from log2 table
	pow := uint8(log2Table[cap])
	pool := &p.pools[pow-1]

	// Attempt to fetch buf from pool
	buf := pool.Get().(*bytes.Buffer)

	// Check for required capacity
	if buf.Cap() < cap {
		// We allocate via this method instead
		// of by buf.Guarantee() as this way we
		// can allocate only what the user requested.
		//
		// buf.Guarantee() can allocate a lot more...
		buf.B = make([]byte, 0, cap)
	}

	return buf
}
|  | 
 | ||||||
// Put resets and places the supplied Buffer back in its appropriate
// pool. Buffers below or above the maximum supported capacity
// (see .MAX()) will be dropped
func (p *BufferPool) Put(buf *bytes.Buffer) {
	// Drop out of size range buffers
	if buf.Cap() < 2 || buf.Cap() > log2Max {
		return
	}

	// Ensure initialized
	p.once.Do(p.init)

	// Calculate page idx from log2 table
	pow := uint8(log2Table[buf.Cap()])
	pool := &p.pools[pow-1]

	// Reset, place in pool
	buf.Reset()
	pool.Put(buf)
}
|  | 
 | ||||||
// noCopy makes `go vet`'s copylocks check flag value copies of
// BufferPool.
//nolint
type noCopy struct{}

// Lock is a no-op used only by vet's copylocks analysis.
//nolint
func (n *noCopy) Lock() {}

// Unlock is a no-op used only by vet's copylocks analysis.
//nolint
func (n *noCopy) Unlock() {}
							
								
								
									
										12
									
								
								vendor/git.iim.gay/grufwub/go-bytes/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										12
									
								
								vendor/git.iim.gay/grufwub/go-bytes/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,12 @@ | ||||||
|  | drop-in replacement for standard "bytes" library | ||||||
|  | 
 | ||||||
|  | contains alternative Buffer implementation that provides direct access to the | ||||||
|  | underlying byte-slice, with some interesting alternative struct methods. provides | ||||||
|  | no safety guards, if you pass bad values it will blow up in your face... | ||||||
|  | 
 | ||||||
|  | and alternative `ToUpper()` and `ToLower()` implementations that use lookup | ||||||
|  | tables for improved performance | ||||||
|  | 
 | ||||||
|  | Provides direct call-throughs to most of the "bytes" library functions to facilitate | ||||||
|  | this being a direct drop-in replacement. Over time, alternative implementations may | ||||||
|  | be offered for other functions too. | ||||||
							
								
								
									
										138
									
								
								vendor/git.iim.gay/grufwub/go-bytes/buffer.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										138
									
								
								vendor/git.iim.gay/grufwub/go-bytes/buffer.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,138 @@ | ||||||
|  | package bytes | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"unicode/utf8" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// Buffer is a very simple buffer implementation that allows
// access to and reslicing of the underlying byte slice B.
// No safety guards are provided: out-of-range arguments panic.
type Buffer struct {
	noCopy noCopy
	B      []byte // B is the exposed underlying byte slice
}

// NewBuffer wraps the supplied slice in a Buffer, adopting its
// current contents and capacity as-is.
func NewBuffer(b []byte) Buffer {
	return Buffer{
		noCopy: noCopy{},
		B:      b,
	}
}
|  | 
 | ||||||
// Write appends p to the buffer; the error is always nil (signature
// exists for io.Writer compatibility).
func (b *Buffer) Write(p []byte) (int, error) {
	b.Grow(len(p))
	return copy(b.B[b.Len()-len(p):], p), nil
}

// WriteString appends s to the buffer; the error is always nil.
func (b *Buffer) WriteString(s string) (int, error) {
	b.Grow(len(s))
	return copy(b.B[b.Len()-len(s):], s), nil
}

// WriteByte appends the single byte c; the error is always nil.
func (b *Buffer) WriteByte(c byte) error {
	l := b.Len()
	b.Grow(1)
	b.B[l] = c
	return nil
}

// WriteRune appends the UTF-8 encoding of r, returning the number
// of bytes written; the error is always nil.
func (b *Buffer) WriteRune(r rune) (int, error) {
	if r < utf8.RuneSelf {
		// single-byte rune; WriteByte never errors, so the
		// ignored return is deliberate
		b.WriteByte(byte(r))
		return 1, nil
	}

	// Reserve the max rune width, encode, then trim back to the
	// actual encoded size
	l := b.Len()
	b.Grow(utf8.UTFMax)
	n := utf8.EncodeRune(b.B[l:b.Len()], r)
	b.B = b.B[:l+n]

	return n, nil
}
|  | 
 | ||||||
// WriteAt copies p into the buffer starting at offset start, growing
// as needed; the error is always nil. NOTE(review): when
// start+len(p) falls before the current end, Grow receives a
// negative size and the buffer is truncated to start+len(p) —
// confirm this is the intended semantic.
func (b *Buffer) WriteAt(p []byte, start int64) (int, error) {
	b.Grow(len(p) - int(int64(b.Len())-start))
	return copy(b.B[start:], p), nil
}

// WriteStringAt copies s into the buffer starting at offset start;
// same truncation caveat as WriteAt. The error is always nil.
func (b *Buffer) WriteStringAt(s string, start int64) (int, error) {
	b.Grow(len(s) - int(int64(b.Len())-start))
	return copy(b.B[start:], s), nil
}
|  | 
 | ||||||
// Truncate drops size bytes from the END of the buffer. Note this
// differs from bytes.Buffer.Truncate, which keeps the first n bytes.
func (b *Buffer) Truncate(size int) {
	b.B = b.B[:b.Len()-size]
}

// ShiftByte moves the bytes after index one position left,
// overwriting the byte at index; length is unchanged.
func (b *Buffer) ShiftByte(index int) {
	copy(b.B[index:], b.B[index+1:])
}

// Shift moves the bytes after start+size left by size positions,
// overwriting [start, start+size); length is unchanged.
func (b *Buffer) Shift(start int64, size int) {
	copy(b.B[start:], b.B[start+int64(size):])
}

// DeleteByte removes the byte at index, shrinking the buffer by one.
func (b *Buffer) DeleteByte(index int) {
	b.ShiftByte(index)
	b.Truncate(1)
}

// Delete removes size bytes starting at start, shrinking the buffer.
func (b *Buffer) Delete(start int64, size int) {
	b.Shift(start, size)
	b.Truncate(size)
}
|  | 
 | ||||||
// InsertByte inserts c at index, shifting later bytes right by one.
func (b *Buffer) InsertByte(index int64, c byte) {
	l := b.Len()
	b.Grow(1)
	copy(b.B[index+1:], b.B[index:l])
	b.B[index] = c
}

// Insert inserts p at index, shifting later bytes right by len(p).
func (b *Buffer) Insert(index int64, p []byte) {
	l := b.Len()
	b.Grow(len(p))
	copy(b.B[index+int64(len(p)):], b.B[index:l])
	copy(b.B[index:], p)
}
|  | 
 | ||||||
// Bytes returns the underlying slice directly (NOT a copy).
func (b *Buffer) Bytes() []byte {
	return b.B
}

// String returns an immutable copy of the content as a string.
func (b *Buffer) String() string {
	return string(b.B)
}

// StringPtr returns the content unsafely cast to a string; it
// shares memory with B and changes along with it.
func (b *Buffer) StringPtr() string {
	return BytesToString(b.B)
}

// Cap returns the capacity of the underlying slice.
func (b *Buffer) Cap() int {
	return cap(b.B)
}

// Len returns the current length of the buffer.
func (b *Buffer) Len() int {
	return len(b.B)
}

// Reset empties the buffer while keeping its capacity for reuse.
func (b *Buffer) Reset() {
	b.B = b.B[:0]
}
|  | 
 | ||||||
// Grow extends the buffer length by size bytes; the new tail bytes
// are whatever the (possibly fresh) backing array holds.
func (b *Buffer) Grow(size int) {
	b.Guarantee(size)
	b.B = b.B[:b.Len()+size]
}

// Guarantee ensures at least size free bytes of capacity remain,
// reallocating to 2*cap+size when they do not.
func (b *Buffer) Guarantee(size int) {
	if size > b.Cap()-b.Len() {
		nb := make([]byte, 2*b.Cap()+size)
		copy(nb, b.B)
		b.B = nb[:b.Len()]
	}
}
|  | 
 | ||||||
// noCopy makes `go vet`'s copylocks check flag value copies of
// Buffer.
type noCopy struct{}

// Lock is a no-op used only by vet's copylocks analysis.
func (n *noCopy) Lock()   {}

// Unlock is a no-op used only by vet's copylocks analysis.
func (n *noCopy) Unlock() {}
							
								
								
									
										261
									
								
								vendor/git.iim.gay/grufwub/go-bytes/bytes.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										261
									
								
								vendor/git.iim.gay/grufwub/go-bytes/bytes.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,261 @@ | ||||||
|  | package bytes | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"reflect" | ||||||
|  | 	"unsafe" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// Compile-time checks that both implementations satisfy Bytes.
var (
	_ Bytes = &Buffer{}
	_ Bytes = bytesType{}
)

// Bytes defines a standard way of retrieving the content of a
// byte buffer of some kind.
type Bytes interface {
	// Bytes returns the byte slice content
	Bytes() []byte

	// String returns byte slice cast directly to string, this
	// will cause an allocation but comes with the safety of
	// being an immutable Go string
	String() string

	// StringPtr returns byte slice cast to string via the unsafe
	// package. This comes with the same caveats of accessing via
	// .Bytes() in that the content is liable to change and is NOT
	// immutable, despite being a string type
	StringPtr() string
}
|  | 
 | ||||||
|  | type bytesType []byte | ||||||
|  | 
 | ||||||
|  | func (b bytesType) Bytes() []byte { | ||||||
|  | 	return b | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b bytesType) String() string { | ||||||
|  | 	return string(b) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b bytesType) StringPtr() string { | ||||||
|  | 	return BytesToString(b) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ToBytes casts the provided byte slice as the simplest possible | ||||||
|  | // Bytes interface implementation | ||||||
|  | func ToBytes(b []byte) Bytes { | ||||||
|  | 	return bytesType(b) | ||||||
|  | } | ||||||
|  | 
 | ||||||
// Copy returns a newly allocated copy of slice b. Nil-ness is NOT
// preserved: a nil input yields an empty, non-nil slice.
func Copy(b []byte) []byte {
	dst := make([]byte, len(b))
	copy(dst, b)
	return dst
}
|  | 
 | ||||||
// BytesToString returns byte slice cast to string via the "unsafe"
// package, avoiding the copy made by string(b). The result shares b's
// backing array: it is NOT immutable and must not be used after b is
// modified or reused.
func BytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}
|  | 
 | ||||||
// StringToBytes returns the string cast to a byte slice via the "unsafe"
// and "reflect" packages, avoiding the copy made by []byte(s). The result
// shares s's backing memory: writing through it violates Go's string
// immutability guarantee, and the slice must not outlive s.
func StringToBytes(s string) []byte {
	// thank you to https://github.com/valyala/fasthttp/blob/master/bytesconv.go
	var b []byte

	// Get byte + string headers
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	sh := (*reflect.StringHeader)(unsafe.Pointer(&s))

	// Manually set bytes to string: point the slice header at the
	// string data, with cap pinned to the string length.
	// NOTE(review): go vet flags this reflect.SliceHeader pattern;
	// confirm against the unsafe.Pointer rules before modifying.
	bh.Data = sh.Data
	bh.Len = sh.Len
	bh.Cap = sh.Len

	return b
}
|  | 
 | ||||||
// NOTE(review): the disabled helpers below are buggy as written — the
// inner append may overwrite b[at:] in place (when cap(b) permits)
// before the outer append copies it, corrupting the input. Presumably
// disabled for that reason; fix the aliasing before re-enabling.

// // InsertByte inserts the supplied byte into the slice at provided position
// func InsertByte(b []byte, at int, c byte) []byte {
// 	return append(append(b[:at], c), b[at:]...)
// }

// // Insert inserts the supplied byte slice into the slice at provided position
// func Insert(b []byte, at int, s []byte) []byte {
// 	return append(append(b[:at], s...), b[at:]...)
// }
|  | 
 | ||||||
|  | // ToUpper offers a faster ToUpper implementation using a lookup table | ||||||
|  | func ToUpper(b []byte) { | ||||||
|  | 	for i := 0; i < len(b); i++ { | ||||||
|  | 		c := &b[i] | ||||||
|  | 		*c = toUpperTable[*c] | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ToLower offers a faster ToLower implementation using a lookup table | ||||||
|  | func ToLower(b []byte) { | ||||||
|  | 	for i := 0; i < len(b); i++ { | ||||||
|  | 		c := &b[i] | ||||||
|  | 		*c = toLowerTable[*c] | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
// HasBytePrefix returns whether b has the provided byte prefix
func HasBytePrefix(b []byte, c byte) bool {
	return (len(b) > 0) && (b[0] == c)
}

// HasByteSuffix returns whether b has the provided byte suffix
func HasByteSuffix(b []byte, c byte) bool {
	return (len(b) > 0) && (b[len(b)-1] == c)
}

// TrimBytePrefix returns b without the provided leading byte
func TrimBytePrefix(b []byte, c byte) []byte {
	if HasBytePrefix(b, c) {
		return b[1:]
	}
	return b
}

// TrimByteSuffix returns b without the provided trailing byte
func TrimByteSuffix(b []byte, c byte) []byte {
	if HasByteSuffix(b, c) {
		return b[:len(b)-1]
	}
	return b
}
|  | 
 | ||||||
// The functions below are thin call-throughs to the standard library
// "bytes" package, re-exported so that consumers of this package do
// not need to import both. They add no behavior of their own.

// Compare is a direct call-through to standard library bytes.Compare()
func Compare(b, s []byte) int {
	return bytes.Compare(b, s)
}

// Contains is a direct call-through to standard library bytes.Contains()
func Contains(b, s []byte) bool {
	return bytes.Contains(b, s)
}

// TrimPrefix is a direct call-through to standard library bytes.TrimPrefix()
func TrimPrefix(b, s []byte) []byte {
	return bytes.TrimPrefix(b, s)
}

// TrimSuffix is a direct call-through to standard library bytes.TrimSuffix()
func TrimSuffix(b, s []byte) []byte {
	return bytes.TrimSuffix(b, s)
}

// Equal is a direct call-through to standard library bytes.Equal()
func Equal(b, s []byte) bool {
	return bytes.Equal(b, s)
}

// EqualFold is a direct call-through to standard library bytes.EqualFold()
func EqualFold(b, s []byte) bool {
	return bytes.EqualFold(b, s)
}

// Fields is a direct call-through to standard library bytes.Fields()
func Fields(b []byte) [][]byte {
	return bytes.Fields(b)
}

// FieldsFunc is a direct call-through to standard library bytes.FieldsFunc()
func FieldsFunc(b []byte, fn func(rune) bool) [][]byte {
	return bytes.FieldsFunc(b, fn)
}

// HasPrefix is a direct call-through to standard library bytes.HasPrefix()
func HasPrefix(b, s []byte) bool {
	return bytes.HasPrefix(b, s)
}

// HasSuffix is a direct call-through to standard library bytes.HasSuffix()
func HasSuffix(b, s []byte) bool {
	return bytes.HasSuffix(b, s)
}

// Index is a direct call-through to standard library bytes.Index()
func Index(b, s []byte) int {
	return bytes.Index(b, s)
}

// IndexByte is a direct call-through to standard library bytes.IndexByte()
func IndexByte(b []byte, c byte) int {
	return bytes.IndexByte(b, c)
}

// IndexAny is a direct call-through to standard library bytes.IndexAny()
func IndexAny(b []byte, s string) int {
	return bytes.IndexAny(b, s)
}

// IndexRune is a direct call-through to standard library bytes.IndexRune()
func IndexRune(b []byte, r rune) int {
	return bytes.IndexRune(b, r)
}

// IndexFunc is a direct call-through to standard library bytes.IndexFunc()
func IndexFunc(b []byte, fn func(rune) bool) int {
	return bytes.IndexFunc(b, fn)
}

// LastIndex is a direct call-through to standard library bytes.LastIndex()
func LastIndex(b, s []byte) int {
	return bytes.LastIndex(b, s)
}

// LastIndexByte is a direct call-through to standard library bytes.LastIndexByte()
func LastIndexByte(b []byte, c byte) int {
	return bytes.LastIndexByte(b, c)
}

// LastIndexAny is a direct call-through to standard library bytes.LastIndexAny()
func LastIndexAny(b []byte, s string) int {
	return bytes.LastIndexAny(b, s)
}

// LastIndexFunc is a direct call-through to standard library bytes.LastIndexFunc()
func LastIndexFunc(b []byte, fn func(rune) bool) int {
	return bytes.LastIndexFunc(b, fn)
}

// Replace is a direct call-through to standard library bytes.Replace()
func Replace(b, s, r []byte, c int) []byte {
	return bytes.Replace(b, s, r, c)
}

// ReplaceAll is a direct call-through to standard library bytes.ReplaceAll()
func ReplaceAll(b, s, r []byte) []byte {
	return bytes.ReplaceAll(b, s, r)
}

// Split is a direct call-through to standard library bytes.Split()
func Split(b, s []byte) [][]byte {
	return bytes.Split(b, s)
}

// SplitAfter is a direct call-through to standard library bytes.SplitAfter()
func SplitAfter(b, s []byte) [][]byte {
	return bytes.SplitAfter(b, s)
}

// SplitN is a direct call-through to standard library bytes.SplitN()
func SplitN(b, s []byte, c int) [][]byte {
	return bytes.SplitN(b, s, c)
}

// SplitAfterN is a direct call-through to standard library bytes.SplitAfterN()
func SplitAfterN(b, s []byte, c int) [][]byte {
	return bytes.SplitAfterN(b, s, c)
}

// NewReader is a direct call-through to standard library bytes.NewReader()
func NewReader(b []byte) *bytes.Reader {
	return bytes.NewReader(b)
}
							
								
								
									
										11
									
								
								vendor/git.iim.gay/grufwub/go-bytes/bytesconv_table.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										11
									
								
								vendor/git.iim.gay/grufwub/go-bytes/bytesconv_table.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,11 @@ | ||||||
|  | package bytes | ||||||
|  | 
 | ||||||
|  | // Code generated by go run bytesconv_table_gen.go; DO NOT EDIT. | ||||||
|  | // See bytesconv_table_gen.go for more information about these tables. | ||||||
|  | // | ||||||
|  | // Source: https://github.com/valyala/fasthttp/blob/master/bytes_table_gen.go | ||||||
|  | 
 | ||||||
// 256-entry lookup tables mapping every byte value to its ASCII
// lower-/upper-case equivalent; non-letter bytes map to themselves.
const (
	toLowerTable = "\x00\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@abcdefghijklmnopqrstuvwxyz[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\u007f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
	toUpperTable = "\x00\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`ABCDEFGHIJKLMNOPQRSTUVWXYZ{|}~\u007f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
)
							
								
								
									
										39
									
								
								vendor/git.iim.gay/grufwub/go-bytes/pool.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										39
									
								
								vendor/git.iim.gay/grufwub/go-bytes/pool.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,39 @@ | ||||||
|  | package bytes | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"sync" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | type SizedBufferPool struct { | ||||||
|  | 	pool sync.Pool | ||||||
|  | 	len  int | ||||||
|  | 	cap  int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SizedBufferPool) Init(len, cap int) { | ||||||
|  | 	p.pool.New = func() interface{} { | ||||||
|  | 		buf := NewBuffer(make([]byte, len, cap)) | ||||||
|  | 		return &buf | ||||||
|  | 	} | ||||||
|  | 	p.len = len | ||||||
|  | 	p.cap = cap | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SizedBufferPool) Acquire() *bytes.Buffer { | ||||||
|  | 	return p.pool.Get().(*bytes.Buffer) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SizedBufferPool) Release(buf *bytes.Buffer) { | ||||||
|  | 	// If not enough cap, ignore | ||||||
|  | 	if buf.Cap() < p.cap { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Set length to expected | ||||||
|  | 	buf.Reset() | ||||||
|  | 	buf.Grow(p.len) | ||||||
|  | 
 | ||||||
|  | 	// Place in pool | ||||||
|  | 	p.pool.Put(buf) | ||||||
|  | } | ||||||
							
								
								
									
										661
									
								
								vendor/git.iim.gay/grufwub/go-errors/LICENSE
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										661
									
								
								vendor/git.iim.gay/grufwub/go-errors/LICENSE
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,661 @@ | ||||||
|  |                     GNU AFFERO GENERAL PUBLIC LICENSE | ||||||
|  |                        Version 3, 19 November 2007 | ||||||
|  | 
 | ||||||
|  |  Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> | ||||||
|  |  Everyone is permitted to copy and distribute verbatim copies | ||||||
|  |  of this license document, but changing it is not allowed. | ||||||
|  | 
 | ||||||
|  |                             Preamble | ||||||
|  | 
 | ||||||
|  |   The GNU Affero General Public License is a free, copyleft license for | ||||||
|  | software and other kinds of works, specifically designed to ensure | ||||||
|  | cooperation with the community in the case of network server software. | ||||||
|  | 
 | ||||||
|  |   The licenses for most software and other practical works are designed | ||||||
|  | to take away your freedom to share and change the works.  By contrast, | ||||||
|  | our General Public Licenses are intended to guarantee your freedom to | ||||||
|  | share and change all versions of a program--to make sure it remains free | ||||||
|  | software for all its users. | ||||||
|  | 
 | ||||||
|  |   When we speak of free software, we are referring to freedom, not | ||||||
|  | price.  Our General Public Licenses are designed to make sure that you | ||||||
|  | have the freedom to distribute copies of free software (and charge for | ||||||
|  | them if you wish), that you receive source code or can get it if you | ||||||
|  | want it, that you can change the software or use pieces of it in new | ||||||
|  | free programs, and that you know you can do these things. | ||||||
|  | 
 | ||||||
|  |   Developers that use our General Public Licenses protect your rights | ||||||
|  | with two steps: (1) assert copyright on the software, and (2) offer | ||||||
|  | you this License which gives you legal permission to copy, distribute | ||||||
|  | and/or modify the software. | ||||||
|  | 
 | ||||||
|  |   A secondary benefit of defending all users' freedom is that | ||||||
|  | improvements made in alternate versions of the program, if they | ||||||
|  | receive widespread use, become available for other developers to | ||||||
|  | incorporate.  Many developers of free software are heartened and | ||||||
|  | encouraged by the resulting cooperation.  However, in the case of | ||||||
|  | software used on network servers, this result may fail to come about. | ||||||
|  | The GNU General Public License permits making a modified version and | ||||||
|  | letting the public access it on a server without ever releasing its | ||||||
|  | source code to the public. | ||||||
|  | 
 | ||||||
|  |   The GNU Affero General Public License is designed specifically to | ||||||
|  | ensure that, in such cases, the modified source code becomes available | ||||||
|  | to the community.  It requires the operator of a network server to | ||||||
|  | provide the source code of the modified version running there to the | ||||||
|  | users of that server.  Therefore, public use of a modified version, on | ||||||
|  | a publicly accessible server, gives the public access to the source | ||||||
|  | code of the modified version. | ||||||
|  | 
 | ||||||
|  |   An older license, called the Affero General Public License and | ||||||
|  | published by Affero, was designed to accomplish similar goals.  This is | ||||||
|  | a different license, not a version of the Affero GPL, but Affero has | ||||||
|  | released a new version of the Affero GPL which permits relicensing under | ||||||
|  | this license. | ||||||
|  | 
 | ||||||
|  |   The precise terms and conditions for copying, distribution and | ||||||
|  | modification follow. | ||||||
|  | 
 | ||||||
|  |                        TERMS AND CONDITIONS | ||||||
|  | 
 | ||||||
|  |   0. Definitions. | ||||||
|  | 
 | ||||||
|  |   "This License" refers to version 3 of the GNU Affero General Public License. | ||||||
|  | 
 | ||||||
|  |   "Copyright" also means copyright-like laws that apply to other kinds of | ||||||
|  | works, such as semiconductor masks. | ||||||
|  | 
 | ||||||
|  |   "The Program" refers to any copyrightable work licensed under this | ||||||
|  | License.  Each licensee is addressed as "you".  "Licensees" and | ||||||
|  | "recipients" may be individuals or organizations. | ||||||
|  | 
 | ||||||
|  |   To "modify" a work means to copy from or adapt all or part of the work | ||||||
|  | in a fashion requiring copyright permission, other than the making of an | ||||||
|  | exact copy.  The resulting work is called a "modified version" of the | ||||||
|  | earlier work or a work "based on" the earlier work. | ||||||
|  | 
 | ||||||
|  |   A "covered work" means either the unmodified Program or a work based | ||||||
|  | on the Program. | ||||||
|  | 
 | ||||||
|  |   To "propagate" a work means to do anything with it that, without | ||||||
|  | permission, would make you directly or secondarily liable for | ||||||
|  | infringement under applicable copyright law, except executing it on a | ||||||
|  | computer or modifying a private copy.  Propagation includes copying, | ||||||
|  | distribution (with or without modification), making available to the | ||||||
|  | public, and in some countries other activities as well. | ||||||
|  | 
 | ||||||
|  |   To "convey" a work means any kind of propagation that enables other | ||||||
|  | parties to make or receive copies.  Mere interaction with a user through | ||||||
|  | a computer network, with no transfer of a copy, is not conveying. | ||||||
|  | 
 | ||||||
|  |   An interactive user interface displays "Appropriate Legal Notices" | ||||||
|  | to the extent that it includes a convenient and prominently visible | ||||||
|  | feature that (1) displays an appropriate copyright notice, and (2) | ||||||
|  | tells the user that there is no warranty for the work (except to the | ||||||
|  | extent that warranties are provided), that licensees may convey the | ||||||
|  | work under this License, and how to view a copy of this License.  If | ||||||
|  | the interface presents a list of user commands or options, such as a | ||||||
|  | menu, a prominent item in the list meets this criterion. | ||||||
|  | 
 | ||||||
|  |   1. Source Code. | ||||||
|  | 
 | ||||||
|  |   The "source code" for a work means the preferred form of the work | ||||||
|  | for making modifications to it.  "Object code" means any non-source | ||||||
|  | form of a work. | ||||||
|  | 
 | ||||||
|  |   A "Standard Interface" means an interface that either is an official | ||||||
|  | standard defined by a recognized standards body, or, in the case of | ||||||
|  | interfaces specified for a particular programming language, one that | ||||||
|  | is widely used among developers working in that language. | ||||||
|  | 
 | ||||||
|  |   The "System Libraries" of an executable work include anything, other | ||||||
|  | than the work as a whole, that (a) is included in the normal form of | ||||||
|  | packaging a Major Component, but which is not part of that Major | ||||||
|  | Component, and (b) serves only to enable use of the work with that | ||||||
|  | Major Component, or to implement a Standard Interface for which an | ||||||
|  | implementation is available to the public in source code form.  A | ||||||
|  | "Major Component", in this context, means a major essential component | ||||||
|  | (kernel, window system, and so on) of the specific operating system | ||||||
|  | (if any) on which the executable work runs, or a compiler used to | ||||||
|  | produce the work, or an object code interpreter used to run it. | ||||||
|  | 
 | ||||||
|  |   The "Corresponding Source" for a work in object code form means all | ||||||
|  | the source code needed to generate, install, and (for an executable | ||||||
|  | work) run the object code and to modify the work, including scripts to | ||||||
|  | control those activities.  However, it does not include the work's | ||||||
|  | System Libraries, or general-purpose tools or generally available free | ||||||
|  | programs which are used unmodified in performing those activities but | ||||||
|  | which are not part of the work.  For example, Corresponding Source | ||||||
|  | includes interface definition files associated with source files for | ||||||
|  | the work, and the source code for shared libraries and dynamically | ||||||
|  | linked subprograms that the work is specifically designed to require, | ||||||
|  | such as by intimate data communication or control flow between those | ||||||
|  | subprograms and other parts of the work. | ||||||
|  | 
 | ||||||
|  |   The Corresponding Source need not include anything that users | ||||||
|  | can regenerate automatically from other parts of the Corresponding | ||||||
|  | Source. | ||||||
|  | 
 | ||||||
|  |   The Corresponding Source for a work in source code form is that | ||||||
|  | same work. | ||||||
|  | 
 | ||||||
|  |   2. Basic Permissions. | ||||||
|  | 
 | ||||||
|  |   All rights granted under this License are granted for the term of | ||||||
|  | copyright on the Program, and are irrevocable provided the stated | ||||||
|  | conditions are met.  This License explicitly affirms your unlimited | ||||||
|  | permission to run the unmodified Program.  The output from running a | ||||||
|  | covered work is covered by this License only if the output, given its | ||||||
|  | content, constitutes a covered work.  This License acknowledges your | ||||||
|  | rights of fair use or other equivalent, as provided by copyright law. | ||||||
|  | 
 | ||||||
|  |   You may make, run and propagate covered works that you do not | ||||||
|  | convey, without conditions so long as your license otherwise remains | ||||||
|  | in force.  You may convey covered works to others for the sole purpose | ||||||
|  | of having them make modifications exclusively for you, or provide you | ||||||
|  | with facilities for running those works, provided that you comply with | ||||||
|  | the terms of this License in conveying all material for which you do | ||||||
|  | not control copyright.  Those thus making or running the covered works | ||||||
|  | for you must do so exclusively on your behalf, under your direction | ||||||
|  | and control, on terms that prohibit them from making any copies of | ||||||
|  | your copyrighted material outside their relationship with you. | ||||||
|  | 
 | ||||||
|  |   Conveying under any other circumstances is permitted solely under | ||||||
|  | the conditions stated below.  Sublicensing is not allowed; section 10 | ||||||
|  | makes it unnecessary. | ||||||
|  | 
 | ||||||
|  |   3. Protecting Users' Legal Rights From Anti-Circumvention Law. | ||||||
|  | 
 | ||||||
|  |   No covered work shall be deemed part of an effective technological | ||||||
|  | measure under any applicable law fulfilling obligations under article | ||||||
|  | 11 of the WIPO copyright treaty adopted on 20 December 1996, or | ||||||
|  | similar laws prohibiting or restricting circumvention of such | ||||||
|  | measures. | ||||||
|  | 
 | ||||||
|  |   When you convey a covered work, you waive any legal power to forbid | ||||||
|  | circumvention of technological measures to the extent such circumvention | ||||||
|  | is effected by exercising rights under this License with respect to | ||||||
|  | the covered work, and you disclaim any intention to limit operation or | ||||||
|  | modification of the work as a means of enforcing, against the work's | ||||||
|  | users, your or third parties' legal rights to forbid circumvention of | ||||||
|  | technological measures. | ||||||
|  | 
 | ||||||
|  |   4. Conveying Verbatim Copies. | ||||||
|  | 
 | ||||||
|  |   You may convey verbatim copies of the Program's source code as you | ||||||
|  | receive it, in any medium, provided that you conspicuously and | ||||||
|  | appropriately publish on each copy an appropriate copyright notice; | ||||||
|  | keep intact all notices stating that this License and any | ||||||
|  | non-permissive terms added in accord with section 7 apply to the code; | ||||||
|  | keep intact all notices of the absence of any warranty; and give all | ||||||
|  | recipients a copy of this License along with the Program. | ||||||
|  | 
 | ||||||
|  |   You may charge any price or no price for each copy that you convey, | ||||||
|  | and you may offer support or warranty protection for a fee. | ||||||
|  | 
 | ||||||
|  |   5. Conveying Modified Source Versions. | ||||||
|  | 
 | ||||||
|  |   You may convey a work based on the Program, or the modifications to | ||||||
|  | produce it from the Program, in the form of source code under the | ||||||
|  | terms of section 4, provided that you also meet all of these conditions: | ||||||
|  | 
 | ||||||
|  |     a) The work must carry prominent notices stating that you modified | ||||||
|  |     it, and giving a relevant date. | ||||||
|  | 
 | ||||||
|  |     b) The work must carry prominent notices stating that it is | ||||||
|  |     released under this License and any conditions added under section | ||||||
|  |     7.  This requirement modifies the requirement in section 4 to | ||||||
|  |     "keep intact all notices". | ||||||
|  | 
 | ||||||
|  |     c) You must license the entire work, as a whole, under this | ||||||
|  |     License to anyone who comes into possession of a copy.  This | ||||||
|  |     License will therefore apply, along with any applicable section 7 | ||||||
|  |     additional terms, to the whole of the work, and all its parts, | ||||||
|  |     regardless of how they are packaged.  This License gives no | ||||||
|  |     permission to license the work in any other way, but it does not | ||||||
|  |     invalidate such permission if you have separately received it. | ||||||
|  | 
 | ||||||
|  |     d) If the work has interactive user interfaces, each must display | ||||||
|  |     Appropriate Legal Notices; however, if the Program has interactive | ||||||
|  |     interfaces that do not display Appropriate Legal Notices, your | ||||||
|  |     work need not make them do so. | ||||||
|  | 
 | ||||||
|  |   A compilation of a covered work with other separate and independent | ||||||
|  | works, which are not by their nature extensions of the covered work, | ||||||
|  | and which are not combined with it such as to form a larger program, | ||||||
|  | in or on a volume of a storage or distribution medium, is called an | ||||||
|  | "aggregate" if the compilation and its resulting copyright are not | ||||||
|  | used to limit the access or legal rights of the compilation's users | ||||||
|  | beyond what the individual works permit.  Inclusion of a covered work | ||||||
|  | in an aggregate does not cause this License to apply to the other | ||||||
|  | parts of the aggregate. | ||||||
|  | 
 | ||||||
|  |   6. Conveying Non-Source Forms. | ||||||
|  | 
 | ||||||
|  |   You may convey a covered work in object code form under the terms | ||||||
|  | of sections 4 and 5, provided that you also convey the | ||||||
|  | machine-readable Corresponding Source under the terms of this License, | ||||||
|  | in one of these ways: | ||||||
|  | 
 | ||||||
|  |     a) Convey the object code in, or embodied in, a physical product | ||||||
|  |     (including a physical distribution medium), accompanied by the | ||||||
|  |     Corresponding Source fixed on a durable physical medium | ||||||
|  |     customarily used for software interchange. | ||||||
|  | 
 | ||||||
|  |     b) Convey the object code in, or embodied in, a physical product | ||||||
|  |     (including a physical distribution medium), accompanied by a | ||||||
|  |     written offer, valid for at least three years and valid for as | ||||||
|  |     long as you offer spare parts or customer support for that product | ||||||
|  |     model, to give anyone who possesses the object code either (1) a | ||||||
|  |     copy of the Corresponding Source for all the software in the | ||||||
|  |     product that is covered by this License, on a durable physical | ||||||
|  |     medium customarily used for software interchange, for a price no | ||||||
|  |     more than your reasonable cost of physically performing this | ||||||
|  |     conveying of source, or (2) access to copy the | ||||||
|  |     Corresponding Source from a network server at no charge. | ||||||
|  | 
 | ||||||
|  |     c) Convey individual copies of the object code with a copy of the | ||||||
|  |     written offer to provide the Corresponding Source.  This | ||||||
|  |     alternative is allowed only occasionally and noncommercially, and | ||||||
|  |     only if you received the object code with such an offer, in accord | ||||||
|  |     with subsection 6b. | ||||||
|  | 
 | ||||||
|  |     d) Convey the object code by offering access from a designated | ||||||
|  |     place (gratis or for a charge), and offer equivalent access to the | ||||||
|  |     Corresponding Source in the same way through the same place at no | ||||||
|  |     further charge.  You need not require recipients to copy the | ||||||
|  |     Corresponding Source along with the object code.  If the place to | ||||||
|  |     copy the object code is a network server, the Corresponding Source | ||||||
|  |     may be on a different server (operated by you or a third party) | ||||||
|  |     that supports equivalent copying facilities, provided you maintain | ||||||
|  |     clear directions next to the object code saying where to find the | ||||||
|  |     Corresponding Source.  Regardless of what server hosts the | ||||||
|  |     Corresponding Source, you remain obligated to ensure that it is | ||||||
|  |     available for as long as needed to satisfy these requirements. | ||||||
|  | 
 | ||||||
|  |     e) Convey the object code using peer-to-peer transmission, provided | ||||||
|  |     you inform other peers where the object code and Corresponding | ||||||
|  |     Source of the work are being offered to the general public at no | ||||||
|  |     charge under subsection 6d. | ||||||
|  | 
 | ||||||
|  |   A separable portion of the object code, whose source code is excluded | ||||||
|  | from the Corresponding Source as a System Library, need not be | ||||||
|  | included in conveying the object code work. | ||||||
|  | 
 | ||||||
|  |   A "User Product" is either (1) a "consumer product", which means any | ||||||
|  | tangible personal property which is normally used for personal, family, | ||||||
|  | or household purposes, or (2) anything designed or sold for incorporation | ||||||
|  | into a dwelling.  In determining whether a product is a consumer product, | ||||||
|  | doubtful cases shall be resolved in favor of coverage.  For a particular | ||||||
|  | product received by a particular user, "normally used" refers to a | ||||||
|  | typical or common use of that class of product, regardless of the status | ||||||
|  | of the particular user or of the way in which the particular user | ||||||
|  | actually uses, or expects or is expected to use, the product.  A product | ||||||
|  | is a consumer product regardless of whether the product has substantial | ||||||
|  | commercial, industrial or non-consumer uses, unless such uses represent | ||||||
|  | the only significant mode of use of the product. | ||||||
|  | 
 | ||||||
|  |   "Installation Information" for a User Product means any methods, | ||||||
|  | procedures, authorization keys, or other information required to install | ||||||
|  | and execute modified versions of a covered work in that User Product from | ||||||
|  | a modified version of its Corresponding Source.  The information must | ||||||
|  | suffice to ensure that the continued functioning of the modified object | ||||||
|  | code is in no case prevented or interfered with solely because | ||||||
|  | modification has been made. | ||||||
|  | 
 | ||||||
|  |   If you convey an object code work under this section in, or with, or | ||||||
|  | specifically for use in, a User Product, and the conveying occurs as | ||||||
|  | part of a transaction in which the right of possession and use of the | ||||||
|  | User Product is transferred to the recipient in perpetuity or for a | ||||||
|  | fixed term (regardless of how the transaction is characterized), the | ||||||
|  | Corresponding Source conveyed under this section must be accompanied | ||||||
|  | by the Installation Information.  But this requirement does not apply | ||||||
|  | if neither you nor any third party retains the ability to install | ||||||
|  | modified object code on the User Product (for example, the work has | ||||||
|  | been installed in ROM). | ||||||
|  | 
 | ||||||
|  |   The requirement to provide Installation Information does not include a | ||||||
|  | requirement to continue to provide support service, warranty, or updates | ||||||
|  | for a work that has been modified or installed by the recipient, or for | ||||||
|  | the User Product in which it has been modified or installed.  Access to a | ||||||
|  | network may be denied when the modification itself materially and | ||||||
|  | adversely affects the operation of the network or violates the rules and | ||||||
|  | protocols for communication across the network. | ||||||
|  | 
 | ||||||
|  |   Corresponding Source conveyed, and Installation Information provided, | ||||||
|  | in accord with this section must be in a format that is publicly | ||||||
|  | documented (and with an implementation available to the public in | ||||||
|  | source code form), and must require no special password or key for | ||||||
|  | unpacking, reading or copying. | ||||||
|  | 
 | ||||||
|  |   7. Additional Terms. | ||||||
|  | 
 | ||||||
|  |   "Additional permissions" are terms that supplement the terms of this | ||||||
|  | License by making exceptions from one or more of its conditions. | ||||||
|  | Additional permissions that are applicable to the entire Program shall | ||||||
|  | be treated as though they were included in this License, to the extent | ||||||
|  | that they are valid under applicable law.  If additional permissions | ||||||
|  | apply only to part of the Program, that part may be used separately | ||||||
|  | under those permissions, but the entire Program remains governed by | ||||||
|  | this License without regard to the additional permissions. | ||||||
|  | 
 | ||||||
|  |   When you convey a copy of a covered work, you may at your option | ||||||
|  | remove any additional permissions from that copy, or from any part of | ||||||
|  | it.  (Additional permissions may be written to require their own | ||||||
|  | removal in certain cases when you modify the work.)  You may place | ||||||
|  | additional permissions on material, added by you to a covered work, | ||||||
|  | for which you have or can give appropriate copyright permission. | ||||||
|  | 
 | ||||||
|  |   Notwithstanding any other provision of this License, for material you | ||||||
|  | add to a covered work, you may (if authorized by the copyright holders of | ||||||
|  | that material) supplement the terms of this License with terms: | ||||||
|  | 
 | ||||||
|  |     a) Disclaiming warranty or limiting liability differently from the | ||||||
|  |     terms of sections 15 and 16 of this License; or | ||||||
|  | 
 | ||||||
|  |     b) Requiring preservation of specified reasonable legal notices or | ||||||
|  |     author attributions in that material or in the Appropriate Legal | ||||||
|  |     Notices displayed by works containing it; or | ||||||
|  | 
 | ||||||
|  |     c) Prohibiting misrepresentation of the origin of that material, or | ||||||
|  |     requiring that modified versions of such material be marked in | ||||||
|  |     reasonable ways as different from the original version; or | ||||||
|  | 
 | ||||||
|  |     d) Limiting the use for publicity purposes of names of licensors or | ||||||
|  |     authors of the material; or | ||||||
|  | 
 | ||||||
|  |     e) Declining to grant rights under trademark law for use of some | ||||||
|  |     trade names, trademarks, or service marks; or | ||||||
|  | 
 | ||||||
|  |     f) Requiring indemnification of licensors and authors of that | ||||||
|  |     material by anyone who conveys the material (or modified versions of | ||||||
|  |     it) with contractual assumptions of liability to the recipient, for | ||||||
|  |     any liability that these contractual assumptions directly impose on | ||||||
|  |     those licensors and authors. | ||||||
|  | 
 | ||||||
|  |   All other non-permissive additional terms are considered "further | ||||||
|  | restrictions" within the meaning of section 10.  If the Program as you | ||||||
|  | received it, or any part of it, contains a notice stating that it is | ||||||
|  | governed by this License along with a term that is a further | ||||||
|  | restriction, you may remove that term.  If a license document contains | ||||||
|  | a further restriction but permits relicensing or conveying under this | ||||||
|  | License, you may add to a covered work material governed by the terms | ||||||
|  | of that license document, provided that the further restriction does | ||||||
|  | not survive such relicensing or conveying. | ||||||
|  | 
 | ||||||
|  |   If you add terms to a covered work in accord with this section, you | ||||||
|  | must place, in the relevant source files, a statement of the | ||||||
|  | additional terms that apply to those files, or a notice indicating | ||||||
|  | where to find the applicable terms. | ||||||
|  | 
 | ||||||
|  |   Additional terms, permissive or non-permissive, may be stated in the | ||||||
|  | form of a separately written license, or stated as exceptions; | ||||||
|  | the above requirements apply either way. | ||||||
|  | 
 | ||||||
|  |   8. Termination. | ||||||
|  | 
 | ||||||
|  |   You may not propagate or modify a covered work except as expressly | ||||||
|  | provided under this License.  Any attempt otherwise to propagate or | ||||||
|  | modify it is void, and will automatically terminate your rights under | ||||||
|  | this License (including any patent licenses granted under the third | ||||||
|  | paragraph of section 11). | ||||||
|  | 
 | ||||||
|  |   However, if you cease all violation of this License, then your | ||||||
|  | license from a particular copyright holder is reinstated (a) | ||||||
|  | provisionally, unless and until the copyright holder explicitly and | ||||||
|  | finally terminates your license, and (b) permanently, if the copyright | ||||||
|  | holder fails to notify you of the violation by some reasonable means | ||||||
|  | prior to 60 days after the cessation. | ||||||
|  | 
 | ||||||
|  |   Moreover, your license from a particular copyright holder is | ||||||
|  | reinstated permanently if the copyright holder notifies you of the | ||||||
|  | violation by some reasonable means, this is the first time you have | ||||||
|  | received notice of violation of this License (for any work) from that | ||||||
|  | copyright holder, and you cure the violation prior to 30 days after | ||||||
|  | your receipt of the notice. | ||||||
|  | 
 | ||||||
|  |   Termination of your rights under this section does not terminate the | ||||||
|  | licenses of parties who have received copies or rights from you under | ||||||
|  | this License.  If your rights have been terminated and not permanently | ||||||
|  | reinstated, you do not qualify to receive new licenses for the same | ||||||
|  | material under section 10. | ||||||
|  | 
 | ||||||
|  |   9. Acceptance Not Required for Having Copies. | ||||||
|  | 
 | ||||||
|  |   You are not required to accept this License in order to receive or | ||||||
|  | run a copy of the Program.  Ancillary propagation of a covered work | ||||||
|  | occurring solely as a consequence of using peer-to-peer transmission | ||||||
|  | to receive a copy likewise does not require acceptance.  However, | ||||||
|  | nothing other than this License grants you permission to propagate or | ||||||
|  | modify any covered work.  These actions infringe copyright if you do | ||||||
|  | not accept this License.  Therefore, by modifying or propagating a | ||||||
|  | covered work, you indicate your acceptance of this License to do so. | ||||||
|  | 
 | ||||||
|  |   10. Automatic Licensing of Downstream Recipients. | ||||||
|  | 
 | ||||||
|  |   Each time you convey a covered work, the recipient automatically | ||||||
|  | receives a license from the original licensors, to run, modify and | ||||||
|  | propagate that work, subject to this License.  You are not responsible | ||||||
|  | for enforcing compliance by third parties with this License. | ||||||
|  | 
 | ||||||
|  |   An "entity transaction" is a transaction transferring control of an | ||||||
|  | organization, or substantially all assets of one, or subdividing an | ||||||
|  | organization, or merging organizations.  If propagation of a covered | ||||||
|  | work results from an entity transaction, each party to that | ||||||
|  | transaction who receives a copy of the work also receives whatever | ||||||
|  | licenses to the work the party's predecessor in interest had or could | ||||||
|  | give under the previous paragraph, plus a right to possession of the | ||||||
|  | Corresponding Source of the work from the predecessor in interest, if | ||||||
|  | the predecessor has it or can get it with reasonable efforts. | ||||||
|  | 
 | ||||||
|  |   You may not impose any further restrictions on the exercise of the | ||||||
|  | rights granted or affirmed under this License.  For example, you may | ||||||
|  | not impose a license fee, royalty, or other charge for exercise of | ||||||
|  | rights granted under this License, and you may not initiate litigation | ||||||
|  | (including a cross-claim or counterclaim in a lawsuit) alleging that | ||||||
|  | any patent claim is infringed by making, using, selling, offering for | ||||||
|  | sale, or importing the Program or any portion of it. | ||||||
|  | 
 | ||||||
|  |   11. Patents. | ||||||
|  | 
 | ||||||
|  |   A "contributor" is a copyright holder who authorizes use under this | ||||||
|  | License of the Program or a work on which the Program is based.  The | ||||||
|  | work thus licensed is called the contributor's "contributor version". | ||||||
|  | 
 | ||||||
|  |   A contributor's "essential patent claims" are all patent claims | ||||||
|  | owned or controlled by the contributor, whether already acquired or | ||||||
|  | hereafter acquired, that would be infringed by some manner, permitted | ||||||
|  | by this License, of making, using, or selling its contributor version, | ||||||
|  | but do not include claims that would be infringed only as a | ||||||
|  | consequence of further modification of the contributor version.  For | ||||||
|  | purposes of this definition, "control" includes the right to grant | ||||||
|  | patent sublicenses in a manner consistent with the requirements of | ||||||
|  | this License. | ||||||
|  | 
 | ||||||
|  |   Each contributor grants you a non-exclusive, worldwide, royalty-free | ||||||
|  | patent license under the contributor's essential patent claims, to | ||||||
|  | make, use, sell, offer for sale, import and otherwise run, modify and | ||||||
|  | propagate the contents of its contributor version. | ||||||
|  | 
 | ||||||
|  |   In the following three paragraphs, a "patent license" is any express | ||||||
|  | agreement or commitment, however denominated, not to enforce a patent | ||||||
|  | (such as an express permission to practice a patent or covenant not to | ||||||
|  | sue for patent infringement).  To "grant" such a patent license to a | ||||||
|  | party means to make such an agreement or commitment not to enforce a | ||||||
|  | patent against the party. | ||||||
|  | 
 | ||||||
|  |   If you convey a covered work, knowingly relying on a patent license, | ||||||
|  | and the Corresponding Source of the work is not available for anyone | ||||||
|  | to copy, free of charge and under the terms of this License, through a | ||||||
|  | publicly available network server or other readily accessible means, | ||||||
|  | then you must either (1) cause the Corresponding Source to be so | ||||||
|  | available, or (2) arrange to deprive yourself of the benefit of the | ||||||
|  | patent license for this particular work, or (3) arrange, in a manner | ||||||
|  | consistent with the requirements of this License, to extend the patent | ||||||
|  | license to downstream recipients.  "Knowingly relying" means you have | ||||||
|  | actual knowledge that, but for the patent license, your conveying the | ||||||
|  | covered work in a country, or your recipient's use of the covered work | ||||||
|  | in a country, would infringe one or more identifiable patents in that | ||||||
|  | country that you have reason to believe are valid. | ||||||
|  | 
 | ||||||
|  |   If, pursuant to or in connection with a single transaction or | ||||||
|  | arrangement, you convey, or propagate by procuring conveyance of, a | ||||||
|  | covered work, and grant a patent license to some of the parties | ||||||
|  | receiving the covered work authorizing them to use, propagate, modify | ||||||
|  | or convey a specific copy of the covered work, then the patent license | ||||||
|  | you grant is automatically extended to all recipients of the covered | ||||||
|  | work and works based on it. | ||||||
|  | 
 | ||||||
|  |   A patent license is "discriminatory" if it does not include within | ||||||
|  | the scope of its coverage, prohibits the exercise of, or is | ||||||
|  | conditioned on the non-exercise of one or more of the rights that are | ||||||
|  | specifically granted under this License.  You may not convey a covered | ||||||
|  | work if you are a party to an arrangement with a third party that is | ||||||
|  | in the business of distributing software, under which you make payment | ||||||
|  | to the third party based on the extent of your activity of conveying | ||||||
|  | the work, and under which the third party grants, to any of the | ||||||
|  | parties who would receive the covered work from you, a discriminatory | ||||||
|  | patent license (a) in connection with copies of the covered work | ||||||
|  | conveyed by you (or copies made from those copies), or (b) primarily | ||||||
|  | for and in connection with specific products or compilations that | ||||||
|  | contain the covered work, unless you entered into that arrangement, | ||||||
|  | or that patent license was granted, prior to 28 March 2007. | ||||||
|  | 
 | ||||||
|  |   Nothing in this License shall be construed as excluding or limiting | ||||||
|  | any implied license or other defenses to infringement that may | ||||||
|  | otherwise be available to you under applicable patent law. | ||||||
|  | 
 | ||||||
|  |   12. No Surrender of Others' Freedom. | ||||||
|  | 
 | ||||||
|  |   If conditions are imposed on you (whether by court order, agreement or | ||||||
|  | otherwise) that contradict the conditions of this License, they do not | ||||||
|  | excuse you from the conditions of this License.  If you cannot convey a | ||||||
|  | covered work so as to satisfy simultaneously your obligations under this | ||||||
|  | License and any other pertinent obligations, then as a consequence you may | ||||||
|  | not convey it at all.  For example, if you agree to terms that obligate you | ||||||
|  | to collect a royalty for further conveying from those to whom you convey | ||||||
|  | the Program, the only way you could satisfy both those terms and this | ||||||
|  | License would be to refrain entirely from conveying the Program. | ||||||
|  | 
 | ||||||
|  |   13. Remote Network Interaction; Use with the GNU General Public License. | ||||||
|  | 
 | ||||||
|  |   Notwithstanding any other provision of this License, if you modify the | ||||||
|  | Program, your modified version must prominently offer all users | ||||||
|  | interacting with it remotely through a computer network (if your version | ||||||
|  | supports such interaction) an opportunity to receive the Corresponding | ||||||
|  | Source of your version by providing access to the Corresponding Source | ||||||
|  | from a network server at no charge, through some standard or customary | ||||||
|  | means of facilitating copying of software.  This Corresponding Source | ||||||
|  | shall include the Corresponding Source for any work covered by version 3 | ||||||
|  | of the GNU General Public License that is incorporated pursuant to the | ||||||
|  | following paragraph. | ||||||
|  | 
 | ||||||
|  |   Notwithstanding any other provision of this License, you have | ||||||
|  | permission to link or combine any covered work with a work licensed | ||||||
|  | under version 3 of the GNU General Public License into a single | ||||||
|  | combined work, and to convey the resulting work.  The terms of this | ||||||
|  | License will continue to apply to the part which is the covered work, | ||||||
|  | but the work with which it is combined will remain governed by version | ||||||
|  | 3 of the GNU General Public License. | ||||||
|  | 
 | ||||||
|  |   14. Revised Versions of this License. | ||||||
|  | 
 | ||||||
|  |   The Free Software Foundation may publish revised and/or new versions of | ||||||
|  | the GNU Affero General Public License from time to time.  Such new versions | ||||||
|  | will be similar in spirit to the present version, but may differ in detail to | ||||||
|  | address new problems or concerns. | ||||||
|  | 
 | ||||||
|  |   Each version is given a distinguishing version number.  If the | ||||||
|  | Program specifies that a certain numbered version of the GNU Affero General | ||||||
|  | Public License "or any later version" applies to it, you have the | ||||||
|  | option of following the terms and conditions either of that numbered | ||||||
|  | version or of any later version published by the Free Software | ||||||
|  | Foundation.  If the Program does not specify a version number of the | ||||||
|  | GNU Affero General Public License, you may choose any version ever published | ||||||
|  | by the Free Software Foundation. | ||||||
|  | 
 | ||||||
|  |   If the Program specifies that a proxy can decide which future | ||||||
|  | versions of the GNU Affero General Public License can be used, that proxy's | ||||||
|  | public statement of acceptance of a version permanently authorizes you | ||||||
|  | to choose that version for the Program. | ||||||
|  | 
 | ||||||
|  |   Later license versions may give you additional or different | ||||||
|  | permissions.  However, no additional obligations are imposed on any | ||||||
|  | author or copyright holder as a result of your choosing to follow a | ||||||
|  | later version. | ||||||
|  | 
 | ||||||
|  |   15. Disclaimer of Warranty. | ||||||
|  | 
 | ||||||
|  |   THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY | ||||||
|  | APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT | ||||||
|  | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY | ||||||
|  | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, | ||||||
|  | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||||||
|  | PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM | ||||||
|  | IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF | ||||||
|  | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. | ||||||
|  | 
 | ||||||
|  |   16. Limitation of Liability. | ||||||
|  | 
 | ||||||
|  |   IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING | ||||||
|  | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS | ||||||
|  | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY | ||||||
|  | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE | ||||||
|  | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF | ||||||
|  | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD | ||||||
|  | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), | ||||||
|  | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF | ||||||
|  | SUCH DAMAGES. | ||||||
|  | 
 | ||||||
|  |   17. Interpretation of Sections 15 and 16. | ||||||
|  | 
 | ||||||
|  |   If the disclaimer of warranty and limitation of liability provided | ||||||
|  | above cannot be given local legal effect according to their terms, | ||||||
|  | reviewing courts shall apply local law that most closely approximates | ||||||
|  | an absolute waiver of all civil liability in connection with the | ||||||
|  | Program, unless a warranty or assumption of liability accompanies a | ||||||
|  | copy of the Program in return for a fee. | ||||||
|  | 
 | ||||||
|  |                      END OF TERMS AND CONDITIONS | ||||||
|  | 
 | ||||||
|  |             How to Apply These Terms to Your New Programs | ||||||
|  | 
 | ||||||
|  |   If you develop a new program, and you want it to be of the greatest | ||||||
|  | possible use to the public, the best way to achieve this is to make it | ||||||
|  | free software which everyone can redistribute and change under these terms. | ||||||
|  | 
 | ||||||
|  |   To do so, attach the following notices to the program.  It is safest | ||||||
|  | to attach them to the start of each source file to most effectively | ||||||
|  | state the exclusion of warranty; and each file should have at least | ||||||
|  | the "copyright" line and a pointer to where the full notice is found. | ||||||
|  | 
 | ||||||
|  |     <one line to give the program's name and a brief idea of what it does.> | ||||||
|  |     Copyright (C) <year>  <name of author> | ||||||
|  | 
 | ||||||
|  |     This program is free software: you can redistribute it and/or modify | ||||||
|  |     it under the terms of the GNU Affero General Public License as published | ||||||
|  |     by the Free Software Foundation, either version 3 of the License, or | ||||||
|  |     (at your option) any later version. | ||||||
|  | 
 | ||||||
|  |     This program is distributed in the hope that it will be useful, | ||||||
|  |     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  |     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  |     GNU Affero General Public License for more details. | ||||||
|  | 
 | ||||||
|  |     You should have received a copy of the GNU Affero General Public License | ||||||
|  |     along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||||
|  | 
 | ||||||
|  | Also add information on how to contact you by electronic and paper mail. | ||||||
|  | 
 | ||||||
|  |   If your software can interact with users remotely through a computer | ||||||
|  | network, you should also make sure that it provides a way for users to | ||||||
|  | get its source.  For example, if your program is a web application, its | ||||||
|  | interface could display a "Source" link that leads users to an archive | ||||||
|  | of the code.  There are many ways you could offer source, and different | ||||||
|  | solutions will be better for different programs; see section 13 for the | ||||||
|  | specific requirements. | ||||||
|  | 
 | ||||||
|  |   You should also get your employer (if you work as a programmer) or school, | ||||||
|  | if any, to sign a "copyright disclaimer" for the program, if necessary. | ||||||
|  | For more information on this, and how to apply and follow the GNU AGPL, see | ||||||
|  | <https://www.gnu.org/licenses/>. | ||||||
							
								
								
									
										7
									
								
								vendor/git.iim.gay/grufwub/go-errors/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										7
									
								
								vendor/git.iim.gay/grufwub/go-errors/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,7 @@ | ||||||
|  | [](https://godocs.io/github.com/grufwub/go-errors) | ||||||
|  | 
 | ||||||
|  | An error library that allows wrapping of one error with another, | ||||||
|  | extending with further information and preserving all wrapped errors | ||||||
|  | for comparisons. | ||||||
|  | 
 | ||||||
|  | Where possible this library wraps standard errors library functions. | ||||||
							
								
								
									
										192
									
								
								vendor/git.iim.gay/grufwub/go-errors/errors.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										192
									
								
								vendor/git.iim.gay/grufwub/go-errors/errors.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,192 @@ | ||||||
|  | package errors | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"errors" | ||||||
|  | 	"fmt" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | var ( | ||||||
|  | 	_ Definition = definition("") | ||||||
|  | 	_ Error      = &derivedError{} | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // BaseError defines a simple error implementation | ||||||
|  | type BaseError interface { | ||||||
|  | 	// Error returns the error string | ||||||
|  | 	Error() string | ||||||
|  | 
 | ||||||
|  | 	// Is checks whether an error is equal to this one | ||||||
|  | 	Is(error) bool | ||||||
|  | 
 | ||||||
|  | 	// Unwrap attempts to unwrap any contained errors | ||||||
|  | 	Unwrap() error | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Definition describes an error implementation that allows creating | ||||||
|  | // errors derived from this. e.g. global errors defined at runtime | ||||||
|  | // that are called with `.New()` or `.Wrap()` to derive new errors with | ||||||
|  | // extra contextual information when needed | ||||||
|  | type Definition interface { | ||||||
|  | 	// New returns a new Error based on Definition using | ||||||
|  | 	// supplied string as contextual information | ||||||
|  | 	New(a ...interface{}) Error | ||||||
|  | 
 | ||||||
|  | 	// Newf returns a new Error based on Definition using | ||||||
|  | 	// supplied format string as contextual information | ||||||
|  | 	Newf(string, ...interface{}) Error | ||||||
|  | 
 | ||||||
|  | 	// Wrap returns a new Error, wrapping supplied error with | ||||||
|  | 	// a wrapper with definition as the outer error | ||||||
|  | 	Wrap(error) Error | ||||||
|  | 
 | ||||||
|  | 	// must implement BaseError | ||||||
|  | 	BaseError | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Error defines an error implementation that supports wrapping errors, easily | ||||||
|  | // accessing inner / outer errors in the wrapping structure, and setting extra | ||||||
|  | // contextual information related to this error | ||||||
|  | type Error interface { | ||||||
|  | 	// Outer returns the outermost error | ||||||
|  | 	Outer() error | ||||||
|  | 
 | ||||||
|  | 	// Extra allows you to set extra contextual information. Please note | ||||||
|  | 	// that multiple calls to .Extra() will overwrite previously set information | ||||||
|  | 	Extra(...interface{}) Error | ||||||
|  | 
 | ||||||
|  | 	// Extraf allows you to set extra contextual information using a format string. | ||||||
|  | 	// Please note that multiple calls to .Extraf() will overwrite previously set | ||||||
|  | 	// information | ||||||
|  | 	Extraf(string, ...interface{}) Error | ||||||
|  | 
 | ||||||
|  | 	// must implement BaseError | ||||||
|  | 	BaseError | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // New returns a simple error implementation. This exists so that `go-errors` can | ||||||
|  | // be a drop-in replacement for the standard "errors" library | ||||||
|  | func New(msg string) error { | ||||||
|  | 	return definition(msg) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Define returns a new error Definition | ||||||
|  | func Define(msg string) Definition { | ||||||
|  | 	return definition(msg) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Wrap wraps the supplied inner error within a struct of the outer error | ||||||
|  | func Wrap(outer, inner error) Error { | ||||||
|  | 	// If this is a wrapped error but inner is nil, use this | ||||||
|  | 	if derived, ok := outer.(*derivedError); ok && derived.inner == nil { | ||||||
|  | 		derived.inner = inner | ||||||
|  | 		return derived | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Create new derived error | ||||||
|  | 	return &derivedError{ | ||||||
|  | 		msg:   "", | ||||||
|  | 		extra: "", | ||||||
|  | 		outer: outer, | ||||||
|  | 		inner: inner, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type definition string | ||||||
|  | 
 | ||||||
|  | func (e definition) New(a ...interface{}) Error { | ||||||
|  | 	return &derivedError{ | ||||||
|  | 		msg:   fmt.Sprint(a...), | ||||||
|  | 		extra: "", | ||||||
|  | 		inner: nil, | ||||||
|  | 		outer: e, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (e definition) Newf(msg string, a ...interface{}) Error { | ||||||
|  | 	return &derivedError{ | ||||||
|  | 		msg:   fmt.Sprintf(msg, a...), | ||||||
|  | 		extra: "", | ||||||
|  | 		inner: nil, | ||||||
|  | 		outer: e, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (e definition) Wrap(err error) Error { | ||||||
|  | 	return &derivedError{ | ||||||
|  | 		msg:   "", | ||||||
|  | 		extra: "", | ||||||
|  | 		inner: err, | ||||||
|  | 		outer: e, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (e definition) Error() string { | ||||||
|  | 	return string(e) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (e definition) Is(err error) bool { | ||||||
|  | 	switch err := err.(type) { | ||||||
|  | 	case definition: | ||||||
|  | 		return e == err | ||||||
|  | 	case *derivedError: | ||||||
|  | 		return err.Is(e) | ||||||
|  | 	default: | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (e definition) Unwrap() error { | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type derivedError struct { | ||||||
|  | 	msg   string // msg provides the set message for this derived error | ||||||
|  | 	extra string // extra provides any extra set contextual information | ||||||
|  | 	inner error  // inner is the error being wrapped | ||||||
|  | 	outer error  // outer is the outmost error in this wrapper | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (e *derivedError) Error() string { | ||||||
|  | 	// Error starts with outer error | ||||||
|  | 	s := e.outer.Error() + ` (` | ||||||
|  | 
 | ||||||
|  | 	// Add any message | ||||||
|  | 	if e.msg != "" { | ||||||
|  | 		s += `msg="` + e.msg + `" ` | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Add any wrapped error | ||||||
|  | 	if e.inner != nil { | ||||||
|  | 		s += `wrapped="` + e.inner.Error() + `" ` | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Add any extra information | ||||||
|  | 	if e.extra != "" { | ||||||
|  | 		s += `extra="` + e.extra + `" ` | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Return error string | ||||||
|  | 	return s[:len(s)-1] + `)` | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (e *derivedError) Is(err error) bool { | ||||||
|  | 	return errors.Is(e.outer, err) || errors.Is(e.inner, err) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (e *derivedError) Outer() error { | ||||||
|  | 	return e.outer | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (e *derivedError) Unwrap() error { | ||||||
|  | 	return e.inner | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (e *derivedError) Extra(a ...interface{}) Error { | ||||||
|  | 	e.extra = fmt.Sprint(a...) | ||||||
|  | 	return e | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (e *derivedError) Extraf(s string, a ...interface{}) Error { | ||||||
|  | 	e.extra = fmt.Sprintf(s, a...) | ||||||
|  | 	return e | ||||||
|  | } | ||||||
							
								
								
									
										45
									
								
								vendor/git.iim.gay/grufwub/go-errors/once.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										45
									
								
								vendor/git.iim.gay/grufwub/go-errors/once.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,45 @@ | ||||||
|  | package errors | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"sync/atomic" | ||||||
|  | 	"unsafe" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// OnceError is an error structure that supports safe multi-threaded
// usage and setting only once (until reset)
type OnceError struct {
	err unsafe.Pointer // atomically accessed *error; nil while unset
}

// NewOnce returns a new OnceError instance
func NewOnce() OnceError {
	return OnceError{
		err: nil,
	}
}

// Store sets the error exactly once; a nil error, or any call after
// the first successful store (until Reset), is a no-op
func (e *OnceError) Store(err error) {
	// Nothing to do
	if err == nil {
		return
	}

	// Only set if not already
	atomic.CompareAndSwapPointer(
		&e.err,
		nil,
		unsafe.Pointer(&err),
	)
}

// Load returns the stored error, or nil if none has been stored.
// Fix: the previous implementation dereferenced the pointer without a
// nil check, panicking whenever Load was called before a successful Store.
func (e *OnceError) Load() error {
	ptr := atomic.LoadPointer(&e.err)
	if ptr == nil {
		// No error stored (yet)
		return nil
	}
	return *(*error)(ptr)
}

// IsSet returns whether an error has been stored
func (e *OnceError) IsSet() bool {
	return (atomic.LoadPointer(&e.err) != nil)
}

// Reset clears any stored error, allowing a future Store to succeed
func (e *OnceError) Reset() {
	atomic.StorePointer(&e.err, nil)
}
							
								
								
									
										18
									
								
								vendor/git.iim.gay/grufwub/go-errors/std.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										18
									
								
								vendor/git.iim.gay/grufwub/go-errors/std.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,18 @@ | ||||||
|  | package errors | ||||||
|  | 
 | ||||||
|  | import "errors" | ||||||
|  | 
 | ||||||
|  | // Is wraps "errors".Is() | ||||||
|  | func Is(err, target error) bool { | ||||||
|  | 	return errors.Is(err, target) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // As wraps "errors".As() | ||||||
|  | func As(err error, target interface{}) bool { | ||||||
|  | 	return errors.As(err, target) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Unwrap wraps "errors".Unwrap() | ||||||
|  | func Unwrap(err error) error { | ||||||
|  | 	return errors.Unwrap(err) | ||||||
|  | } | ||||||
							
								
								
									
										1
									
								
								vendor/git.iim.gay/grufwub/go-hashenc/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1
									
								
								vendor/git.iim.gay/grufwub/go-hashenc/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1 @@ | ||||||
|  | HashEncoder provides a means of quickly hash-summing and encoding data | ||||||
							
								
								
									
										42
									
								
								vendor/git.iim.gay/grufwub/go-hashenc/enc.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										42
									
								
								vendor/git.iim.gay/grufwub/go-hashenc/enc.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,42 @@ | ||||||
|  | package hashenc | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"encoding/base32" | ||||||
|  | 	"encoding/base64" | ||||||
|  | 	"encoding/hex" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// Encoder defines an interface for encoding binary data
type Encoder interface {
	// Encode encodes the data at src into dst
	Encode(dst []byte, src []byte)

	// EncodedLen returns the encoded length for input data of supplied length
	EncodedLen(int) int
}

// Base32 returns a new unpadded, standard-alphabet base32 Encoder.
// Fix: use base32.NoPadding here — the original passed base64.NoPadding,
// which only worked because both constants happen to equal rune(-1).
func Base32() Encoder {
	return base32.StdEncoding.WithPadding(base32.NoPadding)
}

// Base64 returns a new unpadded, URL-safe base64 Encoder
func Base64() Encoder {
	return base64.URLEncoding.WithPadding(base64.NoPadding)
}

// Hex returns a new hex Encoder
func Hex() Encoder {
	return &hexEncoder{}
}

// hexEncoder simply provides an empty receiver to satisfy Encoder
type hexEncoder struct{}

// Encode hex-encodes src into dst
func (*hexEncoder) Encode(dst []byte, src []byte) {
	hex.Encode(dst, src)
}

// EncodedLen returns the hex-encoded length (2x) of the supplied byte count
func (*hexEncoder) EncodedLen(len int) int {
	return hex.EncodedLen(len)
}
							
								
								
									
										136
									
								
								vendor/git.iim.gay/grufwub/go-hashenc/hash.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										136
									
								
								vendor/git.iim.gay/grufwub/go-hashenc/hash.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,136 @@ | ||||||
|  | package hashenc | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"crypto/md5" | ||||||
|  | 	"crypto/sha1" | ||||||
|  | 	"crypto/sha256" | ||||||
|  | 	"crypto/sha512" | ||||||
|  | 	"hash" | ||||||
|  | 	"sync" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// Hash defines a pooled hash.Hash implementation
type Hash interface {
	// Hash ensures we implement the base hash.Hash implementation
	hash.Hash

	// Release resets the Hash and places it back in the pool
	Release()
}

// poolHash couples a hash.Hash with the sync.Pool it is returned to
type poolHash struct {
	hash.Hash
	pool *sync.Pool
}

// Release resets the hash state and hands the instance back to its pool
func (h *poolHash) Release() {
	h.Reset()
	h.pool.Put(h)
}

// setupPool wires a sync.Pool to produce poolHash instances whose
// underlying hash.Hash is built by mk
func setupPool(pool *sync.Pool, mk func() hash.Hash) {
	pool.New = func() interface{} {
		return &poolHash{
			Hash: mk(),
			pool: pool,
		}
	}
}

// SHA512Pool defines a pool of SHA512 hashes
type SHA512Pool interface {
	// SHA512 returns a Hash implementing the SHA512 hashing algorithm
	SHA512() Hash
}

// NewSHA512Pool returns a new SHA512Pool implementation
func NewSHA512Pool() SHA512Pool {
	p := &sha512Pool{}
	setupPool(&p.Pool, sha512.New)
	return p
}

// sha512Pool is our SHA512Pool implementation, simply wrapping sync.Pool
type sha512Pool struct {
	sync.Pool
}

func (p *sha512Pool) SHA512() Hash {
	return p.Get().(Hash)
}

// SHA256Pool defines a pool of SHA256 hashes
type SHA256Pool interface {
	// SHA256 returns a Hash implementing the SHA256 hashing algorithm
	SHA256() Hash
}

// NewSHA256Pool returns a new SHA256Pool implementation
func NewSHA256Pool() SHA256Pool {
	p := &sha256Pool{}
	setupPool(&p.Pool, sha256.New)
	return p
}

// sha256Pool is our SHA256Pool implementation, simply wrapping sync.Pool
type sha256Pool struct {
	sync.Pool
}

func (p *sha256Pool) SHA256() Hash {
	return p.Get().(Hash)
}

// SHA1Pool defines a pool of SHA1 hashes
type SHA1Pool interface {
	SHA1() Hash
}

// NewSHA1Pool returns a new SHA1Pool implementation
func NewSHA1Pool() SHA1Pool {
	p := &sha1Pool{}
	setupPool(&p.Pool, sha1.New)
	return p
}

// sha1Pool is our SHA1Pool implementation, simply wrapping sync.Pool
type sha1Pool struct {
	sync.Pool
}

func (p *sha1Pool) SHA1() Hash {
	return p.Get().(Hash)
}

// MD5Pool defines a pool of MD5 hashes
type MD5Pool interface {
	MD5() Hash
}

// NewMD5Pool returns a new MD5Pool implementation
func NewMD5Pool() MD5Pool {
	p := &md5Pool{}
	setupPool(&p.Pool, md5.New)
	return p
}

// md5Pool is our MD5Pool implementation, simply wrapping sync.Pool
type md5Pool struct {
	sync.Pool
}

func (p *md5Pool) MD5() Hash {
	return p.Get().(Hash)
}
							
								
								
									
										58
									
								
								vendor/git.iim.gay/grufwub/go-hashenc/hashenc.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										58
									
								
								vendor/git.iim.gay/grufwub/go-hashenc/hashenc.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,58 @@ | ||||||
|  | package hashenc | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"hash" | ||||||
|  | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-bytes" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // HashEncoder defines an interface for calculating encoded hash sums of binary data | ||||||
|  | type HashEncoder interface { | ||||||
|  | 	// EncodeSum calculates the hash sum of src and encodes (at most) Size() into dst | ||||||
|  | 	EncodeSum(dst []byte, src []byte) | ||||||
|  | 
 | ||||||
|  | 	// EncodedSum calculates the encoded hash sum of src and returns data in a newly allocated bytes.Bytes | ||||||
|  | 	EncodedSum(src []byte) bytes.Bytes | ||||||
|  | 
 | ||||||
|  | 	// Size returns the expected length of encoded hashes | ||||||
|  | 	Size() int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // New returns a new HashEncoder instance based on supplied hash.Hash and Encoder supplying functions | ||||||
|  | func New(hash hash.Hash, enc Encoder) HashEncoder { | ||||||
|  | 	hashSize := hash.Size() | ||||||
|  | 	return &henc{ | ||||||
|  | 		hash: hash, | ||||||
|  | 		hbuf: make([]byte, hashSize), | ||||||
|  | 		enc:  enc, | ||||||
|  | 		size: enc.EncodedLen(hashSize), | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // henc is the HashEncoder implementation | ||||||
|  | type henc struct { | ||||||
|  | 	hash hash.Hash | ||||||
|  | 	hbuf []byte | ||||||
|  | 	enc  Encoder | ||||||
|  | 	size int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (henc *henc) EncodeSum(dst []byte, src []byte) { | ||||||
|  | 	// Hash supplied bytes | ||||||
|  | 	henc.hash.Reset() | ||||||
|  | 	henc.hash.Write(src) | ||||||
|  | 	henc.hbuf = henc.hash.Sum(henc.hbuf[:0]) | ||||||
|  | 
 | ||||||
|  | 	// Encode the hashsum and return a copy | ||||||
|  | 	henc.enc.Encode(dst, henc.hbuf) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (henc *henc) EncodedSum(src []byte) bytes.Bytes { | ||||||
|  | 	dst := make([]byte, henc.size) | ||||||
|  | 	henc.EncodeSum(dst, src) | ||||||
|  | 	return bytes.ToBytes(dst) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (henc *henc) Size() int { | ||||||
|  | 	return henc.size | ||||||
|  | } | ||||||
							
								
								
									
										661
									
								
								vendor/git.iim.gay/grufwub/go-mutexes/LICENSE
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										661
									
								
								vendor/git.iim.gay/grufwub/go-mutexes/LICENSE
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,661 @@ | ||||||
|  |                     GNU AFFERO GENERAL PUBLIC LICENSE | ||||||
|  |                        Version 3, 19 November 2007 | ||||||
|  | 
 | ||||||
|  |  Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> | ||||||
|  |  Everyone is permitted to copy and distribute verbatim copies | ||||||
|  |  of this license document, but changing it is not allowed. | ||||||
|  | 
 | ||||||
|  |                             Preamble | ||||||
|  | 
 | ||||||
|  |   The GNU Affero General Public License is a free, copyleft license for | ||||||
|  | software and other kinds of works, specifically designed to ensure | ||||||
|  | cooperation with the community in the case of network server software. | ||||||
|  | 
 | ||||||
|  |   The licenses for most software and other practical works are designed | ||||||
|  | to take away your freedom to share and change the works.  By contrast, | ||||||
|  | our General Public Licenses are intended to guarantee your freedom to | ||||||
|  | share and change all versions of a program--to make sure it remains free | ||||||
|  | software for all its users. | ||||||
|  | 
 | ||||||
|  |   When we speak of free software, we are referring to freedom, not | ||||||
|  | price.  Our General Public Licenses are designed to make sure that you | ||||||
|  | have the freedom to distribute copies of free software (and charge for | ||||||
|  | them if you wish), that you receive source code or can get it if you | ||||||
|  | want it, that you can change the software or use pieces of it in new | ||||||
|  | free programs, and that you know you can do these things. | ||||||
|  | 
 | ||||||
|  |   Developers that use our General Public Licenses protect your rights | ||||||
|  | with two steps: (1) assert copyright on the software, and (2) offer | ||||||
|  | you this License which gives you legal permission to copy, distribute | ||||||
|  | and/or modify the software. | ||||||
|  | 
 | ||||||
|  |   A secondary benefit of defending all users' freedom is that | ||||||
|  | improvements made in alternate versions of the program, if they | ||||||
|  | receive widespread use, become available for other developers to | ||||||
|  | incorporate.  Many developers of free software are heartened and | ||||||
|  | encouraged by the resulting cooperation.  However, in the case of | ||||||
|  | software used on network servers, this result may fail to come about. | ||||||
|  | The GNU General Public License permits making a modified version and | ||||||
|  | letting the public access it on a server without ever releasing its | ||||||
|  | source code to the public. | ||||||
|  | 
 | ||||||
|  |   The GNU Affero General Public License is designed specifically to | ||||||
|  | ensure that, in such cases, the modified source code becomes available | ||||||
|  | to the community.  It requires the operator of a network server to | ||||||
|  | provide the source code of the modified version running there to the | ||||||
|  | users of that server.  Therefore, public use of a modified version, on | ||||||
|  | a publicly accessible server, gives the public access to the source | ||||||
|  | code of the modified version. | ||||||
|  | 
 | ||||||
|  |   An older license, called the Affero General Public License and | ||||||
|  | published by Affero, was designed to accomplish similar goals.  This is | ||||||
|  | a different license, not a version of the Affero GPL, but Affero has | ||||||
|  | released a new version of the Affero GPL which permits relicensing under | ||||||
|  | this license. | ||||||
|  | 
 | ||||||
|  |   The precise terms and conditions for copying, distribution and | ||||||
|  | modification follow. | ||||||
|  | 
 | ||||||
|  |                        TERMS AND CONDITIONS | ||||||
|  | 
 | ||||||
|  |   0. Definitions. | ||||||
|  | 
 | ||||||
|  |   "This License" refers to version 3 of the GNU Affero General Public License. | ||||||
|  | 
 | ||||||
|  |   "Copyright" also means copyright-like laws that apply to other kinds of | ||||||
|  | works, such as semiconductor masks. | ||||||
|  | 
 | ||||||
|  |   "The Program" refers to any copyrightable work licensed under this | ||||||
|  | License.  Each licensee is addressed as "you".  "Licensees" and | ||||||
|  | "recipients" may be individuals or organizations. | ||||||
|  | 
 | ||||||
|  |   To "modify" a work means to copy from or adapt all or part of the work | ||||||
|  | in a fashion requiring copyright permission, other than the making of an | ||||||
|  | exact copy.  The resulting work is called a "modified version" of the | ||||||
|  | earlier work or a work "based on" the earlier work. | ||||||
|  | 
 | ||||||
|  |   A "covered work" means either the unmodified Program or a work based | ||||||
|  | on the Program. | ||||||
|  | 
 | ||||||
|  |   To "propagate" a work means to do anything with it that, without | ||||||
|  | permission, would make you directly or secondarily liable for | ||||||
|  | infringement under applicable copyright law, except executing it on a | ||||||
|  | computer or modifying a private copy.  Propagation includes copying, | ||||||
|  | distribution (with or without modification), making available to the | ||||||
|  | public, and in some countries other activities as well. | ||||||
|  | 
 | ||||||
|  |   To "convey" a work means any kind of propagation that enables other | ||||||
|  | parties to make or receive copies.  Mere interaction with a user through | ||||||
|  | a computer network, with no transfer of a copy, is not conveying. | ||||||
|  | 
 | ||||||
|  |   An interactive user interface displays "Appropriate Legal Notices" | ||||||
|  | to the extent that it includes a convenient and prominently visible | ||||||
|  | feature that (1) displays an appropriate copyright notice, and (2) | ||||||
|  | tells the user that there is no warranty for the work (except to the | ||||||
|  | extent that warranties are provided), that licensees may convey the | ||||||
|  | work under this License, and how to view a copy of this License.  If | ||||||
|  | the interface presents a list of user commands or options, such as a | ||||||
|  | menu, a prominent item in the list meets this criterion. | ||||||
|  | 
 | ||||||
|  |   1. Source Code. | ||||||
|  | 
 | ||||||
|  |   The "source code" for a work means the preferred form of the work | ||||||
|  | for making modifications to it.  "Object code" means any non-source | ||||||
|  | form of a work. | ||||||
|  | 
 | ||||||
|  |   A "Standard Interface" means an interface that either is an official | ||||||
|  | standard defined by a recognized standards body, or, in the case of | ||||||
|  | interfaces specified for a particular programming language, one that | ||||||
|  | is widely used among developers working in that language. | ||||||
|  | 
 | ||||||
|  |   The "System Libraries" of an executable work include anything, other | ||||||
|  | than the work as a whole, that (a) is included in the normal form of | ||||||
|  | packaging a Major Component, but which is not part of that Major | ||||||
|  | Component, and (b) serves only to enable use of the work with that | ||||||
|  | Major Component, or to implement a Standard Interface for which an | ||||||
|  | implementation is available to the public in source code form.  A | ||||||
|  | "Major Component", in this context, means a major essential component | ||||||
|  | (kernel, window system, and so on) of the specific operating system | ||||||
|  | (if any) on which the executable work runs, or a compiler used to | ||||||
|  | produce the work, or an object code interpreter used to run it. | ||||||
|  | 
 | ||||||
|  |   The "Corresponding Source" for a work in object code form means all | ||||||
|  | the source code needed to generate, install, and (for an executable | ||||||
|  | work) run the object code and to modify the work, including scripts to | ||||||
|  | control those activities.  However, it does not include the work's | ||||||
|  | System Libraries, or general-purpose tools or generally available free | ||||||
|  | programs which are used unmodified in performing those activities but | ||||||
|  | which are not part of the work.  For example, Corresponding Source | ||||||
|  | includes interface definition files associated with source files for | ||||||
|  | the work, and the source code for shared libraries and dynamically | ||||||
|  | linked subprograms that the work is specifically designed to require, | ||||||
|  | such as by intimate data communication or control flow between those | ||||||
|  | subprograms and other parts of the work. | ||||||
|  | 
 | ||||||
|  |   The Corresponding Source need not include anything that users | ||||||
|  | can regenerate automatically from other parts of the Corresponding | ||||||
|  | Source. | ||||||
|  | 
 | ||||||
|  |   The Corresponding Source for a work in source code form is that | ||||||
|  | same work. | ||||||
|  | 
 | ||||||
|  |   2. Basic Permissions. | ||||||
|  | 
 | ||||||
|  |   All rights granted under this License are granted for the term of | ||||||
|  | copyright on the Program, and are irrevocable provided the stated | ||||||
|  | conditions are met.  This License explicitly affirms your unlimited | ||||||
|  | permission to run the unmodified Program.  The output from running a | ||||||
|  | covered work is covered by this License only if the output, given its | ||||||
|  | content, constitutes a covered work.  This License acknowledges your | ||||||
|  | rights of fair use or other equivalent, as provided by copyright law. | ||||||
|  | 
 | ||||||
|  |   You may make, run and propagate covered works that you do not | ||||||
|  | convey, without conditions so long as your license otherwise remains | ||||||
|  | in force.  You may convey covered works to others for the sole purpose | ||||||
|  | of having them make modifications exclusively for you, or provide you | ||||||
|  | with facilities for running those works, provided that you comply with | ||||||
|  | the terms of this License in conveying all material for which you do | ||||||
|  | not control copyright.  Those thus making or running the covered works | ||||||
|  | for you must do so exclusively on your behalf, under your direction | ||||||
|  | and control, on terms that prohibit them from making any copies of | ||||||
|  | your copyrighted material outside their relationship with you. | ||||||
|  | 
 | ||||||
|  |   Conveying under any other circumstances is permitted solely under | ||||||
|  | the conditions stated below.  Sublicensing is not allowed; section 10 | ||||||
|  | makes it unnecessary. | ||||||
|  | 
 | ||||||
|  |   3. Protecting Users' Legal Rights From Anti-Circumvention Law. | ||||||
|  | 
 | ||||||
|  |   No covered work shall be deemed part of an effective technological | ||||||
|  | measure under any applicable law fulfilling obligations under article | ||||||
|  | 11 of the WIPO copyright treaty adopted on 20 December 1996, or | ||||||
|  | similar laws prohibiting or restricting circumvention of such | ||||||
|  | measures. | ||||||
|  | 
 | ||||||
|  |   When you convey a covered work, you waive any legal power to forbid | ||||||
|  | circumvention of technological measures to the extent such circumvention | ||||||
|  | is effected by exercising rights under this License with respect to | ||||||
|  | the covered work, and you disclaim any intention to limit operation or | ||||||
|  | modification of the work as a means of enforcing, against the work's | ||||||
|  | users, your or third parties' legal rights to forbid circumvention of | ||||||
|  | technological measures. | ||||||
|  | 
 | ||||||
|  |   4. Conveying Verbatim Copies. | ||||||
|  | 
 | ||||||
|  |   You may convey verbatim copies of the Program's source code as you | ||||||
|  | receive it, in any medium, provided that you conspicuously and | ||||||
|  | appropriately publish on each copy an appropriate copyright notice; | ||||||
|  | keep intact all notices stating that this License and any | ||||||
|  | non-permissive terms added in accord with section 7 apply to the code; | ||||||
|  | keep intact all notices of the absence of any warranty; and give all | ||||||
|  | recipients a copy of this License along with the Program. | ||||||
|  | 
 | ||||||
|  |   You may charge any price or no price for each copy that you convey, | ||||||
|  | and you may offer support or warranty protection for a fee. | ||||||
|  | 
 | ||||||
|  |   5. Conveying Modified Source Versions. | ||||||
|  | 
 | ||||||
|  |   You may convey a work based on the Program, or the modifications to | ||||||
|  | produce it from the Program, in the form of source code under the | ||||||
|  | terms of section 4, provided that you also meet all of these conditions: | ||||||
|  | 
 | ||||||
|  |     a) The work must carry prominent notices stating that you modified | ||||||
|  |     it, and giving a relevant date. | ||||||
|  | 
 | ||||||
|  |     b) The work must carry prominent notices stating that it is | ||||||
|  |     released under this License and any conditions added under section | ||||||
|  |     7.  This requirement modifies the requirement in section 4 to | ||||||
|  |     "keep intact all notices". | ||||||
|  | 
 | ||||||
|  |     c) You must license the entire work, as a whole, under this | ||||||
|  |     License to anyone who comes into possession of a copy.  This | ||||||
|  |     License will therefore apply, along with any applicable section 7 | ||||||
|  |     additional terms, to the whole of the work, and all its parts, | ||||||
|  |     regardless of how they are packaged.  This License gives no | ||||||
|  |     permission to license the work in any other way, but it does not | ||||||
|  |     invalidate such permission if you have separately received it. | ||||||
|  | 
 | ||||||
|  |     d) If the work has interactive user interfaces, each must display | ||||||
|  |     Appropriate Legal Notices; however, if the Program has interactive | ||||||
|  |     interfaces that do not display Appropriate Legal Notices, your | ||||||
|  |     work need not make them do so. | ||||||
|  | 
 | ||||||
|  |   A compilation of a covered work with other separate and independent | ||||||
|  | works, which are not by their nature extensions of the covered work, | ||||||
|  | and which are not combined with it such as to form a larger program, | ||||||
|  | in or on a volume of a storage or distribution medium, is called an | ||||||
|  | "aggregate" if the compilation and its resulting copyright are not | ||||||
|  | used to limit the access or legal rights of the compilation's users | ||||||
|  | beyond what the individual works permit.  Inclusion of a covered work | ||||||
|  | in an aggregate does not cause this License to apply to the other | ||||||
|  | parts of the aggregate. | ||||||
|  | 
 | ||||||
|  |   6. Conveying Non-Source Forms. | ||||||
|  | 
 | ||||||
|  |   You may convey a covered work in object code form under the terms | ||||||
|  | of sections 4 and 5, provided that you also convey the | ||||||
|  | machine-readable Corresponding Source under the terms of this License, | ||||||
|  | in one of these ways: | ||||||
|  | 
 | ||||||
|  |     a) Convey the object code in, or embodied in, a physical product | ||||||
|  |     (including a physical distribution medium), accompanied by the | ||||||
|  |     Corresponding Source fixed on a durable physical medium | ||||||
|  |     customarily used for software interchange. | ||||||
|  | 
 | ||||||
|  |     b) Convey the object code in, or embodied in, a physical product | ||||||
|  |     (including a physical distribution medium), accompanied by a | ||||||
|  |     written offer, valid for at least three years and valid for as | ||||||
|  |     long as you offer spare parts or customer support for that product | ||||||
|  |     model, to give anyone who possesses the object code either (1) a | ||||||
|  |     copy of the Corresponding Source for all the software in the | ||||||
|  |     product that is covered by this License, on a durable physical | ||||||
|  |     medium customarily used for software interchange, for a price no | ||||||
|  |     more than your reasonable cost of physically performing this | ||||||
|  |     conveying of source, or (2) access to copy the | ||||||
|  |     Corresponding Source from a network server at no charge. | ||||||
|  | 
 | ||||||
|  |     c) Convey individual copies of the object code with a copy of the | ||||||
|  |     written offer to provide the Corresponding Source.  This | ||||||
|  |     alternative is allowed only occasionally and noncommercially, and | ||||||
|  |     only if you received the object code with such an offer, in accord | ||||||
|  |     with subsection 6b. | ||||||
|  | 
 | ||||||
|  |     d) Convey the object code by offering access from a designated | ||||||
|  |     place (gratis or for a charge), and offer equivalent access to the | ||||||
|  |     Corresponding Source in the same way through the same place at no | ||||||
|  |     further charge.  You need not require recipients to copy the | ||||||
|  |     Corresponding Source along with the object code.  If the place to | ||||||
|  |     copy the object code is a network server, the Corresponding Source | ||||||
|  |     may be on a different server (operated by you or a third party) | ||||||
|  |     that supports equivalent copying facilities, provided you maintain | ||||||
|  |     clear directions next to the object code saying where to find the | ||||||
|  |     Corresponding Source.  Regardless of what server hosts the | ||||||
|  |     Corresponding Source, you remain obligated to ensure that it is | ||||||
|  |     available for as long as needed to satisfy these requirements. | ||||||
|  | 
 | ||||||
|  |     e) Convey the object code using peer-to-peer transmission, provided | ||||||
|  |     you inform other peers where the object code and Corresponding | ||||||
|  |     Source of the work are being offered to the general public at no | ||||||
|  |     charge under subsection 6d. | ||||||
|  | 
 | ||||||
|  |   A separable portion of the object code, whose source code is excluded | ||||||
|  | from the Corresponding Source as a System Library, need not be | ||||||
|  | included in conveying the object code work. | ||||||
|  | 
 | ||||||
|  |   A "User Product" is either (1) a "consumer product", which means any | ||||||
|  | tangible personal property which is normally used for personal, family, | ||||||
|  | or household purposes, or (2) anything designed or sold for incorporation | ||||||
|  | into a dwelling.  In determining whether a product is a consumer product, | ||||||
|  | doubtful cases shall be resolved in favor of coverage.  For a particular | ||||||
|  | product received by a particular user, "normally used" refers to a | ||||||
|  | typical or common use of that class of product, regardless of the status | ||||||
|  | of the particular user or of the way in which the particular user | ||||||
|  | actually uses, or expects or is expected to use, the product.  A product | ||||||
|  | is a consumer product regardless of whether the product has substantial | ||||||
|  | commercial, industrial or non-consumer uses, unless such uses represent | ||||||
|  | the only significant mode of use of the product. | ||||||
|  | 
 | ||||||
|  |   "Installation Information" for a User Product means any methods, | ||||||
|  | procedures, authorization keys, or other information required to install | ||||||
|  | and execute modified versions of a covered work in that User Product from | ||||||
|  | a modified version of its Corresponding Source.  The information must | ||||||
|  | suffice to ensure that the continued functioning of the modified object | ||||||
|  | code is in no case prevented or interfered with solely because | ||||||
|  | modification has been made. | ||||||
|  | 
 | ||||||
|  |   If you convey an object code work under this section in, or with, or | ||||||
|  | specifically for use in, a User Product, and the conveying occurs as | ||||||
|  | part of a transaction in which the right of possession and use of the | ||||||
|  | User Product is transferred to the recipient in perpetuity or for a | ||||||
|  | fixed term (regardless of how the transaction is characterized), the | ||||||
|  | Corresponding Source conveyed under this section must be accompanied | ||||||
|  | by the Installation Information.  But this requirement does not apply | ||||||
|  | if neither you nor any third party retains the ability to install | ||||||
|  | modified object code on the User Product (for example, the work has | ||||||
|  | been installed in ROM). | ||||||
|  | 
 | ||||||
|  |   The requirement to provide Installation Information does not include a | ||||||
|  | requirement to continue to provide support service, warranty, or updates | ||||||
|  | for a work that has been modified or installed by the recipient, or for | ||||||
|  | the User Product in which it has been modified or installed.  Access to a | ||||||
|  | network may be denied when the modification itself materially and | ||||||
|  | adversely affects the operation of the network or violates the rules and | ||||||
|  | protocols for communication across the network. | ||||||
|  | 
 | ||||||
|  |   Corresponding Source conveyed, and Installation Information provided, | ||||||
|  | in accord with this section must be in a format that is publicly | ||||||
|  | documented (and with an implementation available to the public in | ||||||
|  | source code form), and must require no special password or key for | ||||||
|  | unpacking, reading or copying. | ||||||
|  | 
 | ||||||
|  |   7. Additional Terms. | ||||||
|  | 
 | ||||||
|  |   "Additional permissions" are terms that supplement the terms of this | ||||||
|  | License by making exceptions from one or more of its conditions. | ||||||
|  | Additional permissions that are applicable to the entire Program shall | ||||||
|  | be treated as though they were included in this License, to the extent | ||||||
|  | that they are valid under applicable law.  If additional permissions | ||||||
|  | apply only to part of the Program, that part may be used separately | ||||||
|  | under those permissions, but the entire Program remains governed by | ||||||
|  | this License without regard to the additional permissions. | ||||||
|  | 
 | ||||||
|  |   When you convey a copy of a covered work, you may at your option | ||||||
|  | remove any additional permissions from that copy, or from any part of | ||||||
|  | it.  (Additional permissions may be written to require their own | ||||||
|  | removal in certain cases when you modify the work.)  You may place | ||||||
|  | additional permissions on material, added by you to a covered work, | ||||||
|  | for which you have or can give appropriate copyright permission. | ||||||
|  | 
 | ||||||
|  |   Notwithstanding any other provision of this License, for material you | ||||||
|  | add to a covered work, you may (if authorized by the copyright holders of | ||||||
|  | that material) supplement the terms of this License with terms: | ||||||
|  | 
 | ||||||
|  |     a) Disclaiming warranty or limiting liability differently from the | ||||||
|  |     terms of sections 15 and 16 of this License; or | ||||||
|  | 
 | ||||||
|  |     b) Requiring preservation of specified reasonable legal notices or | ||||||
|  |     author attributions in that material or in the Appropriate Legal | ||||||
|  |     Notices displayed by works containing it; or | ||||||
|  | 
 | ||||||
|  |     c) Prohibiting misrepresentation of the origin of that material, or | ||||||
|  |     requiring that modified versions of such material be marked in | ||||||
|  |     reasonable ways as different from the original version; or | ||||||
|  | 
 | ||||||
|  |     d) Limiting the use for publicity purposes of names of licensors or | ||||||
|  |     authors of the material; or | ||||||
|  | 
 | ||||||
|  |     e) Declining to grant rights under trademark law for use of some | ||||||
|  |     trade names, trademarks, or service marks; or | ||||||
|  | 
 | ||||||
|  |     f) Requiring indemnification of licensors and authors of that | ||||||
|  |     material by anyone who conveys the material (or modified versions of | ||||||
|  |     it) with contractual assumptions of liability to the recipient, for | ||||||
|  |     any liability that these contractual assumptions directly impose on | ||||||
|  |     those licensors and authors. | ||||||
|  | 
 | ||||||
|  |   All other non-permissive additional terms are considered "further | ||||||
|  | restrictions" within the meaning of section 10.  If the Program as you | ||||||
|  | received it, or any part of it, contains a notice stating that it is | ||||||
|  | governed by this License along with a term that is a further | ||||||
|  | restriction, you may remove that term.  If a license document contains | ||||||
|  | a further restriction but permits relicensing or conveying under this | ||||||
|  | License, you may add to a covered work material governed by the terms | ||||||
|  | of that license document, provided that the further restriction does | ||||||
|  | not survive such relicensing or conveying. | ||||||
|  | 
 | ||||||
|  |   If you add terms to a covered work in accord with this section, you | ||||||
|  | must place, in the relevant source files, a statement of the | ||||||
|  | additional terms that apply to those files, or a notice indicating | ||||||
|  | where to find the applicable terms. | ||||||
|  | 
 | ||||||
|  |   Additional terms, permissive or non-permissive, may be stated in the | ||||||
|  | form of a separately written license, or stated as exceptions; | ||||||
|  | the above requirements apply either way. | ||||||
|  | 
 | ||||||
|  |   8. Termination. | ||||||
|  | 
 | ||||||
|  |   You may not propagate or modify a covered work except as expressly | ||||||
|  | provided under this License.  Any attempt otherwise to propagate or | ||||||
|  | modify it is void, and will automatically terminate your rights under | ||||||
|  | this License (including any patent licenses granted under the third | ||||||
|  | paragraph of section 11). | ||||||
|  | 
 | ||||||
|  |   However, if you cease all violation of this License, then your | ||||||
|  | license from a particular copyright holder is reinstated (a) | ||||||
|  | provisionally, unless and until the copyright holder explicitly and | ||||||
|  | finally terminates your license, and (b) permanently, if the copyright | ||||||
|  | holder fails to notify you of the violation by some reasonable means | ||||||
|  | prior to 60 days after the cessation. | ||||||
|  | 
 | ||||||
|  |   Moreover, your license from a particular copyright holder is | ||||||
|  | reinstated permanently if the copyright holder notifies you of the | ||||||
|  | violation by some reasonable means, this is the first time you have | ||||||
|  | received notice of violation of this License (for any work) from that | ||||||
|  | copyright holder, and you cure the violation prior to 30 days after | ||||||
|  | your receipt of the notice. | ||||||
|  | 
 | ||||||
|  |   Termination of your rights under this section does not terminate the | ||||||
|  | licenses of parties who have received copies or rights from you under | ||||||
|  | this License.  If your rights have been terminated and not permanently | ||||||
|  | reinstated, you do not qualify to receive new licenses for the same | ||||||
|  | material under section 10. | ||||||
|  | 
 | ||||||
|  |   9. Acceptance Not Required for Having Copies. | ||||||
|  | 
 | ||||||
|  |   You are not required to accept this License in order to receive or | ||||||
|  | run a copy of the Program.  Ancillary propagation of a covered work | ||||||
|  | occurring solely as a consequence of using peer-to-peer transmission | ||||||
|  | to receive a copy likewise does not require acceptance.  However, | ||||||
|  | nothing other than this License grants you permission to propagate or | ||||||
|  | modify any covered work.  These actions infringe copyright if you do | ||||||
|  | not accept this License.  Therefore, by modifying or propagating a | ||||||
|  | covered work, you indicate your acceptance of this License to do so. | ||||||
|  | 
 | ||||||
|  |   10. Automatic Licensing of Downstream Recipients. | ||||||
|  | 
 | ||||||
|  |   Each time you convey a covered work, the recipient automatically | ||||||
|  | receives a license from the original licensors, to run, modify and | ||||||
|  | propagate that work, subject to this License.  You are not responsible | ||||||
|  | for enforcing compliance by third parties with this License. | ||||||
|  | 
 | ||||||
|  |   An "entity transaction" is a transaction transferring control of an | ||||||
|  | organization, or substantially all assets of one, or subdividing an | ||||||
|  | organization, or merging organizations.  If propagation of a covered | ||||||
|  | work results from an entity transaction, each party to that | ||||||
|  | transaction who receives a copy of the work also receives whatever | ||||||
|  | licenses to the work the party's predecessor in interest had or could | ||||||
|  | give under the previous paragraph, plus a right to possession of the | ||||||
|  | Corresponding Source of the work from the predecessor in interest, if | ||||||
|  | the predecessor has it or can get it with reasonable efforts. | ||||||
|  | 
 | ||||||
|  |   You may not impose any further restrictions on the exercise of the | ||||||
|  | rights granted or affirmed under this License.  For example, you may | ||||||
|  | not impose a license fee, royalty, or other charge for exercise of | ||||||
|  | rights granted under this License, and you may not initiate litigation | ||||||
|  | (including a cross-claim or counterclaim in a lawsuit) alleging that | ||||||
|  | any patent claim is infringed by making, using, selling, offering for | ||||||
|  | sale, or importing the Program or any portion of it. | ||||||
|  | 
 | ||||||
|  |   11. Patents. | ||||||
|  | 
 | ||||||
|  |   A "contributor" is a copyright holder who authorizes use under this | ||||||
|  | License of the Program or a work on which the Program is based.  The | ||||||
|  | work thus licensed is called the contributor's "contributor version". | ||||||
|  | 
 | ||||||
|  |   A contributor's "essential patent claims" are all patent claims | ||||||
|  | owned or controlled by the contributor, whether already acquired or | ||||||
|  | hereafter acquired, that would be infringed by some manner, permitted | ||||||
|  | by this License, of making, using, or selling its contributor version, | ||||||
|  | but do not include claims that would be infringed only as a | ||||||
|  | consequence of further modification of the contributor version.  For | ||||||
|  | purposes of this definition, "control" includes the right to grant | ||||||
|  | patent sublicenses in a manner consistent with the requirements of | ||||||
|  | this License. | ||||||
|  | 
 | ||||||
|  |   Each contributor grants you a non-exclusive, worldwide, royalty-free | ||||||
|  | patent license under the contributor's essential patent claims, to | ||||||
|  | make, use, sell, offer for sale, import and otherwise run, modify and | ||||||
|  | propagate the contents of its contributor version. | ||||||
|  | 
 | ||||||
|  |   In the following three paragraphs, a "patent license" is any express | ||||||
|  | agreement or commitment, however denominated, not to enforce a patent | ||||||
|  | (such as an express permission to practice a patent or covenant not to | ||||||
|  | sue for patent infringement).  To "grant" such a patent license to a | ||||||
|  | party means to make such an agreement or commitment not to enforce a | ||||||
|  | patent against the party. | ||||||
|  | 
 | ||||||
|  |   If you convey a covered work, knowingly relying on a patent license, | ||||||
|  | and the Corresponding Source of the work is not available for anyone | ||||||
|  | to copy, free of charge and under the terms of this License, through a | ||||||
|  | publicly available network server or other readily accessible means, | ||||||
|  | then you must either (1) cause the Corresponding Source to be so | ||||||
|  | available, or (2) arrange to deprive yourself of the benefit of the | ||||||
|  | patent license for this particular work, or (3) arrange, in a manner | ||||||
|  | consistent with the requirements of this License, to extend the patent | ||||||
|  | license to downstream recipients.  "Knowingly relying" means you have | ||||||
|  | actual knowledge that, but for the patent license, your conveying the | ||||||
|  | covered work in a country, or your recipient's use of the covered work | ||||||
|  | in a country, would infringe one or more identifiable patents in that | ||||||
|  | country that you have reason to believe are valid. | ||||||
|  | 
 | ||||||
|  |   If, pursuant to or in connection with a single transaction or | ||||||
|  | arrangement, you convey, or propagate by procuring conveyance of, a | ||||||
|  | covered work, and grant a patent license to some of the parties | ||||||
|  | receiving the covered work authorizing them to use, propagate, modify | ||||||
|  | or convey a specific copy of the covered work, then the patent license | ||||||
|  | you grant is automatically extended to all recipients of the covered | ||||||
|  | work and works based on it. | ||||||
|  | 
 | ||||||
|  |   A patent license is "discriminatory" if it does not include within | ||||||
|  | the scope of its coverage, prohibits the exercise of, or is | ||||||
|  | conditioned on the non-exercise of one or more of the rights that are | ||||||
|  | specifically granted under this License.  You may not convey a covered | ||||||
|  | work if you are a party to an arrangement with a third party that is | ||||||
|  | in the business of distributing software, under which you make payment | ||||||
|  | to the third party based on the extent of your activity of conveying | ||||||
|  | the work, and under which the third party grants, to any of the | ||||||
|  | parties who would receive the covered work from you, a discriminatory | ||||||
|  | patent license (a) in connection with copies of the covered work | ||||||
|  | conveyed by you (or copies made from those copies), or (b) primarily | ||||||
|  | for and in connection with specific products or compilations that | ||||||
|  | contain the covered work, unless you entered into that arrangement, | ||||||
|  | or that patent license was granted, prior to 28 March 2007. | ||||||
|  | 
 | ||||||
|  |   Nothing in this License shall be construed as excluding or limiting | ||||||
|  | any implied license or other defenses to infringement that may | ||||||
|  | otherwise be available to you under applicable patent law. | ||||||
|  | 
 | ||||||
|  |   12. No Surrender of Others' Freedom. | ||||||
|  | 
 | ||||||
|  |   If conditions are imposed on you (whether by court order, agreement or | ||||||
|  | otherwise) that contradict the conditions of this License, they do not | ||||||
|  | excuse you from the conditions of this License.  If you cannot convey a | ||||||
|  | covered work so as to satisfy simultaneously your obligations under this | ||||||
|  | License and any other pertinent obligations, then as a consequence you may | ||||||
|  | not convey it at all.  For example, if you agree to terms that obligate you | ||||||
|  | to collect a royalty for further conveying from those to whom you convey | ||||||
|  | the Program, the only way you could satisfy both those terms and this | ||||||
|  | License would be to refrain entirely from conveying the Program. | ||||||
|  | 
 | ||||||
|  |   13. Remote Network Interaction; Use with the GNU General Public License. | ||||||
|  | 
 | ||||||
|  |   Notwithstanding any other provision of this License, if you modify the | ||||||
|  | Program, your modified version must prominently offer all users | ||||||
|  | interacting with it remotely through a computer network (if your version | ||||||
|  | supports such interaction) an opportunity to receive the Corresponding | ||||||
|  | Source of your version by providing access to the Corresponding Source | ||||||
|  | from a network server at no charge, through some standard or customary | ||||||
|  | means of facilitating copying of software.  This Corresponding Source | ||||||
|  | shall include the Corresponding Source for any work covered by version 3 | ||||||
|  | of the GNU General Public License that is incorporated pursuant to the | ||||||
|  | following paragraph. | ||||||
|  | 
 | ||||||
|  |   Notwithstanding any other provision of this License, you have | ||||||
|  | permission to link or combine any covered work with a work licensed | ||||||
|  | under version 3 of the GNU General Public License into a single | ||||||
|  | combined work, and to convey the resulting work.  The terms of this | ||||||
|  | License will continue to apply to the part which is the covered work, | ||||||
|  | but the work with which it is combined will remain governed by version | ||||||
|  | 3 of the GNU General Public License. | ||||||
|  | 
 | ||||||
|  |   14. Revised Versions of this License. | ||||||
|  | 
 | ||||||
|  |   The Free Software Foundation may publish revised and/or new versions of | ||||||
|  | the GNU Affero General Public License from time to time.  Such new versions | ||||||
|  | will be similar in spirit to the present version, but may differ in detail to | ||||||
|  | address new problems or concerns. | ||||||
|  | 
 | ||||||
|  |   Each version is given a distinguishing version number.  If the | ||||||
|  | Program specifies that a certain numbered version of the GNU Affero General | ||||||
|  | Public License "or any later version" applies to it, you have the | ||||||
|  | option of following the terms and conditions either of that numbered | ||||||
|  | version or of any later version published by the Free Software | ||||||
|  | Foundation.  If the Program does not specify a version number of the | ||||||
|  | GNU Affero General Public License, you may choose any version ever published | ||||||
|  | by the Free Software Foundation. | ||||||
|  | 
 | ||||||
|  |   If the Program specifies that a proxy can decide which future | ||||||
|  | versions of the GNU Affero General Public License can be used, that proxy's | ||||||
|  | public statement of acceptance of a version permanently authorizes you | ||||||
|  | to choose that version for the Program. | ||||||
|  | 
 | ||||||
|  |   Later license versions may give you additional or different | ||||||
|  | permissions.  However, no additional obligations are imposed on any | ||||||
|  | author or copyright holder as a result of your choosing to follow a | ||||||
|  | later version. | ||||||
|  | 
 | ||||||
|  |   15. Disclaimer of Warranty. | ||||||
|  | 
 | ||||||
|  |   THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY | ||||||
|  | APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT | ||||||
|  | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY | ||||||
|  | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, | ||||||
|  | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||||||
|  | PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM | ||||||
|  | IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF | ||||||
|  | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. | ||||||
|  | 
 | ||||||
|  |   16. Limitation of Liability. | ||||||
|  | 
 | ||||||
|  |   IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING | ||||||
|  | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS | ||||||
|  | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY | ||||||
|  | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE | ||||||
|  | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF | ||||||
|  | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD | ||||||
|  | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), | ||||||
|  | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF | ||||||
|  | SUCH DAMAGES. | ||||||
|  | 
 | ||||||
|  |   17. Interpretation of Sections 15 and 16. | ||||||
|  | 
 | ||||||
|  |   If the disclaimer of warranty and limitation of liability provided | ||||||
|  | above cannot be given local legal effect according to their terms, | ||||||
|  | reviewing courts shall apply local law that most closely approximates | ||||||
|  | an absolute waiver of all civil liability in connection with the | ||||||
|  | Program, unless a warranty or assumption of liability accompanies a | ||||||
|  | copy of the Program in return for a fee. | ||||||
|  | 
 | ||||||
|  |                      END OF TERMS AND CONDITIONS | ||||||
|  | 
 | ||||||
|  |             How to Apply These Terms to Your New Programs | ||||||
|  | 
 | ||||||
|  |   If you develop a new program, and you want it to be of the greatest | ||||||
|  | possible use to the public, the best way to achieve this is to make it | ||||||
|  | free software which everyone can redistribute and change under these terms. | ||||||
|  | 
 | ||||||
|  |   To do so, attach the following notices to the program.  It is safest | ||||||
|  | to attach them to the start of each source file to most effectively | ||||||
|  | state the exclusion of warranty; and each file should have at least | ||||||
|  | the "copyright" line and a pointer to where the full notice is found. | ||||||
|  | 
 | ||||||
|  |     <one line to give the program's name and a brief idea of what it does.> | ||||||
|  |     Copyright (C) <year>  <name of author> | ||||||
|  | 
 | ||||||
|  |     This program is free software: you can redistribute it and/or modify | ||||||
|  |     it under the terms of the GNU Affero General Public License as published | ||||||
|  |     by the Free Software Foundation, either version 3 of the License, or | ||||||
|  |     (at your option) any later version. | ||||||
|  | 
 | ||||||
|  |     This program is distributed in the hope that it will be useful, | ||||||
|  |     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  |     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  |     GNU Affero General Public License for more details. | ||||||
|  | 
 | ||||||
|  |     You should have received a copy of the GNU Affero General Public License | ||||||
|  |     along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||||
|  | 
 | ||||||
|  | Also add information on how to contact you by electronic and paper mail. | ||||||
|  | 
 | ||||||
|  |   If your software can interact with users remotely through a computer | ||||||
|  | network, you should also make sure that it provides a way for users to | ||||||
|  | get its source.  For example, if your program is a web application, its | ||||||
|  | interface could display a "Source" link that leads users to an archive | ||||||
|  | of the code.  There are many ways you could offer source, and different | ||||||
|  | solutions will be better for different programs; see section 13 for the | ||||||
|  | specific requirements. | ||||||
|  | 
 | ||||||
|  |   You should also get your employer (if you work as a programmer) or school, | ||||||
|  | if any, to sign a "copyright disclaimer" for the program, if necessary. | ||||||
|  | For more information on this, and how to apply and follow the GNU AGPL, see | ||||||
|  | <https://www.gnu.org/licenses/>. | ||||||
							
								
								
									
										1
									
								
								vendor/git.iim.gay/grufwub/go-mutexes/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1
									
								
								vendor/git.iim.gay/grufwub/go-mutexes/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1 @@ | ||||||
|  | Library that provides more complex mutex implementations than default libraries | ||||||
							
								
								
									
										113
									
								
								vendor/git.iim.gay/grufwub/go-mutexes/map.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										113
									
								
								vendor/git.iim.gay/grufwub/go-mutexes/map.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,113 @@ | ||||||
|  | package mutexes | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"sync" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// MutexMap is a structure that allows having a map of self-evicting mutexes
// by key. You do not need to worry about managing the contents of the map,
// only requesting RLock/Lock for keys, and ensuring to call the returned
// unlock functions. Entries are created on first use and removed again
// once no longer held (see the concrete implementation below).
type MutexMap interface {
	// Lock acquires a mutex lock for supplied key, returning an Unlock function.
	// The returned function must be called exactly once to release the lock.
	Lock(key string) (unlock func())

	// RLock acquires a mutex read lock for supplied key, returning an RUnlock function.
	// The returned function must be called exactly once to release the read lock.
	RLock(key string) (runlock func())
}
|  | 
 | ||||||
// mutexMap is the default MutexMap implementation: a plain map of keyed
// RWMutexes guarded by a single outer mutex, with a sync.Pool recycling
// mutex allocations across insert/evict cycles.
type mutexMap struct {
	// NOTE:
	// Individual keyed mutexes should ONLY ever
	// be locked within the protection of the outer
	// mapMu lock. If you lock these outside the
	// protection of this, there is a chance for
	// deadlocks

	mus   map[string]RWMutex // live keyed mutexes; entries removed by evict()
	mapMu sync.Mutex         // guards mus and every keyed-mutex lock acquisition
	pool  sync.Pool          // recycles RWMutex values returned by evict()
}
|  | 
 | ||||||
|  | // NewMap returns a new MutexMap instance based on supplied | ||||||
|  | // RWMutex allocator function, nil implies use default | ||||||
|  | func NewMap(newFn func() RWMutex) MutexMap { | ||||||
|  | 	if newFn == nil { | ||||||
|  | 		newFn = NewRW | ||||||
|  | 	} | ||||||
|  | 	return &mutexMap{ | ||||||
|  | 		mus:   make(map[string]RWMutex), | ||||||
|  | 		mapMu: sync.Mutex{}, | ||||||
|  | 		pool: sync.Pool{ | ||||||
|  | 			New: func() interface{} { | ||||||
|  | 				return newFn() | ||||||
|  | 			}, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
// evict removes the mutex for key from the map and returns it to the
// pool, first waiting for the mutex to become unused. It is spawned
// (as a goroutine) by the unlock function handed out when a key is
// first inserted in getLock, so exactly one evict runs per insertion.
func (mm *mutexMap) evict(key string, mu RWMutex) {
	// Acquire map lock
	mm.mapMu.Lock()

	// Toggle mutex lock to
	// ensure it is unused
	//
	// NOTE(review): this waits while still holding mapMu, so a
	// long-held keyed lock stalls the entire map; a lock holder that
	// attempted to acquire another key through this map during that
	// window would deadlock — confirm callers never nest keyed locks.
	unlock := mu.Lock()
	unlock()

	// Delete mutex key
	delete(mm.mus, key)
	mm.mapMu.Unlock()

	// Release to pool
	mm.pool.Put(mu)
}
|  | 
 | ||||||
|  | // GetRLock acquires a mutex read lock for supplied key, returning an RUnlock function | ||||||
|  | func (mm *mutexMap) RLock(key string) func() { | ||||||
|  | 	return mm.getLock(key, func(mu RWMutex) func() { | ||||||
|  | 		return mu.RLock() | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetLock acquires a mutex lock for supplied key, returning an Unlock function | ||||||
|  | func (mm *mutexMap) Lock(key string) func() { | ||||||
|  | 	return mm.getLock(key, func(mu RWMutex) func() { | ||||||
|  | 		return mu.Lock() | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
// getLock returns an unlock function for the keyed mutex, locking it
// via doLock (which selects read vs write locking). A mutex is lazily
// created for the key if none exists yet.
func (mm *mutexMap) getLock(key string, doLock func(RWMutex) func()) func() {
	// Get map lock
	mm.mapMu.Lock()

	// Look for mutex
	mu, ok := mm.mus[key]
	if ok {
		// Lock and return
		// its unlocker func
		unlock := doLock(mu)
		mm.mapMu.Unlock()
		return unlock
	}

	// Note: even though the mutex data structure is
	// small, benchmarking does actually show that pooled
	// alloc of mutexes here is faster

	// Acquire mu + add
	mu = mm.pool.Get().(RWMutex)
	mm.mus[key] = mu

	// Lock mutex + unlock map
	unlockFn := doLock(mu)
	mm.mapMu.Unlock()

	return func() {
		// Unlock mutex
		unlockFn()

		// Release function: every unlock schedules a background
		// eviction attempt for this key; evict toggles the mutex
		// lock under mapMu before dropping it from the map.
		go mm.evict(key, mu)
	}
}
							
								
								
									
										105
									
								
								vendor/git.iim.gay/grufwub/go-mutexes/mutex.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										105
									
								
								vendor/git.iim.gay/grufwub/go-mutexes/mutex.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,105 @@ | ||||||
|  | package mutexes | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"sync" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// Mutex defines a wrappable mutex. By forcing unlocks
// via returned function it makes wrapping much easier.
type Mutex interface {
	// Lock performs a mutex lock, returning an unlock function
	// that MUST be called exactly once to release the lock.
	Lock() (unlock func())
}

// RWMutex defines a wrappable read-write mutex. By forcing
// unlocks via returned functions it makes wrapping much easier.
type RWMutex interface {
	Mutex

	// RLock performs a mutex read lock, returning an unlock function
	// that MUST be called exactly once to release the read lock.
	RLock() (runlock func())
}
|  | 
 | ||||||
// New returns a new base Mutex implementation
// (a thin wrapper around sync.Mutex).
func New() Mutex {
	return &baseMutex{}
}

// NewRW returns a new base RWMutex implementation
// (a thin wrapper around sync.RWMutex).
func NewRW() RWMutex {
	return &baseRWMutex{}
}

// WithFunc wraps the supplied Mutex to call the provided hooks on lock / unlock.
func WithFunc(mu Mutex, onLock, onUnlock func()) Mutex {
	return &fnMutex{mu: mu, lo: onLock, un: onUnlock}
}

// WithFuncRW wraps the supplied RWMutex to call the provided hooks on lock / rlock / unlock / runlock.
func WithFuncRW(mu RWMutex, onLock, onRLock, onUnlock, onRUnlock func()) RWMutex {
	return &fnRWMutex{mu: mu, lo: onLock, rlo: onRLock, un: onUnlock, run: onRUnlock}
}
|  | 
 | ||||||
// baseMutex implements Mutex on top of a plain sync.Mutex.
type baseMutex struct{ mu sync.Mutex }

// Lock locks the underlying sync.Mutex, returning its Unlock method.
func (mu *baseMutex) Lock() func() {
	mu.mu.Lock()
	unlock := mu.mu.Unlock
	return unlock
}
|  | 
 | ||||||
// baseRWMutex implements RWMutex on top of a plain sync.RWMutex.
type baseRWMutex struct{ mu sync.RWMutex }

// Lock write-locks the underlying sync.RWMutex, returning its Unlock method.
func (mu *baseRWMutex) Lock() func() {
	mu.mu.Lock()
	unlock := mu.mu.Unlock
	return unlock
}

// RLock read-locks the underlying sync.RWMutex, returning its RUnlock method.
func (mu *baseRWMutex) RLock() func() {
	mu.mu.RLock()
	runlock := mu.mu.RUnlock
	return runlock
}
|  | 
 | ||||||
|  | // fnMutex wraps a Mutex to add hooks for Lock and Unlock | ||||||
|  | type fnMutex struct { | ||||||
|  | 	mu Mutex | ||||||
|  | 	lo func() | ||||||
|  | 	un func() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (mu *fnMutex) Lock() func() { | ||||||
|  | 	unlock := mu.mu.Lock() | ||||||
|  | 	mu.lo() | ||||||
|  | 	return func() { | ||||||
|  | 		mu.un() | ||||||
|  | 		unlock() | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // fnRWMutex wraps a RWMutex to add hooks for Lock, RLock, Unlock and RUnlock | ||||||
|  | type fnRWMutex struct { | ||||||
|  | 	mu  RWMutex | ||||||
|  | 	lo  func() | ||||||
|  | 	rlo func() | ||||||
|  | 	un  func() | ||||||
|  | 	run func() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (mu *fnRWMutex) Lock() func() { | ||||||
|  | 	unlock := mu.mu.Lock() | ||||||
|  | 	mu.lo() | ||||||
|  | 	return func() { | ||||||
|  | 		mu.un() | ||||||
|  | 		unlock() | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (mu *fnRWMutex) RLock() func() { | ||||||
|  | 	unlock := mu.mu.RLock() | ||||||
|  | 	mu.rlo() | ||||||
|  | 	return func() { | ||||||
|  | 		mu.run() | ||||||
|  | 		unlock() | ||||||
|  | 	} | ||||||
|  | } | ||||||
							
								
								
									
										39
									
								
								vendor/git.iim.gay/grufwub/go-mutexes/mutex_safe.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										39
									
								
								vendor/git.iim.gay/grufwub/go-mutexes/mutex_safe.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,39 @@ | ||||||
|  | package mutexes | ||||||
|  | 
 | ||||||
|  | import "sync" | ||||||
|  | 
 | ||||||
// WithSafety wraps the supplied Mutex to protect unlock fns
// from being called multiple times.
func WithSafety(mu Mutex) Mutex {
	return &safeMutex{mu: mu}
}

// WithSafetyRW wraps the supplied RWMutex to protect unlock
// fns from being called multiple times.
func WithSafetyRW(mu RWMutex) RWMutex {
	return &safeRWMutex{mu: mu}
}
|  | 
 | ||||||
|  | // safeMutex simply wraps a Mutex to add multi-unlock safety | ||||||
|  | type safeMutex struct{ mu Mutex } | ||||||
|  | 
 | ||||||
|  | func (mu *safeMutex) Lock() func() { | ||||||
|  | 	unlock := mu.mu.Lock() | ||||||
|  | 	once := sync.Once{} | ||||||
|  | 	return func() { once.Do(unlock) } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // safeRWMutex simply wraps a RWMutex to add multi-unlock safety | ||||||
|  | type safeRWMutex struct{ mu RWMutex } | ||||||
|  | 
 | ||||||
|  | func (mu *safeRWMutex) Lock() func() { | ||||||
|  | 	unlock := mu.mu.Lock() | ||||||
|  | 	once := sync.Once{} | ||||||
|  | 	return func() { once.Do(unlock) } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (mu *safeRWMutex) RLock() func() { | ||||||
|  | 	unlock := mu.mu.RLock() | ||||||
|  | 	once := sync.Once{} | ||||||
|  | 	return func() { once.Do(unlock) } | ||||||
|  | } | ||||||
							
								
								
									
										104
									
								
								vendor/git.iim.gay/grufwub/go-mutexes/mutex_timeout.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										104
									
								
								vendor/git.iim.gay/grufwub/go-mutexes/mutex_timeout.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,104 @@ | ||||||
|  | package mutexes | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"sync" | ||||||
|  | 	"time" | ||||||
|  | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-nowish" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// TimeoutMutex defines a Mutex with timeouts on locks: the timeout
// fires if the returned unlock function is not called within the
// configured duration.
type TimeoutMutex interface {
	Mutex

	// LockFunc is functionally the same as Lock(), but allows setting a custom hook called on timeout
	LockFunc(func()) func()
}

// TimeoutRWMutex defines a RWMutex with separate timeouts on read
// and write locks.
type TimeoutRWMutex interface {
	RWMutex

	// LockFunc is functionally the same as Lock(), but allows setting a custom hook called on timeout
	LockFunc(func()) func()

	// RLockFunc is functionally the same as RLock(), but allows setting a custom hook called on timeout
	RLockFunc(func()) func()
}
|  | 
 | ||||||
// WithTimeout wraps the supplied Mutex to add a timeout.
// A duration < 1 disables the timeout entirely.
func WithTimeout(mu Mutex, d time.Duration) TimeoutMutex {
	return &timeoutMutex{mu: mu, d: d}
}

// WithTimeoutRW wraps the supplied RWMutex to add read/write timeouts.
// A duration < 1 disables the corresponding timeout.
func WithTimeoutRW(mu RWMutex, rd, wd time.Duration) TimeoutRWMutex {
	return &timeoutRWMutex{mu: mu, rd: rd, wd: wd}
}
|  | 
 | ||||||
|  | // timeoutMutex wraps a Mutex with timeout | ||||||
|  | type timeoutMutex struct { | ||||||
|  | 	mu Mutex         // mu is the wrapped mutex | ||||||
|  | 	d  time.Duration // d is the timeout duration | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (mu *timeoutMutex) Lock() func() { | ||||||
|  | 	return mu.LockFunc(func() { panic("timed out") }) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (mu *timeoutMutex) LockFunc(fn func()) func() { | ||||||
|  | 	return mutexTimeout(mu.d, mu.mu.Lock(), fn) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // TimeoutRWMutex wraps a RWMutex with timeouts | ||||||
|  | type timeoutRWMutex struct { | ||||||
|  | 	mu RWMutex       // mu is the wrapped rwmutex | ||||||
|  | 	rd time.Duration // rd is the rlock timeout duration | ||||||
|  | 	wd time.Duration // wd is the lock timeout duration | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (mu *timeoutRWMutex) Lock() func() { | ||||||
|  | 	return mu.LockFunc(func() { panic("timed out") }) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (mu *timeoutRWMutex) LockFunc(fn func()) func() { | ||||||
|  | 	return mutexTimeout(mu.wd, mu.mu.Lock(), fn) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (mu *timeoutRWMutex) RLock() func() { | ||||||
|  | 	return mu.RLockFunc(func() { panic("timed out") }) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (mu *timeoutRWMutex) RLockFunc(fn func()) func() { | ||||||
|  | 	return mutexTimeout(mu.rd, mu.mu.RLock(), fn) | ||||||
|  | } | ||||||
|  | 
 | ||||||
// timeoutPool provides nowish.Timeout objects for timeout mutexes,
// recycled across lock operations to avoid per-lock timer allocations.
var timeoutPool = sync.Pool{
	New: func() interface{} {
		return nowish.NewTimeout()
	},
}
|  | 
 | ||||||
// mutexTimeout performs a timed unlock, calling supplied fn if the
// timeout is reached before the returned function is invoked.
// A duration < 1 disables the timeout and returns unlock unchanged.
func mutexTimeout(d time.Duration, unlock func(), fn func()) func() {
	if d < 1 {
		// No timeout, just unlock
		return unlock
	}

	// Acquire timeout obj
	t := timeoutPool.Get().(nowish.Timeout)

	// Start the timeout with hook
	// NOTE(review): Start's error is ignored; a pooled Timeout should
	// always be idle here, but confirm that assumption holds.
	t.Start(d, fn)

	// Return func cancelling timeout,
	// replacing Timeout in pool and
	// finally unlocking mutex
	return func() {
		t.Cancel()
		timeoutPool.Put(t)
		unlock()
	}
}
							
								
								
									
										5
									
								
								vendor/git.iim.gay/grufwub/go-nowish/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										5
									
								
								vendor/git.iim.gay/grufwub/go-nowish/README.md
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,5 @@ | ||||||
nowish is a very simple library for creating Go clocks that give a good(-ish)
estimate of the "now" time — how "ish" depends on the precision you request.

It is similar to fastime, but more bare-bones, using unsafe pointers instead of
atomic.Value since we don't need to worry about type changes.
							
								
								
									
										141
									
								
								vendor/git.iim.gay/grufwub/go-nowish/time.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										141
									
								
								vendor/git.iim.gay/grufwub/go-nowish/time.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,141 @@ | ||||||
|  | package nowish | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"sync" | ||||||
|  | 	"sync/atomic" | ||||||
|  | 	"time" | ||||||
|  | 	"unsafe" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// Start returns a new Clock instance initialized and
// started with the provided precision, along with the
// stop function for its underlying timer.
func Start(precision time.Duration) (*Clock, func()) {
	c := Clock{}
	return &c, c.Start(precision)
}
|  | 
 | ||||||
// Clock is a cached wall-clock, updated on a fixed tick rather
// than on every read.
type Clock struct {
	noCopy noCopy //nolint noCopy because a copy would corrupt the atomics

	// format stores the time formatting style string
	format string

	// valid indicates whether the current value stored in .Format is valid
	valid uint32

	// mutex protects writes to .Format, not because it would be unsafe, but
	// because we want to minimize unnecessary allocations
	mutex sync.Mutex

	// Format is an unsafe pointer to the last-updated time format string
	Format unsafe.Pointer

	// Time is an unsafe pointer to the last-updated time.Time object
	Time unsafe.Pointer
}
|  | 
 | ||||||
// Start starts the clock with the provided precision, the
// returned function is the stop function for the underlying timer.
func (c *Clock) Start(precision time.Duration) func() {
	// Create ticker from duration
	tick := time.NewTicker(precision)

	// Set initial time
	t := time.Now()
	atomic.StorePointer(&c.Time, unsafe.Pointer(&t))

	// Set initial format (empty; computed lazily by NowFormat)
	s := ""
	atomic.StorePointer(&c.Format, unsafe.Pointer(&s))

	// If formatting string unset, set default
	c.mutex.Lock()
	if c.format == "" {
		c.format = time.RFC822
	}
	c.mutex.Unlock()

	// Start main routine
	go c.run(tick)

	// Return stop fn
	return tick.Stop
}
|  | 
 | ||||||
// run is the internal clock ticking loop: on each tick it refreshes
// the cached time and invalidates the cached format string.
func (c *Clock) run(tick *time.Ticker) {
	for {
		// Wait on tick
		_, ok := <-tick.C

		// Channel closed
		// NOTE(review): time.Ticker.Stop never closes C, so this branch
		// never fires; after Stop this goroutine blocks on the receive
		// forever (leaked rather than ended) — confirm this is intended.
		if !ok {
			break
		}

		// Update time
		t := time.Now()
		atomic.StorePointer(&c.Time, unsafe.Pointer(&t))

		// Invalidate format string
		atomic.StoreUint32(&c.valid, 0)
	}
}
|  | 
 | ||||||
// Now returns a good (ish) estimate of the current 'now' time,
// i.e. the time stored at the last tick.
func (c *Clock) Now() time.Time {
	return *(*time.Time)(atomic.LoadPointer(&c.Time))
}
|  | 
 | ||||||
// NowFormat returns the formatted "now" time, cached until next tick and "now" updates.
func (c *Clock) NowFormat() string {
	// If format still valid, return this
	// NOTE(review): c.Format may hold a pointer to a []byte header
	// (stored below); reading it as *string relies on the string header
	// being a prefix of the slice header — gc-runtime-specific, confirm.
	if atomic.LoadUint32(&c.valid) == 1 {
		return *(*string)(atomic.LoadPointer(&c.Format))
	}

	// Get mutex lock
	c.mutex.Lock()

	// Double check still invalid: another goroutine may have
	// recomputed the format while we were waiting on the mutex
	if atomic.LoadUint32(&c.valid) == 1 {
		c.mutex.Unlock()
		return *(*string)(atomic.LoadPointer(&c.Format))
	}

	// Calculate time format
	b := c.Now().AppendFormat(
		make([]byte, 0, len(c.format)),
		c.format,
	)

	// Update the stored value and set valid!
	atomic.StorePointer(&c.Format, unsafe.Pointer(&b))
	atomic.StoreUint32(&c.valid, 1)

	// Unlock and return
	c.mutex.Unlock()

	// Note:
	// it's safe to do this conversion here
	// because this byte slice will never change,
	// and we have the direct pointer to it, we're
	// not requesting it atomically via c.Format
	return *(*string)(unsafe.Pointer(&b))
}
|  | 
 | ||||||
|  | // SetFormat sets the time format string used by .NowFormat() | ||||||
|  | func (c *Clock) SetFormat(format string) { | ||||||
|  | 	// Get mutex lock | ||||||
|  | 	c.mutex.Lock() | ||||||
|  | 
 | ||||||
|  | 	// Update time format | ||||||
|  | 	c.format = format | ||||||
|  | 
 | ||||||
|  | 	// Invalidate current format string | ||||||
|  | 	atomic.StoreUint32(&c.valid, 0) | ||||||
|  | 
 | ||||||
|  | 	// Unlock | ||||||
|  | 	c.mutex.Unlock() | ||||||
|  | } | ||||||
							
								
								
									
										111
									
								
								vendor/git.iim.gay/grufwub/go-nowish/timeout.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										111
									
								
								vendor/git.iim.gay/grufwub/go-nowish/timeout.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,111 @@ | ||||||
|  | package nowish | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"errors" | ||||||
|  | 	"sync/atomic" | ||||||
|  | 	"time" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// ErrTimeoutStarted is returned if a Timeout interface is attempted to be reused while still in operation
var ErrTimeoutStarted = errors.New("nowish: timeout already started")

// timeoutState provides a thread-safe timeout state mechanism:
// 0 = idle (reset), 1 = started (running), 2 = stopped (cancelled or fired).
type timeoutState uint32

// start attempts to start the state (0 -> 1), must be already reset, returns success
func (t *timeoutState) start() bool {
	return atomic.CompareAndSwapUint32((*uint32)(t), 0, 1)
}

// stop attempts to stop the state (1 -> 2), must already be started, returns success
func (t *timeoutState) stop() bool {
	return atomic.CompareAndSwapUint32((*uint32)(t), 1, 2)
}

// reset returns the state to idle (0), permitting reuse
func (t *timeoutState) reset() {
	atomic.StoreUint32((*uint32)(t), 0)
}
|  | 
 | ||||||
// Timeout provides a reusable structure for enforcing timeouts with a cancel.
type Timeout interface {
	// Start starts the timer with supplied timeout. If timeout is reached before
	// cancel then the supplied timeout hook will be called. An error is returned
	// if the Timeout is already running when this function is called.
	Start(time.Duration, func()) error

	// Cancel cancels the currently running timer. If a cancel is achieved, then
	// this function will return after the timeout goroutine is finished.
	Cancel()
}
|  | 
 | ||||||
// NewTimeout returns a new Timeout instance. The ticker is created
// with a placeholder interval and immediately stopped: it is only
// (re)armed via Reset() when Start() is called.
func NewTimeout() Timeout {
	t := &timeout{
		tk: time.NewTicker(time.Minute),
		ch: make(chan struct{}),
	}
	t.tk.Stop() // don't keep it running
	return t
}
|  | 
 | ||||||
// timeout is the Timeout implementation that we force
// initialization on via NewTimeout by unexporting it
type timeout struct {
	noCopy noCopy //nolint noCopy because a copy will mess with atomics

	tk *time.Ticker  // tk is the underlying timeout-timer (stopped while idle)
	ch chan struct{} // ch is the cancel propagation channel (unbuffered)
	st timeoutState  // st stores the current timeout state (and protects concurrent use)
}
|  | 
 | ||||||
// Start arms the timer with duration d. If it fires before Cancel()
// is called, hook is invoked from the internal goroutine. Returns
// ErrTimeoutStarted if a timer is already running.
func (t *timeout) Start(d time.Duration, hook func()) error {
	// Attempt to acquire start
	if !t.st.start() {
		return ErrTimeoutStarted
	}

	// Start the ticker
	t.tk.Reset(d)

	go func() {
		cancelled := false

		select {
		// Timeout reached
		case <-t.tk.C:
			if !t.st.stop() {
				// cancel was called in the nick of time:
				// Cancel won the state CAS, so it is blocked
				// sending on ch — drain that send here
				<-t.ch
				cancelled = true
			}

		// Cancel called
		case <-t.ch:
			cancelled = true
		}

		// Stop ticker
		t.tk.Stop()

		// If timed out call hook
		if !cancelled {
			hook()
		}

		// Finally, reset state so the Timeout can be reused
		t.st.reset()
	}()

	return nil
}
|  | 
 | ||||||
// Cancel cancels a running timeout. If the state CAS fails, the timer
// is either idle or has already fired, so there is nothing to cancel.
func (t *timeout) Cancel() {
	// Attempt to acquire stop
	if !t.st.stop() {
		return
	}

	// Send a cancel signal; ch is unbuffered, so this blocks until
	// the Start goroutine receives it (select branch or post-fire drain)
	t.ch <- struct{}{}
}
							
								
								
									
										10
									
								
								vendor/git.iim.gay/grufwub/go-nowish/util.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								vendor/git.iim.gay/grufwub/go-nowish/util.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,10 @@ | ||||||
|  | package nowish | ||||||
|  | 
 | ||||||
|  | //nolint | ||||||
|  | type noCopy struct{} | ||||||
|  | 
 | ||||||
|  | //nolint | ||||||
|  | func (*noCopy) Lock() {} | ||||||
|  | 
 | ||||||
|  | //nolint | ||||||
|  | func (*noCopy) Unlock() {} | ||||||
							
								
								
									
										64
									
								
								vendor/git.iim.gay/grufwub/go-store/kv/iterator.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										64
									
								
								vendor/git.iim.gay/grufwub/go-store/kv/iterator.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,64 @@ | ||||||
|  | package kv | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"git.iim.gay/grufwub/go-errors" | ||||||
|  | 	"git.iim.gay/grufwub/go-store/storage" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// ErrIteratorClosed is returned by Value() after the iterator has been released.
var ErrIteratorClosed = errors.Define("store/kv: iterator closed")

// KVIterator provides a read-only iterator to all the key-value
// pairs in a KVStore. While the iterator is open the store is read
// locked, you MUST release the iterator when you are finished with
// it.
//
// Please note:
// - individual iterators are NOT concurrency safe, though it is safe to
// have multiple iterators running concurrently
type KVIterator struct {
	store   *KVStore               // store is the linked KVStore (nil once released)
	entries []storage.StorageEntry // entries is the snapshot of entries being iterated
	index   int                    // index is the position of the current entry
	key     string                 // key is the current entry's key ("" when exhausted)
	onClose func()                 // onClose is invoked by Release
}
|  | 
 | ||||||
// Next attempts to set the next key-value pair, the
// return value is if there was another pair remaining.
// NOTE(review): assumes index is initialized to -1 by the
// constructor so the first Next() lands on entries[0] — confirm.
func (i *KVIterator) Next() bool {
	next := i.index + 1
	if next >= len(i.entries) {
		// Exhausted: clear the current key
		i.key = ""
		return false
	}
	i.key = i.entries[next].Key()
	i.index = next
	return true
}
|  | 
 | ||||||
// Key returns the next key from the store
func (i *KVIterator) Key() string {
	return i.key
}

// Release releases the KVIterator and KVStore's read lock.
// NOTE(review): onClose is not nil-ed out, so a second Release
// would invoke the callback again — confirm callers release once.
func (i *KVIterator) Release() {
	// Reset key, path, entries
	i.store = nil
	i.key = ""
	i.entries = nil

	// Perform requested callback
	i.onClose()
}
|  | 
 | ||||||
|  | // Value returns the next value from the KVStore | ||||||
|  | func (i *KVIterator) Value() ([]byte, error) { | ||||||
|  | 	// Check store isn't closed | ||||||
|  | 	if i.store == nil { | ||||||
|  | 		return nil, ErrIteratorClosed | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Attempt to fetch from store | ||||||
|  | 	return i.store.get(i.key) | ||||||
|  | } | ||||||
							
								
								
									
										125
									
								
								vendor/git.iim.gay/grufwub/go-store/kv/state.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										125
									
								
								vendor/git.iim.gay/grufwub/go-store/kv/state.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,125 @@ | ||||||
|  | package kv | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"io" | ||||||
|  | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-errors" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// ErrStateClosed is returned by state methods called after the state window has closed.
var ErrStateClosed = errors.Define("store/kv: state closed")

// StateRO provides a read-only window to the store. While this
// state is active during the Read() function window, the entire
// store will be read-locked. The state is thread-safe for concurrent
// use UNTIL the moment that your supplied function to Read() returns,
// then the state has zero guarantees
type StateRO struct {
	store *KVStore // linked store; nil once the state is closed
}
|  | 
 | ||||||
|  | func (st *StateRO) Get(key string) ([]byte, error) { | ||||||
|  | 	// Check not closed | ||||||
|  | 	if st.store == nil { | ||||||
|  | 		return nil, ErrStateClosed | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Pass request to store | ||||||
|  | 	return st.store.get(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *StateRO) GetStream(key string) (io.ReadCloser, error) { | ||||||
|  | 	// Check not closed | ||||||
|  | 	if st.store == nil { | ||||||
|  | 		return nil, ErrStateClosed | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Pass request to store | ||||||
|  | 	return st.store.getStream(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *StateRO) Has(key string) (bool, error) { | ||||||
|  | 	// Check not closed | ||||||
|  | 	if st.store == nil { | ||||||
|  | 		return false, ErrStateClosed | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Pass request to store | ||||||
|  | 	return st.store.has(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *StateRO) close() { | ||||||
|  | 	st.store = nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
// StateRW provides a read-write window to the store. While this
// state is active during the Update() function window, the entire
// store will be locked. The state is thread-safe for concurrent
// use UNTIL the moment that your supplied function to Update() returns,
// then the state has zero guarantees
type StateRW struct {
	store *KVStore // linked store; nil once the state is closed
}
|  | 
 | ||||||
|  | func (st *StateRW) Get(key string) ([]byte, error) { | ||||||
|  | 	// Check not closed | ||||||
|  | 	if st.store == nil { | ||||||
|  | 		return nil, ErrStateClosed | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Pass request to store | ||||||
|  | 	return st.store.get(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *StateRW) GetStream(key string) (io.ReadCloser, error) { | ||||||
|  | 	// Check not closed | ||||||
|  | 	if st.store == nil { | ||||||
|  | 		return nil, ErrStateClosed | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Pass request to store | ||||||
|  | 	return st.store.getStream(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *StateRW) Put(key string, value []byte) error { | ||||||
|  | 	// Check not closed | ||||||
|  | 	if st.store == nil { | ||||||
|  | 		return ErrStateClosed | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Pass request to store | ||||||
|  | 	return st.store.put(key, value) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *StateRW) PutStream(key string, r io.Reader) error { | ||||||
|  | 	// Check not closed | ||||||
|  | 	if st.store == nil { | ||||||
|  | 		return ErrStateClosed | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Pass request to store | ||||||
|  | 	return st.store.putStream(key, r) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *StateRW) Has(key string) (bool, error) { | ||||||
|  | 	// Check not closed | ||||||
|  | 	if st.store == nil { | ||||||
|  | 		return false, ErrStateClosed | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Pass request to store | ||||||
|  | 	return st.store.has(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *StateRW) Delete(key string) error { | ||||||
|  | 	// Check not closed | ||||||
|  | 	if st.store == nil { | ||||||
|  | 		return ErrStateClosed | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Pass request to store | ||||||
|  | 	return st.store.delete(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
// close invalidates this read-write state window; any subsequent
// method call on it will return ErrStateClosed.
func (st *StateRW) close() {
	st.store = nil
}
							
								
								
									
										243
									
								
								vendor/git.iim.gay/grufwub/go-store/kv/store.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										243
									
								
								vendor/git.iim.gay/grufwub/go-store/kv/store.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,243 @@ | ||||||
|  | package kv | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"io" | ||||||
|  | 	"sync" | ||||||
|  | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-mutexes" | ||||||
|  | 	"git.iim.gay/grufwub/go-store/storage" | ||||||
|  | 	"git.iim.gay/grufwub/go-store/util" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// KVStore is a very simple, yet performant key-value store
type KVStore struct {
	mutexMap mutexes.MutexMap // mutexMap is a map of keys to mutexes to protect file access
	mutex    sync.RWMutex     // mutex is the total store mutex (held RO for key ops, RW for Iterator/Read/Update windows)
	storage  storage.Storage  // storage is the underlying storage
}
|  | 
 | ||||||
|  | func OpenFile(path string, cfg *storage.DiskConfig) (*KVStore, error) { | ||||||
|  | 	// Attempt to open disk storage | ||||||
|  | 	storage, err := storage.OpenFile(path, cfg) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Return new KVStore | ||||||
|  | 	return OpenStorage(storage) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func OpenBlock(path string, cfg *storage.BlockConfig) (*KVStore, error) { | ||||||
|  | 	// Attempt to open block storage | ||||||
|  | 	storage, err := storage.OpenBlock(path, cfg) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Return new KVStore | ||||||
|  | 	return OpenStorage(storage) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func OpenStorage(storage storage.Storage) (*KVStore, error) { | ||||||
|  | 	// Perform initial storage clean | ||||||
|  | 	err := storage.Clean() | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Return new KVStore | ||||||
|  | 	return &KVStore{ | ||||||
|  | 		mutexMap: mutexes.NewMap(mutexes.NewRW), | ||||||
|  | 		mutex:    sync.RWMutex{}, | ||||||
|  | 		storage:  storage, | ||||||
|  | 	}, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Get fetches the bytes for supplied key in the store | ||||||
|  | func (st *KVStore) Get(key string) ([]byte, error) { | ||||||
|  | 	// Acquire store read lock | ||||||
|  | 	st.mutex.RLock() | ||||||
|  | 	defer st.mutex.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	// Pass to unprotected fn | ||||||
|  | 	return st.get(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *KVStore) get(key string) ([]byte, error) { | ||||||
|  | 	// Acquire read lock for key | ||||||
|  | 	runlock := st.mutexMap.RLock(key) | ||||||
|  | 	defer runlock() | ||||||
|  | 
 | ||||||
|  | 	// Read file bytes | ||||||
|  | 	return st.storage.ReadBytes(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetStream fetches a ReadCloser for the bytes at the supplied key location in the store | ||||||
|  | func (st *KVStore) GetStream(key string) (io.ReadCloser, error) { | ||||||
|  | 	// Acquire store read lock | ||||||
|  | 	st.mutex.RLock() | ||||||
|  | 	defer st.mutex.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	// Pass to unprotected fn | ||||||
|  | 	return st.getStream(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *KVStore) getStream(key string) (io.ReadCloser, error) { | ||||||
|  | 	// Acquire read lock for key | ||||||
|  | 	runlock := st.mutexMap.RLock(key) | ||||||
|  | 
 | ||||||
|  | 	// Attempt to open stream for read | ||||||
|  | 	rd, err := st.storage.ReadStream(key) | ||||||
|  | 	if err != nil { | ||||||
|  | 		runlock() | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Wrap readcloser in our own callback closer | ||||||
|  | 	return util.ReadCloserWithCallback(rd, runlock), nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Put places the bytes at the supplied key location in the store | ||||||
|  | func (st *KVStore) Put(key string, value []byte) error { | ||||||
|  | 	// Acquire store write lock | ||||||
|  | 	st.mutex.Lock() | ||||||
|  | 	defer st.mutex.Unlock() | ||||||
|  | 
 | ||||||
|  | 	// Pass to unprotected fn | ||||||
|  | 	return st.put(key, value) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *KVStore) put(key string, value []byte) error { | ||||||
|  | 	// Acquire write lock for key | ||||||
|  | 	unlock := st.mutexMap.Lock(key) | ||||||
|  | 	defer unlock() | ||||||
|  | 
 | ||||||
|  | 	// Write file bytes | ||||||
|  | 	return st.storage.WriteBytes(key, value) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // PutStream writes the bytes from the supplied Reader at the supplied key location in the store | ||||||
|  | func (st *KVStore) PutStream(key string, r io.Reader) error { | ||||||
|  | 	// Acquire store write lock | ||||||
|  | 	st.mutex.Lock() | ||||||
|  | 	defer st.mutex.Unlock() | ||||||
|  | 
 | ||||||
|  | 	// Pass to unprotected fn | ||||||
|  | 	return st.putStream(key, r) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *KVStore) putStream(key string, r io.Reader) error { | ||||||
|  | 	// Acquire write lock for key | ||||||
|  | 	unlock := st.mutexMap.Lock(key) | ||||||
|  | 	defer unlock() | ||||||
|  | 
 | ||||||
|  | 	// Write file stream | ||||||
|  | 	return st.storage.WriteStream(key, r) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Has checks whether the supplied key exists in the store | ||||||
|  | func (st *KVStore) Has(key string) (bool, error) { | ||||||
|  | 	// Acquire store read lock | ||||||
|  | 	st.mutex.RLock() | ||||||
|  | 	defer st.mutex.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	// Pass to unprotected fn | ||||||
|  | 	return st.has(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *KVStore) has(key string) (bool, error) { | ||||||
|  | 	// Acquire read lock for key | ||||||
|  | 	runlock := st.mutexMap.RLock(key) | ||||||
|  | 	defer runlock() | ||||||
|  | 
 | ||||||
|  | 	// Stat file on disk | ||||||
|  | 	return st.storage.Stat(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Delete removes the supplied key-value pair from the store | ||||||
|  | func (st *KVStore) Delete(key string) error { | ||||||
|  | 	// Acquire store write lock | ||||||
|  | 	st.mutex.Lock() | ||||||
|  | 	defer st.mutex.Unlock() | ||||||
|  | 
 | ||||||
|  | 	// Pass to unprotected fn | ||||||
|  | 	return st.delete(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (st *KVStore) delete(key string) error { | ||||||
|  | 	// Acquire write lock for key | ||||||
|  | 	unlock := st.mutexMap.Lock(key) | ||||||
|  | 	defer unlock() | ||||||
|  | 
 | ||||||
|  | 	// Remove file from disk | ||||||
|  | 	return st.storage.Remove(key) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Iterator returns an Iterator for key-value pairs in the store, using supplied match function | ||||||
|  | func (st *KVStore) Iterator(matchFn func(string) bool) (*KVIterator, error) { | ||||||
|  | 	// If no function, match all | ||||||
|  | 	if matchFn == nil { | ||||||
|  | 		matchFn = func(string) bool { return true } | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Get store read lock | ||||||
|  | 	st.mutex.RLock() | ||||||
|  | 
 | ||||||
|  | 	// Setup the walk keys function | ||||||
|  | 	entries := []storage.StorageEntry{} | ||||||
|  | 	walkFn := func(entry storage.StorageEntry) { | ||||||
|  | 		// Ignore unmatched entries | ||||||
|  | 		if !matchFn(entry.Key()) { | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Add to entries | ||||||
|  | 		entries = append(entries, entry) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Walk keys in the storage | ||||||
|  | 	err := st.storage.WalkKeys(&storage.WalkKeysOptions{WalkFn: walkFn}) | ||||||
|  | 	if err != nil { | ||||||
|  | 		st.mutex.RUnlock() | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Return new iterator | ||||||
|  | 	return &KVIterator{ | ||||||
|  | 		store:   st, | ||||||
|  | 		entries: entries, | ||||||
|  | 		index:   -1, | ||||||
|  | 		key:     "", | ||||||
|  | 		onClose: st.mutex.RUnlock, | ||||||
|  | 	}, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Read provides a read-only window to the store, holding it in a read-locked state until | ||||||
|  | // the supplied function returns | ||||||
|  | func (st *KVStore) Read(do func(*StateRO)) { | ||||||
|  | 	// Get store read lock | ||||||
|  | 	st.mutex.RLock() | ||||||
|  | 	defer st.mutex.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	// Create new store state (defer close) | ||||||
|  | 	state := &StateRO{store: st} | ||||||
|  | 	defer state.close() | ||||||
|  | 
 | ||||||
|  | 	// Pass state | ||||||
|  | 	do(state) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Update provides a read-write window to the store, holding it in a read-write-locked state | ||||||
|  | // until the supplied functions returns | ||||||
|  | func (st *KVStore) Update(do func(*StateRW)) { | ||||||
|  | 	// Get store lock | ||||||
|  | 	st.mutex.Lock() | ||||||
|  | 	defer st.mutex.Unlock() | ||||||
|  | 
 | ||||||
|  | 	// Create new store state (defer close) | ||||||
|  | 	state := &StateRW{store: st} | ||||||
|  | 	defer state.close() | ||||||
|  | 
 | ||||||
|  | 	// Pass state | ||||||
|  | 	do(state) | ||||||
|  | } | ||||||
							
								
								
									
										785
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/block.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										785
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/block.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,785 @@ | ||||||
|  | package storage | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"crypto/sha256" | ||||||
|  | 	"io" | ||||||
|  | 	"io/fs" | ||||||
|  | 	"os" | ||||||
|  | 	"strings" | ||||||
|  | 	"sync" | ||||||
|  | 	"syscall" | ||||||
|  | 
 | ||||||
|  | 	"git.iim.gay/grufwub/fastpath" | ||||||
|  | 	"git.iim.gay/grufwub/go-bytes" | ||||||
|  | 	"git.iim.gay/grufwub/go-errors" | ||||||
|  | 	"git.iim.gay/grufwub/go-hashenc" | ||||||
|  | 	"git.iim.gay/grufwub/go-store/util" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
var (
	// nodePathPrefix is the store-root subdirectory holding node files
	nodePathPrefix  = "node/"
	// blockPathPrefix is the store-root subdirectory holding block files
	blockPathPrefix = "block/"
)

// DefaultBlockConfig is the default BlockStorage configuration
var DefaultBlockConfig = &BlockConfig{
	BlockSize:    1024 * 16, // 16KiB chunks
	WriteBufSize: 4096,
	Overwrite:    false,
	Compression:  NoCompression(),
}
|  | 
 | ||||||
// BlockConfig defines options to be used when opening a BlockStorage
type BlockConfig struct {
	// BlockSize is the chunking size to use when splitting and storing blocks of data
	BlockSize int

	// WriteBufSize is the buffer size to use when writing file streams (PutStream)
	WriteBufSize int

	// Overwrite allows overwriting values of stored keys in the storage
	Overwrite bool

	// Compression is the Compressor to use when reading / writing files, default is no compression
	Compression Compressor
}
|  | 
 | ||||||
|  | // getBlockConfig returns a valid BlockConfig for supplied ptr | ||||||
|  | func getBlockConfig(cfg *BlockConfig) BlockConfig { | ||||||
|  | 	// If nil, use default | ||||||
|  | 	if cfg == nil { | ||||||
|  | 		cfg = DefaultBlockConfig | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Assume nil compress == none | ||||||
|  | 	if cfg.Compression == nil { | ||||||
|  | 		cfg.Compression = NoCompression() | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Assume 0 chunk size == use default | ||||||
|  | 	if cfg.BlockSize < 1 { | ||||||
|  | 		cfg.BlockSize = DefaultBlockConfig.BlockSize | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Assume 0 buf size == use default | ||||||
|  | 	if cfg.WriteBufSize < 1 { | ||||||
|  | 		cfg.WriteBufSize = DefaultDiskConfig.WriteBufSize | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Return owned config copy | ||||||
|  | 	return BlockConfig{ | ||||||
|  | 		BlockSize:    cfg.BlockSize, | ||||||
|  | 		WriteBufSize: cfg.WriteBufSize, | ||||||
|  | 		Overwrite:    cfg.Overwrite, | ||||||
|  | 		Compression:  cfg.Compression, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
// BlockStorage is a Storage implementation that stores input data as chunks on
// a filesystem. Each value is chunked into blocks of configured size and these
// blocks are stored with name equal to their base64-encoded SHA256 hash-sum. A
// "node" file is finally created containing an array of hashes contained within
// this value
type BlockStorage struct {
	path      string      // path is the root path of this store
	blockPath string      // blockPath is the joined root path + block path prefix
	nodePath  string      // nodePath is the joined root path + node path prefix
	config    BlockConfig // cfg is the supplied configuration for this store
	hashPool  sync.Pool   // hashPool is this store's hashEncoder pool (one encoder per concurrent writer)

	// NOTE:
	// BlockStorage does not need to lock each of the underlying block files
	// as the filename itself directly relates to the contents. If there happens
	// to be an overwrite, it will just be of the same data since the filename is
	// the hash of the data.
}
|  | 
 | ||||||
// OpenBlock opens a BlockStorage instance for given folder path and configuration.
// The directory is created if missing; an error is returned if path names a file.
func OpenBlock(path string, cfg *BlockConfig) (*BlockStorage, error) {
	// Acquire path builder
	pb := util.AcquirePathBuilder()
	defer util.ReleasePathBuilder(pb)

	// Clean provided path, ensure ends in '/' (should
	// be dir, this helps with file path trimming later)
	path = pb.Clean(path) + "/"

	// Get checked config
	config := getBlockConfig(cfg)

	// Attempt to open path
	file, err := os.OpenFile(path, defaultFileROFlags, defaultDirPerms)
	if err != nil {
		// If not a not-exist error, return
		if !os.IsNotExist(err) {
			return nil, err
		}

		// Attempt to make store path dirs
		err = os.MkdirAll(path, defaultDirPerms)
		if err != nil {
			return nil, err
		}

		// Reopen dir now it's been created
		file, err = os.OpenFile(path, defaultFileROFlags, defaultDirPerms)
		if err != nil {
			return nil, err
		}
	}
	defer file.Close()

	// Double check this is a dir (NOT a file!)
	stat, err := file.Stat()
	if err != nil {
		return nil, err
	} else if !stat.IsDir() {
		return nil, errPathIsFile
	}

	// Return new BlockStorage; the pool lazily creates
	// one hashEncoder per concurrent writer
	return &BlockStorage{
		path:      path,
		blockPath: pb.Join(path, blockPathPrefix),
		nodePath:  pb.Join(path, nodePathPrefix),
		config:    config,
		hashPool: sync.Pool{
			New: func() interface{} {
				return newHashEncoder()
			},
		},
	}, nil
}
|  | 
 | ||||||
// Clean implements storage.Clean(). It parses every node file, removes
// any block file referenced by no node, and errors if any node still
// references block hashes that do not exist on disk (corrupt nodes).
func (st *BlockStorage) Clean() error {
	// Map of node file name -> parsed node contents
	nodes := map[string]*node{}

	// Acquire path builder
	pb := fastpath.AcquireBuilder()
	defer fastpath.ReleaseBuilder(pb)

	// Walk nodes dir for entries
	onceErr := errors.OnceError{}
	err := util.WalkDir(pb, st.nodePath, func(npath string, fsentry fs.DirEntry) {
		// Only deal with regular files
		if !fsentry.Type().IsRegular() {
			return
		}

		// Stop if we hit error previously
		if onceErr.IsSet() {
			return
		}

		// Get joined node path name
		npath = pb.Join(npath, fsentry.Name())

		// Attempt to open RO file
		// (defers fire at the end of this callback, per entry)
		file, err := open(npath, defaultFileROFlags)
		if err != nil {
			onceErr.Store(err)
			return
		}
		defer file.Close()

		// Alloc new Node + acquire hash buffer for writes
		hbuf := util.AcquireBuffer(encodedHashLen)
		defer util.ReleaseBuffer(hbuf)
		node := node{}

		// Write file contents to node
		// (nodeWriter parses the stored hash list into node.hashes)
		_, err = io.CopyBuffer(
			&nodeWriter{
				node: &node,
				buf:  hbuf,
			},
			file,
			nil,
		)
		if err != nil {
			onceErr.Store(err)
			return
		}

		// Append to nodes slice
		nodes[fsentry.Name()] = &node
	})

	// Handle errors (though nodePath may not have been created yet)
	if err != nil && !os.IsNotExist(err) {
		return err
	} else if onceErr.IsSet() {
		return onceErr.Load()
	}

	// Walk blocks dir for entries
	onceErr.Reset()
	err = util.WalkDir(pb, st.blockPath, func(bpath string, fsentry fs.DirEntry) {
		// Only deal with regular files
		if !fsentry.Type().IsRegular() {
			return
		}

		// Stop if we hit error previously
		if onceErr.IsSet() {
			return
		}

		// Cross this block hash off every node referencing it;
		// nodes emptied by the removal are fully accounted for
		inUse := false
		for key, node := range nodes {
			if node.removeHash(fsentry.Name()) {
				if len(node.hashes) < 1 {
					// This node contained hash, and after removal is now empty.
					// Remove this node from our tracked nodes slice
					delete(nodes, key)
				}
				inUse = true
			}
		}

		// Block hash is used by node
		if inUse {
			return
		}

		// Get joined block path name
		bpath = pb.Join(bpath, fsentry.Name())

		// Remove this unused block path
		err := os.Remove(bpath)
		if err != nil {
			onceErr.Store(err)
			return
		}
	})

	// Handle errors (though blockPath may not have been created yet)
	if err != nil && !os.IsNotExist(err) {
		return err
	} else if onceErr.IsSet() {
		return onceErr.Load()
	}

	// If there are nodes left at this point, they are corrupt
	// (i.e. they're referencing block hashes that don't exist)
	if len(nodes) > 0 {
		nodeKeys := []string{}
		for key := range nodes {
			nodeKeys = append(nodeKeys, key)
		}
		return errCorruptNodes.Extend("%v", nodeKeys)
	}

	return nil
}
|  | 
 | ||||||
|  | // ReadBytes implements Storage.ReadBytes() | ||||||
|  | func (st *BlockStorage) ReadBytes(key string) ([]byte, error) { | ||||||
|  | 	// Get stream reader for key | ||||||
|  | 	rc, err := st.ReadStream(key) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Read all bytes and return | ||||||
|  | 	return io.ReadAll(rc) | ||||||
|  | } | ||||||
|  | 
 | ||||||
// ReadStream implements Storage.ReadStream(). It parses the node file
// for key and returns a reader that yields the node's blocks in order.
func (st *BlockStorage) ReadStream(key string) (io.ReadCloser, error) {
	// Get node file path for key (errors on invalid key)
	npath, err := st.nodePathForKey(key)
	if err != nil {
		return nil, err
	}

	// Attempt to open RO file
	file, err := open(npath, defaultFileROFlags)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	// Alloc new Node + acquire hash buffer for writes
	hbuf := util.AcquireBuffer(encodedHashLen)
	defer util.ReleaseBuffer(hbuf)
	node := node{}

	// Write file contents to node
	// (nodeWriter parses the stored hash list into node.hashes;
	// the node file itself is closed before we return)
	_, err = io.CopyBuffer(
		&nodeWriter{
			node: &node,
			buf:  hbuf,
		},
		file,
		nil,
	)
	if err != nil {
		return nil, err
	}

	// Return new block reader. NopReadCloser is used since the
	// blockReader holds no open handle here — presumably it opens
	// each block file per read (see readBlock); confirm upstream.
	return util.NopReadCloser(&blockReader{
		storage: st,
		node:    &node,
	}), nil
}
|  | 
 | ||||||
|  | func (st *BlockStorage) readBlock(key string) ([]byte, error) { | ||||||
|  | 	// Get block file path for key | ||||||
|  | 	bpath := st.blockPathForKey(key) | ||||||
|  | 
 | ||||||
|  | 	// Attempt to open RO file | ||||||
|  | 	file, err := open(bpath, defaultFileROFlags) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	defer file.Close() | ||||||
|  | 
 | ||||||
|  | 	// Wrap the file in a compressor | ||||||
|  | 	cFile, err := st.config.Compression.Reader(file) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	defer cFile.Close() | ||||||
|  | 
 | ||||||
|  | 	// Read the entire file | ||||||
|  | 	return io.ReadAll(cFile) | ||||||
|  | } | ||||||
|  | 
 | ||||||
// WriteBytes implements Storage.WriteBytes() by wrapping the value
// in an in-memory reader and deferring to WriteStream.
func (st *BlockStorage) WriteBytes(key string, value []byte) error {
	return st.WriteStream(key, bytes.NewReader(value))
}
|  | 
 | ||||||
|  | // WriteStream implements Storage.WriteStream() | ||||||
|  | func (st *BlockStorage) WriteStream(key string, r io.Reader) error { | ||||||
|  | 	// Get node file path for key | ||||||
|  | 	npath, err := st.nodePathForKey(key) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Check if this exists | ||||||
|  | 	ok, err := stat(key) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Check if we allow overwrites | ||||||
|  | 	if ok && !st.config.Overwrite { | ||||||
|  | 		return ErrAlreadyExists | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Ensure nodes dir (and any leading up to) exists | ||||||
|  | 	err = os.MkdirAll(st.nodePath, defaultDirPerms) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Ensure blocks dir (and any leading up to) exists | ||||||
|  | 	err = os.MkdirAll(st.blockPath, defaultDirPerms) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Alloc new node | ||||||
|  | 	node := node{} | ||||||
|  | 
 | ||||||
|  | 	// Acquire HashEncoder | ||||||
|  | 	hc := st.hashPool.Get().(*hashEncoder) | ||||||
|  | 	defer st.hashPool.Put(hc) | ||||||
|  | 
 | ||||||
|  | 	// Create new waitgroup and OnceError for | ||||||
|  | 	// goroutine error tracking and propagating | ||||||
|  | 	wg := sync.WaitGroup{} | ||||||
|  | 	onceErr := errors.OnceError{} | ||||||
|  | 
 | ||||||
|  | loop: | ||||||
|  | 	for !onceErr.IsSet() { | ||||||
|  | 		// Fetch new buffer for this loop | ||||||
|  | 		buf := util.AcquireBuffer(st.config.BlockSize) | ||||||
|  | 		buf.Grow(st.config.BlockSize) | ||||||
|  | 
 | ||||||
|  | 		// Read next chunk | ||||||
|  | 		n, err := io.ReadFull(r, buf.B) | ||||||
|  | 		switch err { | ||||||
|  | 		case nil, io.ErrUnexpectedEOF: | ||||||
|  | 			// do nothing | ||||||
|  | 		case io.EOF: | ||||||
|  | 			util.ReleaseBuffer(buf) | ||||||
|  | 			break loop | ||||||
|  | 		default: | ||||||
|  | 			util.ReleaseBuffer(buf) | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Hash the encoded data | ||||||
|  | 		sum := hc.EncodeSum(buf.B) | ||||||
|  | 
 | ||||||
|  | 		// Append to the node's hashes | ||||||
|  | 		node.hashes = append(node.hashes, sum.String()) | ||||||
|  | 
 | ||||||
|  | 		// If already on disk, skip | ||||||
|  | 		has, err := st.statBlock(sum.StringPtr()) | ||||||
|  | 		if err != nil { | ||||||
|  | 			util.ReleaseBuffer(buf) | ||||||
|  | 			return err | ||||||
|  | 		} else if has { | ||||||
|  | 			util.ReleaseBuffer(buf) | ||||||
|  | 			continue loop | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Write in separate goroutine | ||||||
|  | 		wg.Add(1) | ||||||
|  | 		go func() { | ||||||
|  | 			// Defer buffer release + signal done | ||||||
|  | 			defer func() { | ||||||
|  | 				util.ReleaseBuffer(buf) | ||||||
|  | 				wg.Done() | ||||||
|  | 			}() | ||||||
|  | 
 | ||||||
|  | 			// Write block to store at hash | ||||||
|  | 			err = st.writeBlock(sum.StringPtr(), buf.B[:n]) | ||||||
|  | 			if err != nil { | ||||||
|  | 				onceErr.Store(err) | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 		}() | ||||||
|  | 
 | ||||||
|  | 		// We reached EOF | ||||||
|  | 		if n < buf.Len() { | ||||||
|  | 			break loop | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Wait, check errors | ||||||
|  | 	wg.Wait() | ||||||
|  | 	if onceErr.IsSet() { | ||||||
|  | 		return onceErr.Load() | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// If no hashes created, return | ||||||
|  | 	if len(node.hashes) < 1 { | ||||||
|  | 		return errNoHashesWritten | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Prepare to swap error if need-be | ||||||
|  | 	errSwap := errSwapNoop | ||||||
|  | 
 | ||||||
|  | 	// Build file RW flags | ||||||
|  | 	// NOTE: we performed an initial check for | ||||||
|  | 	//       this before writing blocks, but if | ||||||
|  | 	//       the utilizer of this storage didn't | ||||||
|  | 	//       correctly mutex protect this key then | ||||||
|  | 	//       someone may have beaten us to the | ||||||
|  | 	//       punch at writing the node file. | ||||||
|  | 	flags := defaultFileRWFlags | ||||||
|  | 	if !st.config.Overwrite { | ||||||
|  | 		flags |= syscall.O_EXCL | ||||||
|  | 
 | ||||||
|  | 		// Catch + replace err exist | ||||||
|  | 		errSwap = errSwapExist | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Attempt to open RW file | ||||||
|  | 	file, err := open(npath, flags) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return errSwap(err) | ||||||
|  | 	} | ||||||
|  | 	defer file.Close() | ||||||
|  | 
 | ||||||
|  | 	// Acquire write buffer | ||||||
|  | 	buf := util.AcquireBuffer(st.config.WriteBufSize) | ||||||
|  | 	defer util.ReleaseBuffer(buf) | ||||||
|  | 	buf.Grow(st.config.WriteBufSize) | ||||||
|  | 
 | ||||||
|  | 	// Finally, write data to file | ||||||
|  | 	_, err = io.CopyBuffer(file, &nodeReader{node: &node}, nil) | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // writeBlock writes the block with hash and supplied value to the filesystem | ||||||
|  | func (st *BlockStorage) writeBlock(hash string, value []byte) error { | ||||||
|  | 	// Get block file path for key | ||||||
|  | 	bpath := st.blockPathForKey(hash) | ||||||
|  | 
 | ||||||
|  | 	// Attempt to open RW file | ||||||
|  | 	file, err := open(bpath, defaultFileRWFlags) | ||||||
|  | 	if err != nil { | ||||||
|  | 		if err == ErrAlreadyExists { | ||||||
|  | 			err = nil /* race issue describe in struct NOTE */ | ||||||
|  | 		} | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	defer file.Close() | ||||||
|  | 
 | ||||||
|  | 	// Wrap the file in a compressor | ||||||
|  | 	cFile, err := st.config.Compression.Writer(file) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	defer cFile.Close() | ||||||
|  | 
 | ||||||
|  | 	// Write value to file | ||||||
|  | 	_, err = cFile.Write(value) | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
// statBlock checks for existence of supplied block hash,
// i.e. whether its block file exists on disk.
func (st *BlockStorage) statBlock(hash string) (bool, error) {
	return stat(st.blockPathForKey(hash))
}
|  | 
 | ||||||
|  | // Stat implements Storage.Stat() | ||||||
|  | func (st *BlockStorage) Stat(key string) (bool, error) { | ||||||
|  | 	// Get node file path for key | ||||||
|  | 	kpath, err := st.nodePathForKey(key) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return false, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Check for file on disk | ||||||
|  | 	return stat(kpath) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Remove implements Storage.Remove() | ||||||
|  | func (st *BlockStorage) Remove(key string) error { | ||||||
|  | 	// Get node file path for key | ||||||
|  | 	kpath, err := st.nodePathForKey(key) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Attempt to remove file | ||||||
|  | 	return os.Remove(kpath) | ||||||
|  | } | ||||||
|  | 
 | ||||||
// WalkKeys implements Storage.WalkKeys(),
// invoking opts.WalkFn once per node file found under the node directory.
func (st *BlockStorage) WalkKeys(opts *WalkKeysOptions) error {
	// Acquire path builder
	pb := fastpath.AcquireBuilder()
	defer fastpath.ReleaseBuilder(pb)

	// Walk dir for entries
	return util.WalkDir(pb, st.nodePath, func(npath string, fsentry fs.DirEntry) {
		// Only deal with regular files
		if fsentry.Type().IsRegular() {
			// A node file's name is its storage key
			opts.WalkFn(entry(fsentry.Name()))
		}
	})
}
|  | 
 | ||||||
|  | // nodePathForKey calculates the node file path for supplied key | ||||||
|  | func (st *BlockStorage) nodePathForKey(key string) (string, error) { | ||||||
|  | 	// Path separators are illegal | ||||||
|  | 	if strings.Contains(key, "/") { | ||||||
|  | 		return "", ErrInvalidKey | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Acquire path builder | ||||||
|  | 	pb := util.AcquirePathBuilder() | ||||||
|  | 	defer util.ReleasePathBuilder(pb) | ||||||
|  | 
 | ||||||
|  | 	// Return joined + cleaned node-path | ||||||
|  | 	return pb.Join(st.nodePath, key), nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
// blockPathForKey calculates the block file path for supplied hash.
// Unlike nodePathForKey this cannot fail: hashes are produced
// internally, so no validity check is performed.
func (st *BlockStorage) blockPathForKey(hash string) string {
	// Acquire (and later release) a pooled path builder
	pb := util.AcquirePathBuilder()
	defer util.ReleasePathBuilder(pb)
	// NOTE(review): pb is released on return — assumes pb.Join returns
	// a string that does not alias the builder's buffer; verify.
	return pb.Join(st.blockPath, hash)
}
|  | 
 | ||||||
|  | // hashSeparator is the separating byte between block hashes | ||||||
|  | const hashSeparator = byte(':') | ||||||
|  | 
 | ||||||
|  | // node represents the contents of a node file in storage | ||||||
|  | type node struct { | ||||||
|  | 	hashes []string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // removeHash attempts to remove supplied block hash from the node's hash array | ||||||
|  | func (n *node) removeHash(hash string) bool { | ||||||
|  | 	haveDropped := false | ||||||
|  | 	for i := 0; i < len(n.hashes); { | ||||||
|  | 		if n.hashes[i] == hash { | ||||||
|  | 			// Drop this hash from slice | ||||||
|  | 			n.hashes = append(n.hashes[:i], n.hashes[i+1:]...) | ||||||
|  | 			haveDropped = true | ||||||
|  | 		} else { | ||||||
|  | 			// Continue iter | ||||||
|  | 			i++ | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return haveDropped | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // nodeReader is an io.Reader implementation for the node file representation, | ||||||
|  | // which is useful when calculated node file is being written to the store | ||||||
|  | type nodeReader struct { | ||||||
|  | 	node *node | ||||||
|  | 	idx  int | ||||||
|  | 	last int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (r *nodeReader) Read(b []byte) (int, error) { | ||||||
|  | 	n := 0 | ||||||
|  | 
 | ||||||
|  | 	// '-1' means we missed writing | ||||||
|  | 	// hash separator on last iteration | ||||||
|  | 	if r.last == -1 { | ||||||
|  | 		b[n] = hashSeparator | ||||||
|  | 		n++ | ||||||
|  | 		r.last = 0 | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for r.idx < len(r.node.hashes) { | ||||||
|  | 		hash := r.node.hashes[r.idx] | ||||||
|  | 
 | ||||||
|  | 		// Copy into buffer + update read count | ||||||
|  | 		m := copy(b[n:], hash[r.last:]) | ||||||
|  | 		n += m | ||||||
|  | 
 | ||||||
|  | 		// If incomplete copy, return here | ||||||
|  | 		if m < len(hash)-r.last { | ||||||
|  | 			r.last = m | ||||||
|  | 			return n, nil | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Check we can write last separator | ||||||
|  | 		if n == len(b) { | ||||||
|  | 			r.last = -1 | ||||||
|  | 			return n, nil | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Write separator, iter, reset | ||||||
|  | 		b[n] = hashSeparator | ||||||
|  | 		n++ | ||||||
|  | 		r.idx++ | ||||||
|  | 		r.last = 0 | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// We reached end of hashes | ||||||
|  | 	return n, io.EOF | ||||||
|  | } | ||||||
|  | 
 | ||||||
// nodeWriter is an io.Writer implementation for the node file representation,
// which is useful when calculated node file is being read from the store.
// It splits the written bytes on hashSeparator, appending each complete
// hash to node.hashes; buf carries any partial hash between Write calls.
type nodeWriter struct {
	node *node         // destination node being populated
	buf  *bytes.Buffer // carry-over buffer for a partially-written hash
}

// Write implements io.Writer, validating that every separator-terminated
// hash is exactly encodedHashLen bytes long (errInvalidNode otherwise).
func (w *nodeWriter) Write(b []byte) (int, error) {
	n := 0

	for {
		// Find next hash separator position
		idx := bytes.IndexByte(b[n:], hashSeparator)
		if idx == -1 {
			// No separator left: remainder is a partial hash.
			// NOTE(review): this only rejects once the previously
			// buffered prefix already exceeds encodedHashLen; an
			// over-long tail written in this call is caught on the
			// next separator instead — confirm intended.
			if w.buf.Len() > encodedHashLen {
				return n, errInvalidNode
			}

			// Write all contents to buffer
			w.buf.Write(b[n:])
			return len(b), nil
		}

		// Found hash separator, write
		// current buf contents to Node hashes
		w.buf.Write(b[n : n+idx])
		n += idx + 1
		if w.buf.Len() != encodedHashLen {
			return n, errInvalidNode
		}

		// Append completed hash & reset carry-over buffer
		w.node.hashes = append(w.node.hashes, w.buf.String())
		w.buf.Reset()
	}
}
|  | 
 | ||||||
|  | // blockReader is an io.Reader implementation for the combined, linked block | ||||||
|  | // data contained with a node file. Basically, this allows reading value data | ||||||
|  | // from the store for a given node file | ||||||
|  | type blockReader struct { | ||||||
|  | 	storage *BlockStorage | ||||||
|  | 	node    *node | ||||||
|  | 	buf     []byte | ||||||
|  | 	prev    int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (r *blockReader) Read(b []byte) (int, error) { | ||||||
|  | 	n := 0 | ||||||
|  | 
 | ||||||
|  | 	// Data left in buf, copy as much as we | ||||||
|  | 	// can into supplied read buffer | ||||||
|  | 	if r.prev < len(r.buf)-1 { | ||||||
|  | 		n += copy(b, r.buf[r.prev:]) | ||||||
|  | 		r.prev += n | ||||||
|  | 		if n >= len(b) { | ||||||
|  | 			return n, nil | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for { | ||||||
|  | 		// Check we have any hashes left | ||||||
|  | 		if len(r.node.hashes) < 1 { | ||||||
|  | 			return n, io.EOF | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Get next key from slice | ||||||
|  | 		key := r.node.hashes[0] | ||||||
|  | 		r.node.hashes = r.node.hashes[1:] | ||||||
|  | 
 | ||||||
|  | 		// Attempt to fetch next batch of data | ||||||
|  | 		var err error | ||||||
|  | 		r.buf, err = r.storage.readBlock(key) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return n, err | ||||||
|  | 		} | ||||||
|  | 		r.prev = 0 | ||||||
|  | 
 | ||||||
|  | 		// Copy as much as can from new buffer | ||||||
|  | 		m := copy(b[n:], r.buf) | ||||||
|  | 		r.prev += m | ||||||
|  | 		n += m | ||||||
|  | 
 | ||||||
|  | 		// If we hit end of supplied buf, return | ||||||
|  | 		if n >= len(b) { | ||||||
|  | 			return n, nil | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
// hashEncoder is a HashEncoder with built-in encode buffer,
// producing base64-encoded SHA256 sums of block data.
type hashEncoder struct {
	henc hashenc.HashEncoder // underlying hash + encode implementation
	ebuf []byte              // reused encode output buffer
}

// encodedHashLen is the once-calculated encoded hash-sum length
// (the base64-encoded size of a SHA256 digest).
var encodedHashLen = hashenc.Base64().EncodedLen(
	sha256.New().Size(),
)

// newHashEncoder returns a new hashEncoder instance, with its
// encode buffer sized to exactly fit one encoded sum.
func newHashEncoder() *hashEncoder {
	hash := sha256.New()
	enc := hashenc.Base64()
	return &hashEncoder{
		henc: hashenc.New(hash, enc),
		ebuf: make([]byte, enc.EncodedLen(hash.Size())),
	}
}

// EncodeSum encodes the src data and returns resulting bytes, only valid until next call to EncodeSum()
// (the return aliases the encoder's internal reused buffer).
func (henc *hashEncoder) EncodeSum(src []byte) bytes.Bytes {
	henc.henc.EncodeSum(henc.ebuf, src)
	return bytes.ToBytes(henc.ebuf)
}
							
								
								
									
										104
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/compressor.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										104
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/compressor.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,104 @@ | ||||||
|  | package storage | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"compress/gzip" | ||||||
|  | 	"compress/zlib" | ||||||
|  | 	"io" | ||||||
|  | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/util" | ||||||
|  | 	"github.com/golang/snappy" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Compressor defines a means of compressing/decompressing values going into a key-value store | ||||||
|  | type Compressor interface { | ||||||
|  | 	// Reader returns a new decompressing io.ReadCloser based on supplied (compressed) io.Reader | ||||||
|  | 	Reader(io.Reader) (io.ReadCloser, error) | ||||||
|  | 
 | ||||||
|  | 	// Writer returns a new compressing io.WriteCloser based on supplied (uncompressed) io.Writer | ||||||
|  | 	Writer(io.Writer) (io.WriteCloser, error) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type gzipCompressor struct { | ||||||
|  | 	level int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GZipCompressor returns a new Compressor that implements GZip at default compression level | ||||||
|  | func GZipCompressor() Compressor { | ||||||
|  | 	return GZipCompressorLevel(gzip.DefaultCompression) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GZipCompressorLevel returns a new Compressor that implements GZip at supplied compression level | ||||||
|  | func GZipCompressorLevel(level int) Compressor { | ||||||
|  | 	return &gzipCompressor{ | ||||||
|  | 		level: level, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (c *gzipCompressor) Reader(r io.Reader) (io.ReadCloser, error) { | ||||||
|  | 	return gzip.NewReader(r) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (c *gzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) { | ||||||
|  | 	return gzip.NewWriterLevel(w, c.level) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type zlibCompressor struct { | ||||||
|  | 	level int | ||||||
|  | 	dict  []byte | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ZLibCompressor returns a new Compressor that implements ZLib at default compression level | ||||||
|  | func ZLibCompressor() Compressor { | ||||||
|  | 	return ZLibCompressorLevelDict(zlib.DefaultCompression, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ZLibCompressorLevel returns a new Compressor that implements ZLib at supplied compression level | ||||||
|  | func ZLibCompressorLevel(level int) Compressor { | ||||||
|  | 	return ZLibCompressorLevelDict(level, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ZLibCompressorLevelDict returns a new Compressor that implements ZLib at supplied compression level with supplied dict | ||||||
|  | func ZLibCompressorLevelDict(level int, dict []byte) Compressor { | ||||||
|  | 	return &zlibCompressor{ | ||||||
|  | 		level: level, | ||||||
|  | 		dict:  dict, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (c *zlibCompressor) Reader(r io.Reader) (io.ReadCloser, error) { | ||||||
|  | 	return zlib.NewReaderDict(r, c.dict) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (c *zlibCompressor) Writer(w io.Writer) (io.WriteCloser, error) { | ||||||
|  | 	return zlib.NewWriterLevelDict(w, c.level, c.dict) | ||||||
|  | } | ||||||
|  | 
 | ||||||
// snappyCompressor implements Compressor via github.com/golang/snappy,
// which exposes no tunable compression level.
type snappyCompressor struct{}

// SnappyCompressor returns a new Compressor that implements Snappy
func SnappyCompressor() Compressor {
	return &snappyCompressor{}
}

// Reader wraps r for snappy decompression; the snappy reader is wrapped
// in a no-op closer to satisfy io.ReadCloser.
func (c *snappyCompressor) Reader(r io.Reader) (io.ReadCloser, error) {
	return util.NopReadCloser(snappy.NewReader(r)), nil
}

// Writer returns a buffered snappy writer (already an io.WriteCloser;
// Close flushes buffered data).
func (c *snappyCompressor) Writer(w io.Writer) (io.WriteCloser, error) {
	return snappy.NewBufferedWriter(w), nil
}
|  | 
 | ||||||
// nopCompressor is a pass-through Compressor performing no (de)compression.
type nopCompressor struct{}

// NoCompression is a Compressor that simply does nothing
func NoCompression() Compressor {
	return &nopCompressor{}
}

// Reader returns r unchanged, wrapped only to satisfy io.ReadCloser.
func (c *nopCompressor) Reader(r io.Reader) (io.ReadCloser, error) {
	return util.NopReadCloser(r), nil
}

// Writer returns w unchanged, wrapped only to satisfy io.WriteCloser.
func (c *nopCompressor) Writer(w io.Writer) (io.WriteCloser, error) {
	return util.NopWriteCloser(w), nil
}
							
								
								
									
										289
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/disk.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										289
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/disk.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,289 @@ | ||||||
|  | package storage | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"io" | ||||||
|  | 	"io/fs" | ||||||
|  | 	"os" | ||||||
|  | 	"path" | ||||||
|  | 	"syscall" | ||||||
|  | 
 | ||||||
|  | 	"git.iim.gay/grufwub/fastpath" | ||||||
|  | 	"git.iim.gay/grufwub/go-bytes" | ||||||
|  | 	"git.iim.gay/grufwub/go-store/util" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// DefaultDiskConfig is the default DiskStorage configuration, used by
// getDiskConfig() whenever a nil config is supplied: overwriting enabled,
// 4KiB write buffer, no key transform, no compression.
var DefaultDiskConfig = &DiskConfig{
	Overwrite:    true,
	WriteBufSize: 4096,
	Transform:    NopTransform(),
	Compression:  NoCompression(),
}
|  | 
 | ||||||
// DiskConfig defines options to be used when opening a DiskStorage
type DiskConfig struct {
	// Transform is the supplied key<-->path KeyTransform
	Transform KeyTransform

	// WriteBufSize is the buffer size to use when writing file streams (PutStream);
	// values < 1 are replaced with DefaultDiskConfig.WriteBufSize
	WriteBufSize int

	// Overwrite allows overwriting values of stored keys in the storage;
	// when false, writes open files with O_EXCL and existing keys
	// yield ErrAlreadyExists
	Overwrite bool

	// Compression is the Compressor to use when reading / writing files, default is no compression
	Compression Compressor
}
|  | 
 | ||||||
|  | // getDiskConfig returns a valid DiskConfig for supplied ptr | ||||||
|  | func getDiskConfig(cfg *DiskConfig) DiskConfig { | ||||||
|  | 	// If nil, use default | ||||||
|  | 	if cfg == nil { | ||||||
|  | 		cfg = DefaultDiskConfig | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Assume nil transform == none | ||||||
|  | 	if cfg.Transform == nil { | ||||||
|  | 		cfg.Transform = NopTransform() | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Assume nil compress == none | ||||||
|  | 	if cfg.Compression == nil { | ||||||
|  | 		cfg.Compression = NoCompression() | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Assume 0 buf size == use default | ||||||
|  | 	if cfg.WriteBufSize < 1 { | ||||||
|  | 		cfg.WriteBufSize = DefaultDiskConfig.WriteBufSize | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Return owned config copy | ||||||
|  | 	return DiskConfig{ | ||||||
|  | 		Transform:    cfg.Transform, | ||||||
|  | 		WriteBufSize: cfg.WriteBufSize, | ||||||
|  | 		Overwrite:    cfg.Overwrite, | ||||||
|  | 		Compression:  cfg.Compression, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
// DiskStorage is a Storage implementation that stores directly to a filesystem
type DiskStorage struct {
	path   string     // path is the root path of this store (cleaned, with trailing '/')
	dots   int        // dots is the "dotdot" count for the root store path
	config DiskConfig // config is the validated configuration for this store
}
|  | 
 | ||||||
// OpenFile opens a DiskStorage instance for given folder path and configuration.
// The directory (and any missing parents) is created if it does not exist;
// errPathIsFile is returned if the path exists but is a regular file.
func OpenFile(path string, cfg *DiskConfig) (*DiskStorage, error) {
	// Acquire path builder
	pb := util.AcquirePathBuilder()
	defer util.ReleasePathBuilder(pb)

	// Clean provided path, ensure ends in '/' (should
	// be dir, this helps with file path trimming later)
	path = pb.Clean(path) + "/"

	// Get checked config (nil / zero fields defaulted)
	config := getDiskConfig(cfg)

	// Attempt to open dir path
	file, err := os.OpenFile(path, defaultFileROFlags, defaultDirPerms)
	if err != nil {
		// If not a not-exist error, return
		if !os.IsNotExist(err) {
			return nil, err
		}

		// Attempt to make store path dirs
		err = os.MkdirAll(path, defaultDirPerms)
		if err != nil {
			return nil, err
		}

		// Reopen dir now it's been created
		file, err = os.OpenFile(path, defaultFileROFlags, defaultDirPerms)
		if err != nil {
			return nil, err
		}
	}
	defer file.Close()

	// Double check this is a dir (NOT a file!)
	stat, err := file.Stat()
	if err != nil {
		return nil, err
	} else if !stat.IsDir() {
		return nil, errPathIsFile
	}

	// Return new DiskStorage; the root's '..' count is cached so
	// filepath() can reject keys that traverse above the store root
	return &DiskStorage{
		path:   path,
		dots:   util.CountDotdots(path),
		config: config,
	}, nil
}
|  | 
 | ||||||
// Clean implements Storage.Clean() by deferring
// to util.CleanDirs on the store root path.
func (st *DiskStorage) Clean() error {
	return util.CleanDirs(st.path)
}
|  | 
 | ||||||
// ReadBytes implements Storage.ReadBytes(), reading the whole
// (decompressed) value for key into memory via ReadStream.
func (st *DiskStorage) ReadBytes(key string) ([]byte, error) {
	// Get stream reader for key
	rc, err := st.ReadStream(key)
	if err != nil {
		return nil, err
	}
	defer rc.Close()

	// Read all bytes and return
	return io.ReadAll(rc)
}
|  | 
 | ||||||
// ReadStream implements Storage.ReadStream(), returning a decompressing
// reader for the value at key. Closing the returned reader also closes
// the underlying file.
func (st *DiskStorage) ReadStream(key string) (io.ReadCloser, error) {
	// Get file path for key
	kpath, err := st.filepath(key)
	if err != nil {
		return nil, err
	}

	// Attempt to open file (replace ENOENT with our own ErrNotFound)
	file, err := open(kpath, defaultFileROFlags)
	if err != nil {
		return nil, errSwapNotFound(err)
	}

	// Wrap the file in a decompressing reader
	cFile, err := st.config.Compression.Reader(file)
	if err != nil {
		file.Close() // close this here, ignore error
		return nil, err
	}

	// Wrap compressor so the underlying file is
	// closed when the returned reader is closed
	return util.ReadCloserWithCallback(cFile, func() {
		file.Close()
	}), nil
}
|  | 
 | ||||||
// WriteBytes implements Storage.WriteBytes(),
// streaming the in-memory value through WriteStream().
func (st *DiskStorage) WriteBytes(key string, value []byte) error {
	return st.WriteStream(key, bytes.NewReader(value))
}
|  | 
 | ||||||
// WriteStream implements Storage.WriteStream(), writing (optionally
// compressed) reader contents to the file for key. With Overwrite
// disabled, an existing key yields ErrAlreadyExists.
func (st *DiskStorage) WriteStream(key string, r io.Reader) error {
	// Get file path for key
	kpath, err := st.filepath(key)
	if err != nil {
		return err
	}

	// Ensure dirs leading up to file exist
	err = os.MkdirAll(path.Dir(kpath), defaultDirPerms)
	if err != nil {
		return err
	}

	// Prepare to swap error if need-be
	errSwap := errSwapNoop

	// Build file RW flags
	flags := defaultFileRWFlags
	if !st.config.Overwrite {
		// Without overwrite, creation must be exclusive
		flags |= syscall.O_EXCL

		// Catch + replace EEXIST with ErrAlreadyExists
		errSwap = errSwapExist
	}

	// Attempt to open file
	file, err := open(kpath, flags)
	if err != nil {
		return errSwap(err)
	}
	defer file.Close()

	// Wrap the file in a compressor
	cFile, err := st.config.Compression.Writer(file)
	if err != nil {
		return err
	}
	defer cFile.Close()

	// Acquire pooled write buffer, grown to
	// the configured write buffer size
	buf := util.AcquireBuffer(st.config.WriteBufSize)
	defer util.ReleaseBuffer(buf)
	buf.Grow(st.config.WriteBufSize)

	// Copy reader to file via the buffer's backing byte slice
	_, err = io.CopyBuffer(cFile, r, buf.B)
	return err
}
|  | 
 | ||||||
|  | // Stat implements Storage.Stat() | ||||||
|  | func (st *DiskStorage) Stat(key string) (bool, error) { | ||||||
|  | 	// Get file path for key | ||||||
|  | 	kpath, err := st.filepath(key) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return false, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Check for file on disk | ||||||
|  | 	return stat(kpath) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Remove implements Storage.Remove() | ||||||
|  | func (st *DiskStorage) Remove(key string) error { | ||||||
|  | 	// Get file path for key | ||||||
|  | 	kpath, err := st.filepath(key) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Attempt to remove file | ||||||
|  | 	return os.Remove(kpath) | ||||||
|  | } | ||||||
|  | 
 | ||||||
// WalkKeys implements Storage.WalkKeys(), invoking opts.WalkFn once
// per regular file found beneath the store root.
func (st *DiskStorage) WalkKeys(opts *WalkKeysOptions) error {
	// Acquire path builder
	pb := fastpath.AcquireBuilder()
	defer fastpath.ReleaseBuilder(pb)

	// Walk dir for entries
	return util.WalkDir(pb, st.path, func(kpath string, fsentry fs.DirEntry) {
		// Only deal with regular files
		if fsentry.Type().IsRegular() {
			// Get full item path (without root); relies on
			// st.path having been stored with trailing '/'
			kpath = pb.Join(kpath, fsentry.Name())[len(st.path):]

			// Perform provided walk function with the
			// relative path mapped back to its key
			opts.WalkFn(entry(st.config.Transform.PathToKey(kpath)))
		}
	})
}
|  | 
 | ||||||
// filepath checks and returns a formatted filepath for given key,
// rejecting keys whose transformed path would traverse above the
// store root.
func (st *DiskStorage) filepath(key string) (string, error) {
	// Acquire path builder
	pb := util.AcquirePathBuilder()
	defer util.ReleasePathBuilder(pb)

	// Calculate transformed key path
	key = st.config.Transform.KeyToPath(key)

	// Generated joined root path
	pb.AppendString(st.path)
	pb.AppendString(key)

	// If path is dir traversal, and traverses FURTHER
	// than store root, this is an error: comparing the
	// '..' count of the joined path against the root's
	// cached count stops keys escaping the store dir
	if util.CountDotdots(pb.StringPtr()) > st.dots {
		return "", ErrInvalidKey
	}
	return pb.String(), nil
}
							
								
								
									
										63
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/errors.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										63
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/errors.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,63 @@ | ||||||
|  | package storage | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"fmt" | ||||||
|  | 	"syscall" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // errorString is our own simple error type | ||||||
|  | type errorString string | ||||||
|  | 
 | ||||||
|  | // Error implements error | ||||||
|  | func (e errorString) Error() string { | ||||||
|  | 	return string(e) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Extend appends extra information to an errorString | ||||||
|  | func (e errorString) Extend(s string, a ...interface{}) errorString { | ||||||
|  | 	return errorString(string(e) + ": " + fmt.Sprintf(s, a...)) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | var ( | ||||||
|  | 	// ErrNotFound is the error returned when a key cannot be found in storage | ||||||
|  | 	ErrNotFound = errorString("store/storage: key not found") | ||||||
|  | 
 | ||||||
|  | 	// ErrAlreadyExist is the error returned when a key already exists in storage | ||||||
|  | 	ErrAlreadyExists = errorString("store/storage: key already exists") | ||||||
|  | 
 | ||||||
|  | 	// ErrInvalidkey is the error returned when an invalid key is passed to storage | ||||||
|  | 	ErrInvalidKey = errorString("store/storage: invalid key") | ||||||
|  | 
 | ||||||
|  | 	// errPathIsFile is returned when a path for a disk config is actually a file | ||||||
|  | 	errPathIsFile = errorString("store/storage: path is file") | ||||||
|  | 
 | ||||||
|  | 	// errNoHashesWritten is returned when no blocks are written for given input value | ||||||
|  | 	errNoHashesWritten = errorString("storage/storage: no hashes written") | ||||||
|  | 
 | ||||||
|  | 	// errInvalidNode is returned when read on an invalid node in the store is attempted | ||||||
|  | 	errInvalidNode = errorString("store/storage: invalid node") | ||||||
|  | 
 | ||||||
|  | 	// errCorruptNodes is returned when nodes with missing blocks are found during a BlockStorage clean | ||||||
|  | 	errCorruptNodes = errorString("store/storage: corrupted nodes") | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // errSwapNoop performs no error swaps | ||||||
|  | func errSwapNoop(err error) error { | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ErrSwapNotFound swaps syscall.ENOENT for ErrNotFound | ||||||
|  | func errSwapNotFound(err error) error { | ||||||
|  | 	if err == syscall.ENOENT { | ||||||
|  | 		return ErrNotFound | ||||||
|  | 	} | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // errSwapExist swaps syscall.EEXIST for ErrAlreadyExists | ||||||
|  | func errSwapExist(err error) error { | ||||||
|  | 	if err == syscall.EEXIST { | ||||||
|  | 		return ErrAlreadyExists | ||||||
|  | 	} | ||||||
|  | 	return err | ||||||
|  | } | ||||||
							
								
								
									
										48
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/fs.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										48
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/fs.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,48 @@ | ||||||
|  | package storage | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"os" | ||||||
|  | 	"syscall" | ||||||
|  | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/util" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// Default permissions and flag sets used when opening storage files.
const (
	defaultDirPerms      = 0755                                                 // new directories: rwxr-xr-x
	defaultFilePerms     = 0644                                                 // new files: rw-r--r--
	defaultFileROFlags   = syscall.O_RDONLY                                     // read-only open
	defaultFileRWFlags   = syscall.O_CREAT | syscall.O_RDWR                     // read-write open, created if missing
	defaultFileLockFlags = syscall.O_RDONLY | syscall.O_EXCL | syscall.O_CREAT  // lock files: create, fail if already present
)
|  | 
 | ||||||
// NOTE:
// These functions are for opening storage files,
// not necessarily for e.g. initial setup (OpenFile)

// open wraps syscall.Open (retried on EINTR) and hands the raw
// descriptor to os.NewFile. It should not be called directly by
// users; storage implementations use it to open their data files.
func open(path string, flags int) (*os.File, error) {
	var fd int
	err := util.RetryOnEINTR(func() (err error) {
		fd, err = syscall.Open(path, flags, defaultFilePerms)
		return
	})
	if err != nil {
		return nil, err
	}
	return os.NewFile(uintptr(fd), path), nil
}
|  | 
 | ||||||
|  | // stat checks for a file on disk | ||||||
|  | func stat(path string) (bool, error) { | ||||||
|  | 	var stat syscall.Stat_t | ||||||
|  | 	err := util.RetryOnEINTR(func() error { | ||||||
|  | 		return syscall.Stat(path, &stat) | ||||||
|  | 	}) | ||||||
|  | 	if err != nil { | ||||||
|  | 		if err == syscall.ENOENT { | ||||||
|  | 			err = nil | ||||||
|  | 		} | ||||||
|  | 		return false, err | ||||||
|  | 	} | ||||||
|  | 	return true, nil | ||||||
|  | } | ||||||
							
								
								
									
										34
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/lock.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										34
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/lock.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,34 @@ | ||||||
|  | package storage | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"os" | ||||||
|  | 	"syscall" | ||||||
|  | 
 | ||||||
|  | 	"git.iim.gay/grufwub/go-store/util" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
// lockableFile wraps an *os.File with advisory flock(2)-based locking.
type lockableFile struct {
	*os.File
}

// openLock opens (creating exclusively) the file at path with the
// lock-file flag set, returning it wrapped as a lockableFile.
func openLock(path string) (*lockableFile, error) {
	file, err := open(path, defaultFileLockFlags)
	if err != nil {
		return nil, err
	}
	return &lockableFile{file}, nil
}

// lock attempts to take an exclusive, non-blocking advisory lock.
func (f *lockableFile) lock() error {
	return f.flock(syscall.LOCK_EX | syscall.LOCK_NB)
}

// unlock releases the advisory lock (non-blocking).
func (f *lockableFile) unlock() error {
	return f.flock(syscall.LOCK_UN | syscall.LOCK_NB)
}

// flock performs the flock syscall on the file descriptor,
// retried on EINTR.
func (f *lockableFile) flock(how int) error {
	return util.RetryOnEINTR(func() error {
		return syscall.Flock(int(f.Fd()), how)
	})
}
							
								
								
									
										51
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/storage.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										51
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/storage.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,51 @@ | ||||||
|  | package storage | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"io" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // StorageEntry defines a key in Storage | ||||||
|  | type StorageEntry interface { | ||||||
|  | 	// Key returns the storage entry's key | ||||||
|  | 	Key() string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // entry is the simplest possible StorageEntry | ||||||
|  | type entry string | ||||||
|  | 
 | ||||||
|  | func (e entry) Key() string { | ||||||
|  | 	return string(e) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Storage defines a means of storing and accessing key value pairs | ||||||
|  | type Storage interface { | ||||||
|  | 	// Clean removes unused values and unclutters the storage (e.g. removing empty folders) | ||||||
|  | 	Clean() error | ||||||
|  | 
 | ||||||
|  | 	// ReadBytes returns the byte value for key in storage | ||||||
|  | 	ReadBytes(key string) ([]byte, error) | ||||||
|  | 
 | ||||||
|  | 	// ReadStream returns an io.ReadCloser for the value bytes at key in the storage | ||||||
|  | 	ReadStream(key string) (io.ReadCloser, error) | ||||||
|  | 
 | ||||||
|  | 	// WriteBytes writes the supplied value bytes at key in the storage | ||||||
|  | 	WriteBytes(key string, value []byte) error | ||||||
|  | 
 | ||||||
|  | 	// WriteStream writes the bytes from supplied reader at key in the storage | ||||||
|  | 	WriteStream(key string, r io.Reader) error | ||||||
|  | 
 | ||||||
|  | 	// Stat checks if the supplied key is in the storage | ||||||
|  | 	Stat(key string) (bool, error) | ||||||
|  | 
 | ||||||
|  | 	// Remove attempts to remove the supplied key-value pair from storage | ||||||
|  | 	Remove(key string) error | ||||||
|  | 
 | ||||||
|  | 	// WalkKeys walks the keys in the storage | ||||||
|  | 	WalkKeys(opts *WalkKeysOptions) error | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WalkKeysOptions defines how to walk the keys in a storage implementation | ||||||
|  | type WalkKeysOptions struct { | ||||||
|  | 	// WalkFn is the function to apply on each StorageEntry | ||||||
|  | 	WalkFn func(StorageEntry) | ||||||
|  | } | ||||||
							
								
								
									
										25
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/transform.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										25
									
								
								vendor/git.iim.gay/grufwub/go-store/storage/transform.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,25 @@ | ||||||
|  | package storage | ||||||
|  | 
 | ||||||
|  | // KeyTransform defines a method of converting store keys to storage paths (and vice-versa) | ||||||
|  | type KeyTransform interface { | ||||||
|  | 	// KeyToPath converts a supplied key to storage path | ||||||
|  | 	KeyToPath(string) string | ||||||
|  | 
 | ||||||
|  | 	// PathToKey converts a supplied storage path to key | ||||||
|  | 	PathToKey(string) string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type nopKeyTransform struct{} | ||||||
|  | 
 | ||||||
|  | // NopTransform returns a nop key transform (i.e. key = path) | ||||||
|  | func NopTransform() KeyTransform { | ||||||
|  | 	return &nopKeyTransform{} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (t *nopKeyTransform) KeyToPath(key string) string { | ||||||
|  | 	return key | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (t *nopKeyTransform) PathToKey(path string) string { | ||||||
|  | 	return path | ||||||
|  | } | ||||||
							
								
								
									
										105
									
								
								vendor/git.iim.gay/grufwub/go-store/util/fs.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										105
									
								
								vendor/git.iim.gay/grufwub/go-store/util/fs.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,105 @@ | ||||||
|  | package util | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"io/fs" | ||||||
|  | 	"os" | ||||||
|  | 	"strings" | ||||||
|  | 	"syscall" | ||||||
|  | 
 | ||||||
|  | 	"git.iim.gay/grufwub/fastpath" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | var dotdot = "../" | ||||||
|  | 
 | ||||||
|  | // CountDotdots returns the number of "dot-dots" (../) in a cleaned filesystem path | ||||||
|  | func CountDotdots(path string) int { | ||||||
|  | 	if !strings.HasSuffix(path, dotdot) { | ||||||
|  | 		return 0 | ||||||
|  | 	} | ||||||
|  | 	return strings.Count(path, dotdot) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WalkDir traverses the dir tree of the supplied path, performing the supplied walkFn on each entry | ||||||
|  | func WalkDir(pb *fastpath.Builder, path string, walkFn func(string, fs.DirEntry)) error { | ||||||
|  | 	// Read supplied dir path | ||||||
|  | 	dirEntries, err := os.ReadDir(path) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Iter entries | ||||||
|  | 	for _, entry := range dirEntries { | ||||||
|  | 		// Pass to walk fn | ||||||
|  | 		walkFn(path, entry) | ||||||
|  | 
 | ||||||
|  | 		// Recurse dir entries | ||||||
|  | 		if entry.IsDir() { | ||||||
|  | 			err = WalkDir(pb, pb.Join(path, entry.Name()), walkFn) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CleanDirs traverses the dir tree of the supplied path, removing any folders with zero children | ||||||
|  | func CleanDirs(path string) error { | ||||||
|  | 	// Acquire builder | ||||||
|  | 	pb := AcquirePathBuilder() | ||||||
|  | 	defer ReleasePathBuilder(pb) | ||||||
|  | 
 | ||||||
|  | 	// Get dir entries | ||||||
|  | 	entries, err := os.ReadDir(path) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Recurse dirs | ||||||
|  | 	for _, entry := range entries { | ||||||
|  | 		if entry.IsDir() { | ||||||
|  | 			err := cleanDirs(pb, pb.Join(path, entry.Name())) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // cleanDirs performs the actual dir cleaning logic for the exported version | ||||||
|  | func cleanDirs(pb *fastpath.Builder, path string) error { | ||||||
|  | 	// Get dir entries | ||||||
|  | 	entries, err := os.ReadDir(path) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// If no entries, delete | ||||||
|  | 	if len(entries) < 1 { | ||||||
|  | 		return os.Remove(path) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Recurse dirs | ||||||
|  | 	for _, entry := range entries { | ||||||
|  | 		if entry.IsDir() { | ||||||
|  | 			err := cleanDirs(pb, pb.Join(path, entry.Name())) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // RetryOnEINTR is a low-level filesystem function for retrying syscalls on O_EINTR received | ||||||
|  | func RetryOnEINTR(do func() error) error { | ||||||
|  | 	for { | ||||||
|  | 		err := do() | ||||||
|  | 		if err == syscall.EINTR { | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | } | ||||||
							
								
								
									
										42
									
								
								vendor/git.iim.gay/grufwub/go-store/util/io.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										42
									
								
								vendor/git.iim.gay/grufwub/go-store/util/io.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,42 @@ | ||||||
|  | package util | ||||||
|  | 
 | ||||||
|  | import "io" | ||||||
|  | 
 | ||||||
|  | // NopReadCloser turns a supplied io.Reader into io.ReadCloser with a nop Close() implementation | ||||||
|  | func NopReadCloser(r io.Reader) io.ReadCloser { | ||||||
|  | 	return &nopReadCloser{r} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NopWriteCloser turns a supplied io.Writer into io.WriteCloser with a nop Close() implementation | ||||||
|  | func NopWriteCloser(w io.Writer) io.WriteCloser { | ||||||
|  | 	return &nopWriteCloser{w} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReadCloserWithCallback adds a customizable callback to be called upon Close() of a supplied io.ReadCloser | ||||||
|  | func ReadCloserWithCallback(rc io.ReadCloser, cb func()) io.ReadCloser { | ||||||
|  | 	return &callbackReadCloser{ | ||||||
|  | 		ReadCloser: rc, | ||||||
|  | 		callback:   cb, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // nopReadCloser turns an io.Reader -> io.ReadCloser with a nop Close() | ||||||
|  | type nopReadCloser struct{ io.Reader } | ||||||
|  | 
 | ||||||
|  | func (r *nopReadCloser) Close() error { return nil } | ||||||
|  | 
 | ||||||
|  | // nopWriteCloser turns an io.Writer -> io.WriteCloser with a nop Close() | ||||||
|  | type nopWriteCloser struct{ io.Writer } | ||||||
|  | 
 | ||||||
|  | func (w nopWriteCloser) Close() error { return nil } | ||||||
|  | 
 | ||||||
|  | // callbackReadCloser allows adding our own custom callback to an io.ReadCloser | ||||||
|  | type callbackReadCloser struct { | ||||||
|  | 	io.ReadCloser | ||||||
|  | 	callback func() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (c *callbackReadCloser) Close() error { | ||||||
|  | 	defer c.callback() | ||||||
|  | 	return c.ReadCloser.Close() | ||||||
|  | } | ||||||
							
								
								
									
										6
									
								
								vendor/git.iim.gay/grufwub/go-store/util/nocopy.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										6
									
								
								vendor/git.iim.gay/grufwub/go-store/util/nocopy.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,6 @@ | ||||||
|  | package util | ||||||
|  | 
 | ||||||
|  | type NoCopy struct{} | ||||||
|  | 
 | ||||||
|  | func (*NoCopy) Lock()   {} | ||||||
|  | func (*NoCopy) Unlock() {} | ||||||
							
								
								
									
										44
									
								
								vendor/git.iim.gay/grufwub/go-store/util/pools.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										44
									
								
								vendor/git.iim.gay/grufwub/go-store/util/pools.go
									
										
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,44 @@ | ||||||
|  | package util | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"sync" | ||||||
|  | 
 | ||||||
|  | 	"git.iim.gay/grufwub/fastpath" | ||||||
|  | 	"git.iim.gay/grufwub/go-bufpool" | ||||||
|  | 	"git.iim.gay/grufwub/go-bytes" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // pathBuilderPool is the global fastpath.Builder pool, we implement | ||||||
|  | // our own here instead of using fastpath's default one because we | ||||||
|  | // don't want to deal with fastpath's sync.Once locks on every Acquire/Release | ||||||
|  | var pathBuilderPool = sync.Pool{ | ||||||
|  | 	New: func() interface{} { | ||||||
|  | 		pb := fastpath.NewBuilder(make([]byte, 0, 512)) | ||||||
|  | 		return &pb | ||||||
|  | 	}, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AcquirePathBuilder returns a reset fastpath.Builder instance | ||||||
|  | func AcquirePathBuilder() *fastpath.Builder { | ||||||
|  | 	return pathBuilderPool.Get().(*fastpath.Builder) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReleasePathBuilder resets and releases provided fastpath.Builder instance to global pool | ||||||
|  | func ReleasePathBuilder(pb *fastpath.Builder) { | ||||||
|  | 	pb.Reset() | ||||||
|  | 	pathBuilderPool.Put(pb) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // bufferPool is the global BufferPool, we implement this here | ||||||
|  | // so we can share allocations across whatever libaries need them. | ||||||
|  | var bufferPool = bufpool.BufferPool{} | ||||||
|  | 
 | ||||||
|  | // AcquireBuffer returns a reset bytes.Buffer with at least requested capacity | ||||||
|  | func AcquireBuffer(cap int) *bytes.Buffer { | ||||||
|  | 	return bufferPool.Get(cap) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReleaseBuffer resets and releases provided bytes.Buffer to global BufferPool | ||||||
|  | func ReleaseBuffer(buf *bytes.Buffer) { | ||||||
|  | 	bufferPool.Put(buf) | ||||||
|  | } | ||||||
							
								
								
									
										16
									
								
								vendor/github.com/golang/snappy/.gitignore
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										16
									
								
								vendor/github.com/golang/snappy/.gitignore
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,16 @@ | ||||||
|  | cmd/snappytool/snappytool | ||||||
|  | testdata/bench | ||||||
|  | 
 | ||||||
|  | # These explicitly listed benchmark data files are for an obsolete version of | ||||||
|  | # snappy_test.go. | ||||||
|  | testdata/alice29.txt | ||||||
|  | testdata/asyoulik.txt | ||||||
|  | testdata/fireworks.jpeg | ||||||
|  | testdata/geo.protodata | ||||||
|  | testdata/html | ||||||
|  | testdata/html_x_4 | ||||||
|  | testdata/kppkn.gtb | ||||||
|  | testdata/lcet10.txt | ||||||
|  | testdata/paper-100k.pdf | ||||||
|  | testdata/plrabn12.txt | ||||||
|  | testdata/urls.10K | ||||||
							
								
								
									
										17
									
								
								vendor/github.com/golang/snappy/AUTHORS
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										17
									
								
								vendor/github.com/golang/snappy/AUTHORS
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,17 @@ | ||||||
|  | # This is the official list of Snappy-Go authors for copyright purposes. | ||||||
|  | # This file is distinct from the CONTRIBUTORS files. | ||||||
|  | # See the latter for an explanation. | ||||||
|  | 
 | ||||||
|  | # Names should be added to this file as | ||||||
|  | #	Name or Organization <email address> | ||||||
|  | # The email address is not required for organizations. | ||||||
|  | 
 | ||||||
|  | # Please keep the list sorted. | ||||||
|  | 
 | ||||||
|  | Amazon.com, Inc | ||||||
|  | Damian Gryski <dgryski@gmail.com> | ||||||
|  | Google Inc. | ||||||
|  | Jan Mercl <0xjnml@gmail.com> | ||||||
|  | Klaus Post <klauspost@gmail.com> | ||||||
|  | Rodolfo Carvalho <rhcarvalho@gmail.com> | ||||||
|  | Sebastien Binet <seb.binet@gmail.com> | ||||||
							
								
								
									
										39
									
								
								vendor/github.com/golang/snappy/CONTRIBUTORS
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										39
									
								
								vendor/github.com/golang/snappy/CONTRIBUTORS
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,39 @@ | ||||||
|  | # This is the official list of people who can contribute | ||||||
|  | # (and typically have contributed) code to the Snappy-Go repository. | ||||||
|  | # The AUTHORS file lists the copyright holders; this file | ||||||
|  | # lists people.  For example, Google employees are listed here | ||||||
|  | # but not in AUTHORS, because Google holds the copyright. | ||||||
|  | # | ||||||
|  | # The submission process automatically checks to make sure | ||||||
|  | # that people submitting code are listed in this file (by email address). | ||||||
|  | # | ||||||
|  | # Names should be added to this file only after verifying that | ||||||
|  | # the individual or the individual's organization has agreed to | ||||||
|  | # the appropriate Contributor License Agreement, found here: | ||||||
|  | # | ||||||
|  | #     http://code.google.com/legal/individual-cla-v1.0.html | ||||||
|  | #     http://code.google.com/legal/corporate-cla-v1.0.html | ||||||
|  | # | ||||||
|  | # The agreement for individuals can be filled out on the web. | ||||||
|  | # | ||||||
|  | # When adding J Random Contributor's name to this file, | ||||||
|  | # either J's name or J's organization's name should be | ||||||
|  | # added to the AUTHORS file, depending on whether the | ||||||
|  | # individual or corporate CLA was used. | ||||||
|  | 
 | ||||||
|  | # Names should be added to this file like so: | ||||||
|  | #     Name <email address> | ||||||
|  | 
 | ||||||
|  | # Please keep the list sorted. | ||||||
|  | 
 | ||||||
|  | Damian Gryski <dgryski@gmail.com> | ||||||
|  | Jan Mercl <0xjnml@gmail.com> | ||||||
|  | Jonathan Swinney <jswinney@amazon.com> | ||||||
|  | Kai Backman <kaib@golang.org> | ||||||
|  | Klaus Post <klauspost@gmail.com> | ||||||
|  | Marc-Antoine Ruel <maruel@chromium.org> | ||||||
|  | Nigel Tao <nigeltao@golang.org> | ||||||
|  | Rob Pike <r@golang.org> | ||||||
|  | Rodolfo Carvalho <rhcarvalho@gmail.com> | ||||||
|  | Russ Cox <rsc@golang.org> | ||||||
|  | Sebastien Binet <seb.binet@gmail.com> | ||||||
							
								
								
									
										27
									
								
								vendor/github.com/golang/snappy/LICENSE
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										27
									
								
								vendor/github.com/golang/snappy/LICENSE
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,27 @@ | ||||||
|  | Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. | ||||||
|  | 
 | ||||||
|  | Redistribution and use in source and binary forms, with or without | ||||||
|  | modification, are permitted provided that the following conditions are | ||||||
|  | met: | ||||||
|  | 
 | ||||||
|  |    * Redistributions of source code must retain the above copyright | ||||||
|  | notice, this list of conditions and the following disclaimer. | ||||||
|  |    * Redistributions in binary form must reproduce the above | ||||||
|  | copyright notice, this list of conditions and the following disclaimer | ||||||
|  | in the documentation and/or other materials provided with the | ||||||
|  | distribution. | ||||||
|  |    * Neither the name of Google Inc. nor the names of its | ||||||
|  | contributors may be used to endorse or promote products derived from | ||||||
|  | this software without specific prior written permission. | ||||||
|  | 
 | ||||||
|  | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||||
|  | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||||
|  | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||||
|  | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||||
|  | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||||
|  | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||||
|  | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||||
|  | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||||
|  | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||||
|  | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||||
|  | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||||
							
								
								
									
										107
									
								
								vendor/github.com/golang/snappy/README
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										107
									
								
								vendor/github.com/golang/snappy/README
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,107 @@ | ||||||
|  | The Snappy compression format in the Go programming language. | ||||||
|  | 
 | ||||||
|  | To download and install from source: | ||||||
|  | $ go get github.com/golang/snappy | ||||||
|  | 
 | ||||||
|  | Unless otherwise noted, the Snappy-Go source files are distributed | ||||||
|  | under the BSD-style license found in the LICENSE file. | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | Benchmarks. | ||||||
|  | 
 | ||||||
|  | The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten | ||||||
|  | or so files, the same set used by the C++ Snappy code (github.com/google/snappy | ||||||
|  | and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ | ||||||
|  | 3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: | ||||||
|  | 
 | ||||||
|  | "go test -test.bench=." | ||||||
|  | 
 | ||||||
|  | _UFlat0-8         2.19GB/s ± 0%  html | ||||||
|  | _UFlat1-8         1.41GB/s ± 0%  urls | ||||||
|  | _UFlat2-8         23.5GB/s ± 2%  jpg | ||||||
|  | _UFlat3-8         1.91GB/s ± 0%  jpg_200 | ||||||
|  | _UFlat4-8         14.0GB/s ± 1%  pdf | ||||||
|  | _UFlat5-8         1.97GB/s ± 0%  html4 | ||||||
|  | _UFlat6-8          814MB/s ± 0%  txt1 | ||||||
|  | _UFlat7-8          785MB/s ± 0%  txt2 | ||||||
|  | _UFlat8-8          857MB/s ± 0%  txt3 | ||||||
|  | _UFlat9-8          719MB/s ± 1%  txt4 | ||||||
|  | _UFlat10-8        2.84GB/s ± 0%  pb | ||||||
|  | _UFlat11-8        1.05GB/s ± 0%  gaviota | ||||||
|  | 
 | ||||||
|  | _ZFlat0-8         1.04GB/s ± 0%  html | ||||||
|  | _ZFlat1-8          534MB/s ± 0%  urls | ||||||
|  | _ZFlat2-8         15.7GB/s ± 1%  jpg | ||||||
|  | _ZFlat3-8          740MB/s ± 3%  jpg_200 | ||||||
|  | _ZFlat4-8         9.20GB/s ± 1%  pdf | ||||||
|  | _ZFlat5-8          991MB/s ± 0%  html4 | ||||||
|  | _ZFlat6-8          379MB/s ± 0%  txt1 | ||||||
|  | _ZFlat7-8          352MB/s ± 0%  txt2 | ||||||
|  | _ZFlat8-8          396MB/s ± 1%  txt3 | ||||||
|  | _ZFlat9-8          327MB/s ± 1%  txt4 | ||||||
|  | _ZFlat10-8        1.33GB/s ± 1%  pb | ||||||
|  | _ZFlat11-8         605MB/s ± 1%  gaviota | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | "go test -test.bench=. -tags=noasm" | ||||||
|  | 
 | ||||||
|  | _UFlat0-8          621MB/s ± 2%  html | ||||||
|  | _UFlat1-8          494MB/s ± 1%  urls | ||||||
|  | _UFlat2-8         23.2GB/s ± 1%  jpg | ||||||
|  | _UFlat3-8         1.12GB/s ± 1%  jpg_200 | ||||||
|  | _UFlat4-8         4.35GB/s ± 1%  pdf | ||||||
|  | _UFlat5-8          609MB/s ± 0%  html4 | ||||||
|  | _UFlat6-8          296MB/s ± 0%  txt1 | ||||||
|  | _UFlat7-8          288MB/s ± 0%  txt2 | ||||||
|  | _UFlat8-8          309MB/s ± 1%  txt3 | ||||||
|  | _UFlat9-8          280MB/s ± 1%  txt4 | ||||||
|  | _UFlat10-8         753MB/s ± 0%  pb | ||||||
|  | _UFlat11-8         400MB/s ± 0%  gaviota | ||||||
|  | 
 | ||||||
|  | _ZFlat0-8          409MB/s ± 1%  html | ||||||
|  | _ZFlat1-8          250MB/s ± 1%  urls | ||||||
|  | _ZFlat2-8         12.3GB/s ± 1%  jpg | ||||||
|  | _ZFlat3-8          132MB/s ± 0%  jpg_200 | ||||||
|  | _ZFlat4-8         2.92GB/s ± 0%  pdf | ||||||
|  | _ZFlat5-8          405MB/s ± 1%  html4 | ||||||
|  | _ZFlat6-8          179MB/s ± 1%  txt1 | ||||||
|  | _ZFlat7-8          170MB/s ± 1%  txt2 | ||||||
|  | _ZFlat8-8          189MB/s ± 1%  txt3 | ||||||
|  | _ZFlat9-8          164MB/s ± 1%  txt4 | ||||||
|  | _ZFlat10-8         479MB/s ± 1%  pb | ||||||
|  | _ZFlat11-8         270MB/s ± 1%  gaviota | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | For comparison (Go's encoded output is byte-for-byte identical to C++'s), here | ||||||
|  | are the numbers from C++ Snappy's | ||||||
|  | 
 | ||||||
|  | make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log | ||||||
|  | 
 | ||||||
|  | BM_UFlat/0     2.4GB/s  html | ||||||
|  | BM_UFlat/1     1.4GB/s  urls | ||||||
|  | BM_UFlat/2    21.8GB/s  jpg | ||||||
|  | BM_UFlat/3     1.5GB/s  jpg_200 | ||||||
|  | BM_UFlat/4    13.3GB/s  pdf | ||||||
|  | BM_UFlat/5     2.1GB/s  html4 | ||||||
|  | BM_UFlat/6     1.0GB/s  txt1 | ||||||
|  | BM_UFlat/7   959.4MB/s  txt2 | ||||||
|  | BM_UFlat/8     1.0GB/s  txt3 | ||||||
|  | BM_UFlat/9   864.5MB/s  txt4 | ||||||
|  | BM_UFlat/10    2.9GB/s  pb | ||||||
|  | BM_UFlat/11    1.2GB/s  gaviota | ||||||
|  | 
 | ||||||
|  | BM_ZFlat/0   944.3MB/s  html (22.31 %) | ||||||
|  | BM_ZFlat/1   501.6MB/s  urls (47.78 %) | ||||||
|  | BM_ZFlat/2    14.3GB/s  jpg (99.95 %) | ||||||
|  | BM_ZFlat/3   538.3MB/s  jpg_200 (73.00 %) | ||||||
|  | BM_ZFlat/4     8.3GB/s  pdf (83.30 %) | ||||||
|  | BM_ZFlat/5   903.5MB/s  html4 (22.52 %) | ||||||
|  | BM_ZFlat/6   336.0MB/s  txt1 (57.88 %) | ||||||
|  | BM_ZFlat/7   312.3MB/s  txt2 (61.91 %) | ||||||
|  | BM_ZFlat/8   353.1MB/s  txt3 (54.99 %) | ||||||
|  | BM_ZFlat/9   289.9MB/s  txt4 (66.26 %) | ||||||
|  | BM_ZFlat/10    1.2GB/s  pb (19.68 %) | ||||||
|  | BM_ZFlat/11  527.4MB/s  gaviota (37.72 %) | ||||||
							
								
								
									
										241
									
								
								vendor/github.com/golang/snappy/decode.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										241
									
								
								vendor/github.com/golang/snappy/decode.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,241 @@ | ||||||
|  | // Copyright 2011 The Snappy-Go Authors. All rights reserved. | ||||||
|  | // Use of this source code is governed by a BSD-style | ||||||
|  | // license that can be found in the LICENSE file. | ||||||
|  | 
 | ||||||
|  | package snappy | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"encoding/binary" | ||||||
|  | 	"errors" | ||||||
|  | 	"io" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | var ( | ||||||
|  | 	// ErrCorrupt reports that the input is invalid. | ||||||
|  | 	ErrCorrupt = errors.New("snappy: corrupt input") | ||||||
|  | 	// ErrTooLarge reports that the uncompressed length is too large. | ||||||
|  | 	ErrTooLarge = errors.New("snappy: decoded block is too large") | ||||||
|  | 	// ErrUnsupported reports that the input isn't supported. | ||||||
|  | 	ErrUnsupported = errors.New("snappy: unsupported input") | ||||||
|  | 
 | ||||||
|  | 	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // DecodedLen returns the length of the decoded block. | ||||||
|  | func DecodedLen(src []byte) (int, error) { | ||||||
|  | 	v, _, err := decodedLen(src) | ||||||
|  | 	return v, err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // decodedLen returns the length of the decoded block and the number of bytes | ||||||
|  | // that the length header occupied. | ||||||
|  | func decodedLen(src []byte) (blockLen, headerLen int, err error) { | ||||||
|  | 	v, n := binary.Uvarint(src) | ||||||
|  | 	if n <= 0 || v > 0xffffffff { | ||||||
|  | 		return 0, 0, ErrCorrupt | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	const wordSize = 32 << (^uint(0) >> 32 & 1) | ||||||
|  | 	if wordSize == 32 && v > 0x7fffffff { | ||||||
|  | 		return 0, 0, ErrTooLarge | ||||||
|  | 	} | ||||||
|  | 	return int(v), n, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	decodeErrCodeCorrupt                  = 1 | ||||||
|  | 	decodeErrCodeUnsupportedLiteralLength = 2 | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Decode returns the decoded form of src. The returned slice may be a sub- | ||||||
|  | // slice of dst if dst was large enough to hold the entire decoded block. | ||||||
|  | // Otherwise, a newly allocated slice will be returned. | ||||||
|  | // | ||||||
|  | // The dst and src must not overlap. It is valid to pass a nil dst. | ||||||
|  | // | ||||||
|  | // Decode handles the Snappy block format, not the Snappy stream format. | ||||||
|  | func Decode(dst, src []byte) ([]byte, error) { | ||||||
|  | 	dLen, s, err := decodedLen(src) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	if dLen <= len(dst) { | ||||||
|  | 		dst = dst[:dLen] | ||||||
|  | 	} else { | ||||||
|  | 		dst = make([]byte, dLen) | ||||||
|  | 	} | ||||||
|  | 	switch decode(dst, src[s:]) { | ||||||
|  | 	case 0: | ||||||
|  | 		return dst, nil | ||||||
|  | 	case decodeErrCodeUnsupportedLiteralLength: | ||||||
|  | 		return nil, errUnsupportedLiteralLength | ||||||
|  | 	} | ||||||
|  | 	return nil, ErrCorrupt | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewReader returns a new Reader that decompresses from r, using the framing | ||||||
|  | // format described at | ||||||
|  | // https://github.com/google/snappy/blob/master/framing_format.txt | ||||||
|  | func NewReader(r io.Reader) *Reader { | ||||||
|  | 	return &Reader{ | ||||||
|  | 		r:       r, | ||||||
|  | 		decoded: make([]byte, maxBlockSize), | ||||||
|  | 		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Reader is an io.Reader that can read Snappy-compressed bytes. | ||||||
|  | // | ||||||
|  | // Reader handles the Snappy stream format, not the Snappy block format. | ||||||
|  | type Reader struct { | ||||||
|  | 	r       io.Reader | ||||||
|  | 	err     error | ||||||
|  | 	decoded []byte | ||||||
|  | 	buf     []byte | ||||||
|  | 	// decoded[i:j] contains decoded bytes that have not yet been passed on. | ||||||
|  | 	i, j       int | ||||||
|  | 	readHeader bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Reset discards any buffered data, resets all state, and switches the Snappy | ||||||
|  | // reader to read from r. This permits reusing a Reader rather than allocating | ||||||
|  | // a new one. | ||||||
|  | func (r *Reader) Reset(reader io.Reader) { | ||||||
|  | 	r.r = reader | ||||||
|  | 	r.err = nil | ||||||
|  | 	r.i = 0 | ||||||
|  | 	r.j = 0 | ||||||
|  | 	r.readHeader = false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { | ||||||
|  | 	if _, r.err = io.ReadFull(r.r, p); r.err != nil { | ||||||
|  | 		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { | ||||||
|  | 			r.err = ErrCorrupt | ||||||
|  | 		} | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Read satisfies the io.Reader interface. | ||||||
|  | func (r *Reader) Read(p []byte) (int, error) { | ||||||
|  | 	if r.err != nil { | ||||||
|  | 		return 0, r.err | ||||||
|  | 	} | ||||||
|  | 	for { | ||||||
|  | 		if r.i < r.j { | ||||||
|  | 			n := copy(p, r.decoded[r.i:r.j]) | ||||||
|  | 			r.i += n | ||||||
|  | 			return n, nil | ||||||
|  | 		} | ||||||
|  | 		if !r.readFull(r.buf[:4], true) { | ||||||
|  | 			return 0, r.err | ||||||
|  | 		} | ||||||
|  | 		chunkType := r.buf[0] | ||||||
|  | 		if !r.readHeader { | ||||||
|  | 			if chunkType != chunkTypeStreamIdentifier { | ||||||
|  | 				r.err = ErrCorrupt | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			r.readHeader = true | ||||||
|  | 		} | ||||||
|  | 		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 | ||||||
|  | 		if chunkLen > len(r.buf) { | ||||||
|  | 			r.err = ErrUnsupported | ||||||
|  | 			return 0, r.err | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// The chunk types are specified at | ||||||
|  | 		// https://github.com/google/snappy/blob/master/framing_format.txt | ||||||
|  | 		switch chunkType { | ||||||
|  | 		case chunkTypeCompressedData: | ||||||
|  | 			// Section 4.2. Compressed data (chunk type 0x00). | ||||||
|  | 			if chunkLen < checksumSize { | ||||||
|  | 				r.err = ErrCorrupt | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			buf := r.buf[:chunkLen] | ||||||
|  | 			if !r.readFull(buf, false) { | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 | ||||||
|  | 			buf = buf[checksumSize:] | ||||||
|  | 
 | ||||||
|  | 			n, err := DecodedLen(buf) | ||||||
|  | 			if err != nil { | ||||||
|  | 				r.err = err | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			if n > len(r.decoded) { | ||||||
|  | 				r.err = ErrCorrupt | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			if _, err := Decode(r.decoded, buf); err != nil { | ||||||
|  | 				r.err = err | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			if crc(r.decoded[:n]) != checksum { | ||||||
|  | 				r.err = ErrCorrupt | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			r.i, r.j = 0, n | ||||||
|  | 			continue | ||||||
|  | 
 | ||||||
|  | 		case chunkTypeUncompressedData: | ||||||
|  | 			// Section 4.3. Uncompressed data (chunk type 0x01). | ||||||
|  | 			if chunkLen < checksumSize { | ||||||
|  | 				r.err = ErrCorrupt | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			buf := r.buf[:checksumSize] | ||||||
|  | 			if !r.readFull(buf, false) { | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 | ||||||
|  | 			// Read directly into r.decoded instead of via r.buf. | ||||||
|  | 			n := chunkLen - checksumSize | ||||||
|  | 			if n > len(r.decoded) { | ||||||
|  | 				r.err = ErrCorrupt | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			if !r.readFull(r.decoded[:n], false) { | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			if crc(r.decoded[:n]) != checksum { | ||||||
|  | 				r.err = ErrCorrupt | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			r.i, r.j = 0, n | ||||||
|  | 			continue | ||||||
|  | 
 | ||||||
|  | 		case chunkTypeStreamIdentifier: | ||||||
|  | 			// Section 4.1. Stream identifier (chunk type 0xff). | ||||||
|  | 			if chunkLen != len(magicBody) { | ||||||
|  | 				r.err = ErrCorrupt | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			if !r.readFull(r.buf[:len(magicBody)], false) { | ||||||
|  | 				return 0, r.err | ||||||
|  | 			} | ||||||
|  | 			for i := 0; i < len(magicBody); i++ { | ||||||
|  | 				if r.buf[i] != magicBody[i] { | ||||||
|  | 					r.err = ErrCorrupt | ||||||
|  | 					return 0, r.err | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if chunkType <= 0x7f { | ||||||
|  | 			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). | ||||||
|  | 			r.err = ErrUnsupported | ||||||
|  | 			return 0, r.err | ||||||
|  | 		} | ||||||
|  | 		// Section 4.4 Padding (chunk type 0xfe). | ||||||
|  | 		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). | ||||||
|  | 		if !r.readFull(r.buf[:chunkLen], false) { | ||||||
|  | 			return 0, r.err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
							
								
								
									
										490
									
								
								vendor/github.com/golang/snappy/decode_amd64.s
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										490
									
								
								vendor/github.com/golang/snappy/decode_amd64.s
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,490 @@ | ||||||
|  | // Copyright 2016 The Go Authors. All rights reserved. | ||||||
|  | // Use of this source code is governed by a BSD-style | ||||||
|  | // license that can be found in the LICENSE file. | ||||||
|  | 
 | ||||||
|  | // +build !appengine | ||||||
|  | // +build gc | ||||||
|  | // +build !noasm | ||||||
|  | 
 | ||||||
|  | #include "textflag.h" | ||||||
|  | 
 | ||||||
|  | // The asm code generally follows the pure Go code in decode_other.go, except | ||||||
|  | // where marked with a "!!!". | ||||||
|  | 
 | ||||||
|  | // func decode(dst, src []byte) int | ||||||
|  | // | ||||||
|  | // All local variables fit into registers. The non-zero stack size is only to | ||||||
|  | // spill registers and push args when issuing a CALL. The register allocation: | ||||||
|  | //	- AX	scratch | ||||||
|  | //	- BX	scratch | ||||||
|  | //	- CX	length or x | ||||||
|  | //	- DX	offset | ||||||
|  | //	- SI	&src[s] | ||||||
|  | //	- DI	&dst[d] | ||||||
|  | //	+ R8	dst_base | ||||||
|  | //	+ R9	dst_len | ||||||
|  | //	+ R10	dst_base + dst_len | ||||||
|  | //	+ R11	src_base | ||||||
|  | //	+ R12	src_len | ||||||
|  | //	+ R13	src_base + src_len | ||||||
|  | //	- R14	used by doCopy | ||||||
|  | //	- R15	used by doCopy | ||||||
|  | // | ||||||
|  | // The registers R8-R13 (marked with a "+") are set at the start of the | ||||||
|  | // function, and after a CALL returns, and are not otherwise modified. | ||||||
|  | // | ||||||
|  | // The d variable is implicitly DI - R8,  and len(dst)-d is R10 - DI. | ||||||
|  | // The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. | ||||||
|  | TEXT ·decode(SB), NOSPLIT, $48-56 | ||||||
|  | 	// Initialize SI, DI and R8-R13. | ||||||
|  | 	MOVQ dst_base+0(FP), R8 | ||||||
|  | 	MOVQ dst_len+8(FP), R9 | ||||||
|  | 	MOVQ R8, DI | ||||||
|  | 	MOVQ R8, R10 | ||||||
|  | 	ADDQ R9, R10 | ||||||
|  | 	MOVQ src_base+24(FP), R11 | ||||||
|  | 	MOVQ src_len+32(FP), R12 | ||||||
|  | 	MOVQ R11, SI | ||||||
|  | 	MOVQ R11, R13 | ||||||
|  | 	ADDQ R12, R13 | ||||||
|  | 
 | ||||||
|  | loop: | ||||||
|  | 	// for s < len(src) | ||||||
|  | 	CMPQ SI, R13 | ||||||
|  | 	JEQ  end | ||||||
|  | 
 | ||||||
|  | 	// CX = uint32(src[s]) | ||||||
|  | 	// | ||||||
|  | 	// switch src[s] & 0x03 | ||||||
|  | 	MOVBLZX (SI), CX | ||||||
|  | 	MOVL    CX, BX | ||||||
|  | 	ANDL    $3, BX | ||||||
|  | 	CMPL    BX, $1 | ||||||
|  | 	JAE     tagCopy | ||||||
|  | 
 | ||||||
|  | 	// ---------------------------------------- | ||||||
|  | 	// The code below handles literal tags. | ||||||
|  | 
 | ||||||
|  | 	// case tagLiteral: | ||||||
|  | 	// x := uint32(src[s] >> 2) | ||||||
|  | 	// switch | ||||||
|  | 	SHRL $2, CX | ||||||
|  | 	CMPL CX, $60 | ||||||
|  | 	JAE  tagLit60Plus | ||||||
|  | 
 | ||||||
|  | 	// case x < 60: | ||||||
|  | 	// s++ | ||||||
|  | 	INCQ SI | ||||||
|  | 
 | ||||||
|  | doLit: | ||||||
|  | 	// This is the end of the inner "switch", when we have a literal tag. | ||||||
|  | 	// | ||||||
|  | 	// We assume that CX == x and x fits in a uint32, where x is the variable | ||||||
|  | 	// used in the pure Go decode_other.go code. | ||||||
|  | 
 | ||||||
|  | 	// length = int(x) + 1 | ||||||
|  | 	// | ||||||
|  | 	// Unlike the pure Go code, we don't need to check if length <= 0 because | ||||||
|  | 	// CX can hold 64 bits, so the increment cannot overflow. | ||||||
|  | 	INCQ CX | ||||||
|  | 
 | ||||||
|  | 	// Prepare to check if copying length bytes will run past the end of dst or | ||||||
|  | 	// src. | ||||||
|  | 	// | ||||||
|  | 	// AX = len(dst) - d | ||||||
|  | 	// BX = len(src) - s | ||||||
|  | 	MOVQ R10, AX | ||||||
|  | 	SUBQ DI, AX | ||||||
|  | 	MOVQ R13, BX | ||||||
|  | 	SUBQ SI, BX | ||||||
|  | 
 | ||||||
|  | 	// !!! Try a faster technique for short (16 or fewer bytes) copies. | ||||||
|  | 	// | ||||||
|  | 	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { | ||||||
|  | 	//   goto callMemmove // Fall back on calling runtime·memmove. | ||||||
|  | 	// } | ||||||
|  | 	// | ||||||
|  | 	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s | ||||||
|  | 	// against 21 instead of 16, because it cannot assume that all of its input | ||||||
|  | 	// is contiguous in memory and so it needs to leave enough source bytes to | ||||||
|  | 	// read the next tag without refilling buffers, but Go's Decode assumes | ||||||
|  | 	// contiguousness (the src argument is a []byte). | ||||||
|  | 	CMPQ CX, $16 | ||||||
|  | 	JGT  callMemmove | ||||||
|  | 	CMPQ AX, $16 | ||||||
|  | 	JLT  callMemmove | ||||||
|  | 	CMPQ BX, $16 | ||||||
|  | 	JLT  callMemmove | ||||||
|  | 
 | ||||||
|  | 	// !!! Implement the copy from src to dst as a 16-byte load and store. | ||||||
|  | 	// (Decode's documentation says that dst and src must not overlap.) | ||||||
|  | 	// | ||||||
|  | 	// This always copies 16 bytes, instead of only length bytes, but that's | ||||||
|  | 	// OK. If the input is a valid Snappy encoding then subsequent iterations | ||||||
|  | 	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a | ||||||
|  | 	// non-nil error), so the overrun will be ignored. | ||||||
|  | 	// | ||||||
|  | 	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or | ||||||
|  | 	// 16-byte loads and stores. This technique probably wouldn't be as | ||||||
|  | 	// effective on architectures that are fussier about alignment. | ||||||
|  | 	MOVOU 0(SI), X0 | ||||||
|  | 	MOVOU X0, 0(DI) | ||||||
|  | 
 | ||||||
|  | 	// d += length | ||||||
|  | 	// s += length | ||||||
|  | 	ADDQ CX, DI | ||||||
|  | 	ADDQ CX, SI | ||||||
|  | 	JMP  loop | ||||||
|  | 
 | ||||||
|  | callMemmove: | ||||||
|  | 	// if length > len(dst)-d || length > len(src)-s { etc } | ||||||
|  | 	CMPQ CX, AX | ||||||
|  | 	JGT  errCorrupt | ||||||
|  | 	CMPQ CX, BX | ||||||
|  | 	JGT  errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// copy(dst[d:], src[s:s+length]) | ||||||
|  | 	// | ||||||
|  | 	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push | ||||||
|  | 	// DI, SI and CX as arguments. Coincidentally, we also need to spill those | ||||||
|  | 	// three registers to the stack, to save local variables across the CALL. | ||||||
|  | 	MOVQ DI, 0(SP) | ||||||
|  | 	MOVQ SI, 8(SP) | ||||||
|  | 	MOVQ CX, 16(SP) | ||||||
|  | 	MOVQ DI, 24(SP) | ||||||
|  | 	MOVQ SI, 32(SP) | ||||||
|  | 	MOVQ CX, 40(SP) | ||||||
|  | 	CALL runtime·memmove(SB) | ||||||
|  | 
 | ||||||
|  | 	// Restore local variables: unspill registers from the stack and | ||||||
|  | 	// re-calculate R8-R13. | ||||||
|  | 	MOVQ 24(SP), DI | ||||||
|  | 	MOVQ 32(SP), SI | ||||||
|  | 	MOVQ 40(SP), CX | ||||||
|  | 	MOVQ dst_base+0(FP), R8 | ||||||
|  | 	MOVQ dst_len+8(FP), R9 | ||||||
|  | 	MOVQ R8, R10 | ||||||
|  | 	ADDQ R9, R10 | ||||||
|  | 	MOVQ src_base+24(FP), R11 | ||||||
|  | 	MOVQ src_len+32(FP), R12 | ||||||
|  | 	MOVQ R11, R13 | ||||||
|  | 	ADDQ R12, R13 | ||||||
|  | 
 | ||||||
|  | 	// d += length | ||||||
|  | 	// s += length | ||||||
|  | 	ADDQ CX, DI | ||||||
|  | 	ADDQ CX, SI | ||||||
|  | 	JMP  loop | ||||||
|  | 
 | ||||||
|  | tagLit60Plus: | ||||||
|  | 	// !!! This fragment does the | ||||||
|  | 	// | ||||||
|  | 	// s += x - 58; if uint(s) > uint(len(src)) { etc }
 | ||||||
|  | 	// | ||||||
|  | 	// checks. In the asm version, we code it once instead of once per switch case. | ||||||
|  | 	ADDQ CX, SI | ||||||
|  | 	SUBQ $58, SI | ||||||
|  | 	MOVQ SI, BX | ||||||
|  | 	SUBQ R11, BX | ||||||
|  | 	CMPQ BX, R12 | ||||||
|  | 	JA   errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// case x == 60: | ||||||
|  | 	CMPL CX, $61 | ||||||
|  | 	JEQ  tagLit61 | ||||||
|  | 	JA   tagLit62Plus | ||||||
|  | 
 | ||||||
|  | 	// x = uint32(src[s-1]) | ||||||
|  | 	MOVBLZX -1(SI), CX | ||||||
|  | 	JMP     doLit | ||||||
|  | 
 | ||||||
|  | tagLit61: | ||||||
|  | 	// case x == 61: | ||||||
|  | 	// x = uint32(src[s-2]) | uint32(src[s-1])<<8 | ||||||
|  | 	MOVWLZX -2(SI), CX | ||||||
|  | 	JMP     doLit | ||||||
|  | 
 | ||||||
|  | tagLit62Plus: | ||||||
|  | 	CMPL CX, $62 | ||||||
|  | 	JA   tagLit63 | ||||||
|  | 
 | ||||||
|  | 	// case x == 62: | ||||||
|  | 	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 | ||||||
|  | 	MOVWLZX -3(SI), CX | ||||||
|  | 	MOVBLZX -1(SI), BX | ||||||
|  | 	SHLL    $16, BX | ||||||
|  | 	ORL     BX, CX | ||||||
|  | 	JMP     doLit | ||||||
|  | 
 | ||||||
|  | tagLit63: | ||||||
|  | 	// case x == 63: | ||||||
|  | 	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 | ||||||
|  | 	MOVL -4(SI), CX | ||||||
|  | 	JMP  doLit | ||||||
|  | 
 | ||||||
|  | // The code above handles literal tags. | ||||||
|  | // ---------------------------------------- | ||||||
|  | // The code below handles copy tags. | ||||||
|  | 
 | ||||||
|  | tagCopy4: | ||||||
|  | 	// case tagCopy4: | ||||||
|  | 	// s += 5 | ||||||
|  | 	ADDQ $5, SI | ||||||
|  | 
 | ||||||
|  | 	// if uint(s) > uint(len(src)) { etc } | ||||||
|  | 	MOVQ SI, BX | ||||||
|  | 	SUBQ R11, BX | ||||||
|  | 	CMPQ BX, R12 | ||||||
|  | 	JA   errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// length = 1 + int(src[s-5])>>2 | ||||||
|  | 	SHRQ $2, CX | ||||||
|  | 	INCQ CX | ||||||
|  | 
 | ||||||
|  | 	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) | ||||||
|  | 	MOVLQZX -4(SI), DX | ||||||
|  | 	JMP     doCopy | ||||||
|  | 
 | ||||||
|  | tagCopy2: | ||||||
|  | 	// case tagCopy2: | ||||||
|  | 	// s += 3 | ||||||
|  | 	ADDQ $3, SI | ||||||
|  | 
 | ||||||
|  | 	// if uint(s) > uint(len(src)) { etc } | ||||||
|  | 	MOVQ SI, BX | ||||||
|  | 	SUBQ R11, BX | ||||||
|  | 	CMPQ BX, R12 | ||||||
|  | 	JA   errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// length = 1 + int(src[s-3])>>2 | ||||||
|  | 	SHRQ $2, CX | ||||||
|  | 	INCQ CX | ||||||
|  | 
 | ||||||
|  | 	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) | ||||||
|  | 	MOVWQZX -2(SI), DX | ||||||
|  | 	JMP     doCopy | ||||||
|  | 
 | ||||||
|  | tagCopy: | ||||||
|  | 	// We have a copy tag. We assume that: | ||||||
|  | 	//	- BX == src[s] & 0x03 | ||||||
|  | 	//	- CX == src[s] | ||||||
|  | 	CMPQ BX, $2 | ||||||
|  | 	JEQ  tagCopy2 | ||||||
|  | 	JA   tagCopy4 | ||||||
|  | 
 | ||||||
|  | 	// case tagCopy1: | ||||||
|  | 	// s += 2 | ||||||
|  | 	ADDQ $2, SI | ||||||
|  | 
 | ||||||
|  | 	// if uint(s) > uint(len(src)) { etc } | ||||||
|  | 	MOVQ SI, BX | ||||||
|  | 	SUBQ R11, BX | ||||||
|  | 	CMPQ BX, R12 | ||||||
|  | 	JA   errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) | ||||||
|  | 	MOVQ    CX, DX | ||||||
|  | 	ANDQ    $0xe0, DX | ||||||
|  | 	SHLQ    $3, DX | ||||||
|  | 	MOVBQZX -1(SI), BX | ||||||
|  | 	ORQ     BX, DX | ||||||
|  | 
 | ||||||
|  | 	// length = 4 + int(src[s-2])>>2&0x7 | ||||||
|  | 	SHRQ $2, CX | ||||||
|  | 	ANDQ $7, CX | ||||||
|  | 	ADDQ $4, CX | ||||||
|  | 
 | ||||||
|  | doCopy: | ||||||
|  | 	// This is the end of the outer "switch", when we have a copy tag. | ||||||
|  | 	// | ||||||
|  | 	// We assume that: | ||||||
|  | 	//	- CX == length && CX > 0 | ||||||
|  | 	//	- DX == offset | ||||||
|  | 
 | ||||||
|  | 	// if offset <= 0 { etc } | ||||||
|  | 	CMPQ DX, $0 | ||||||
|  | 	JLE  errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// if d < offset { etc } | ||||||
|  | 	MOVQ DI, BX | ||||||
|  | 	SUBQ R8, BX | ||||||
|  | 	CMPQ BX, DX | ||||||
|  | 	JLT  errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// if length > len(dst)-d { etc } | ||||||
|  | 	MOVQ R10, BX | ||||||
|  | 	SUBQ DI, BX | ||||||
|  | 	CMPQ CX, BX | ||||||
|  | 	JGT  errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
 | ||||||
|  | 	// | ||||||
|  | 	// Set: | ||||||
|  | 	//	- R14 = len(dst)-d | ||||||
|  | 	//	- R15 = &dst[d-offset] | ||||||
|  | 	MOVQ R10, R14 | ||||||
|  | 	SUBQ DI, R14 | ||||||
|  | 	MOVQ DI, R15 | ||||||
|  | 	SUBQ DX, R15 | ||||||
|  | 
 | ||||||
|  | 	// !!! Try a faster technique for short (16 or fewer bytes) forward copies. | ||||||
|  | 	// | ||||||
|  | 	// First, try using two 8-byte load/stores, similar to the doLit technique | ||||||
|  | 	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is | ||||||
|  | 	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores | ||||||
|  | 	// and not one 16-byte load/store, and the first store has to be before the | ||||||
|  | 	// second load, due to the overlap if offset is in the range [8, 16). | ||||||
|  | 	// | ||||||
|  | 	// if length > 16 || offset < 8 || len(dst)-d < 16 { | ||||||
|  | 	//   goto slowForwardCopy | ||||||
|  | 	// } | ||||||
|  | 	// copy 16 bytes | ||||||
|  | 	// d += length | ||||||
|  | 	CMPQ CX, $16 | ||||||
|  | 	JGT  slowForwardCopy | ||||||
|  | 	CMPQ DX, $8 | ||||||
|  | 	JLT  slowForwardCopy | ||||||
|  | 	CMPQ R14, $16 | ||||||
|  | 	JLT  slowForwardCopy | ||||||
|  | 	MOVQ 0(R15), AX | ||||||
|  | 	MOVQ AX, 0(DI) | ||||||
|  | 	MOVQ 8(R15), BX | ||||||
|  | 	MOVQ BX, 8(DI) | ||||||
|  | 	ADDQ CX, DI | ||||||
|  | 	JMP  loop | ||||||
|  | 
 | ||||||
|  | slowForwardCopy: | ||||||
|  | 	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we | ||||||
|  | 	// can still try 8-byte load stores, provided we can overrun up to 10 extra | ||||||
|  | 	// bytes. As above, the overrun will be fixed up by subsequent iterations | ||||||
|  | 	// of the outermost loop. | ||||||
|  | 	// | ||||||
|  | 	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its | ||||||
|  | 	// commentary says: | ||||||
|  | 	// | ||||||
|  | 	// ---- | ||||||
|  | 	// | ||||||
|  | 	// The main part of this loop is a simple copy of eight bytes at a time | ||||||
|  | 	// until we've copied (at least) the requested amount of bytes.  However, | ||||||
|  | 	// if d and d-offset are less than eight bytes apart (indicating a | ||||||
|  | 	// repeating pattern of length < 8), we first need to expand the pattern in | ||||||
|  | 	// order to get the correct results. For instance, if the buffer looks like | ||||||
|  | 	// this, with the eight-byte <d-offset> and <d> patterns marked as | ||||||
|  | 	// intervals: | ||||||
|  | 	// | ||||||
|  | 	//    abxxxxxxxxxxxx | ||||||
|  | 	//    [------]           d-offset | ||||||
|  | 	//      [------]         d | ||||||
|  | 	// | ||||||
|  | 	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern | ||||||
|  | 	// once, after which we can move <d> two bytes without moving <d-offset>: | ||||||
|  | 	// | ||||||
|  | 	//    ababxxxxxxxxxx | ||||||
|  | 	//    [------]           d-offset | ||||||
|  | 	//        [------]       d | ||||||
|  | 	// | ||||||
|  | 	// and repeat the exercise until the two no longer overlap. | ||||||
|  | 	// | ||||||
|  | 	// This allows us to do very well in the special case of one single byte | ||||||
|  | 	// repeated many times, without taking a big hit for more general cases. | ||||||
|  | 	// | ||||||
|  | 	// The worst case of extra writing past the end of the match occurs when | ||||||
|  | 	// offset == 1 and length == 1; the last copy will read from byte positions
 | ||||||
|  | 	// [0..7] and write to [4..11], whereas it was only supposed to write to | ||||||
|  | 	// position 1. Thus, ten excess bytes. | ||||||
|  | 	// | ||||||
|  | 	// ---- | ||||||
|  | 	// | ||||||
|  | 	// That "10 byte overrun" worst case is confirmed by Go's | ||||||
|  | 	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy | ||||||
|  | 	// and finishSlowForwardCopy algorithm. | ||||||
|  | 	// | ||||||
|  | 	// if length > len(dst)-d-10 { | ||||||
|  | 	//   goto verySlowForwardCopy | ||||||
|  | 	// } | ||||||
|  | 	SUBQ $10, R14 | ||||||
|  | 	CMPQ CX, R14 | ||||||
|  | 	JGT  verySlowForwardCopy | ||||||
|  | 
 | ||||||
|  | makeOffsetAtLeast8: | ||||||
|  | 	// !!! As above, expand the pattern so that offset >= 8 and we can use | ||||||
|  | 	// 8-byte load/stores. | ||||||
|  | 	// | ||||||
|  | 	// for offset < 8 { | ||||||
|  | 	//   copy 8 bytes from dst[d-offset:] to dst[d:] | ||||||
|  | 	//   length -= offset | ||||||
|  | 	//   d      += offset | ||||||
|  | 	//   offset += offset | ||||||
|  | 	//   // The two previous lines together means that d-offset, and therefore | ||||||
|  | 	//   // R15, is unchanged. | ||||||
|  | 	// } | ||||||
|  | 	CMPQ DX, $8 | ||||||
|  | 	JGE  fixUpSlowForwardCopy | ||||||
|  | 	MOVQ (R15), BX | ||||||
|  | 	MOVQ BX, (DI) | ||||||
|  | 	SUBQ DX, CX | ||||||
|  | 	ADDQ DX, DI | ||||||
|  | 	ADDQ DX, DX | ||||||
|  | 	JMP  makeOffsetAtLeast8 | ||||||
|  | 
 | ||||||
|  | fixUpSlowForwardCopy: | ||||||
|  | 	// !!! Add length (which might be negative now) to d (implied by DI being | ||||||
|  | 	// &dst[d]) so that d ends up at the right place when we jump back to the | ||||||
|  | 	// top of the loop. Before we do that, though, we save DI to AX so that, if | ||||||
|  | 	// length is positive, copying the remaining length bytes will write to the | ||||||
|  | 	// right place. | ||||||
|  | 	MOVQ DI, AX | ||||||
|  | 	ADDQ CX, DI | ||||||
|  | 
 | ||||||
|  | finishSlowForwardCopy: | ||||||
|  | 	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative | ||||||
|  | 	// length means that we overrun, but as above, that will be fixed up by | ||||||
|  | 	// subsequent iterations of the outermost loop. | ||||||
|  | 	CMPQ CX, $0 | ||||||
|  | 	JLE  loop | ||||||
|  | 	MOVQ (R15), BX | ||||||
|  | 	MOVQ BX, (AX) | ||||||
|  | 	ADDQ $8, R15 | ||||||
|  | 	ADDQ $8, AX | ||||||
|  | 	SUBQ $8, CX | ||||||
|  | 	JMP  finishSlowForwardCopy | ||||||
|  | 
 | ||||||
|  | verySlowForwardCopy: | ||||||
|  | 	// verySlowForwardCopy is a simple implementation of forward copy. In C | ||||||
|  | 	// parlance, this is a do/while loop instead of a while loop, since we know | ||||||
|  | 	// that length > 0. In Go syntax: | ||||||
|  | 	// | ||||||
|  | 	// for { | ||||||
|  | 	//   dst[d] = dst[d - offset] | ||||||
|  | 	//   d++ | ||||||
|  | 	//   length-- | ||||||
|  | 	//   if length == 0 { | ||||||
|  | 	//     break | ||||||
|  | 	//   } | ||||||
|  | 	// } | ||||||
|  | 	MOVB (R15), BX | ||||||
|  | 	MOVB BX, (DI) | ||||||
|  | 	INCQ R15 | ||||||
|  | 	INCQ DI | ||||||
|  | 	DECQ CX | ||||||
|  | 	JNZ  verySlowForwardCopy | ||||||
|  | 	JMP  loop | ||||||
|  | 
 | ||||||
|  | // The code above handles copy tags. | ||||||
|  | // ---------------------------------------- | ||||||
|  | 
 | ||||||
|  | end: | ||||||
|  | 	// This is the end of the "for s < len(src)". | ||||||
|  | 	// | ||||||
|  | 	// if d != len(dst) { etc } | ||||||
|  | 	CMPQ DI, R10 | ||||||
|  | 	JNE  errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// return 0 | ||||||
|  | 	MOVQ $0, ret+48(FP) | ||||||
|  | 	RET | ||||||
|  | 
 | ||||||
|  | errCorrupt: | ||||||
|  | 	// return decodeErrCodeCorrupt | ||||||
|  | 	MOVQ $1, ret+48(FP) | ||||||
|  | 	RET | ||||||
							
								
								
									
										494
									
								
								vendor/github.com/golang/snappy/decode_arm64.s
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										494
									
								
								vendor/github.com/golang/snappy/decode_arm64.s
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,494 @@ | ||||||
|  | // Copyright 2020 The Go Authors. All rights reserved. | ||||||
|  | // Use of this source code is governed by a BSD-style | ||||||
|  | // license that can be found in the LICENSE file. | ||||||
|  | 
 | ||||||
|  | // +build !appengine | ||||||
|  | // +build gc | ||||||
|  | // +build !noasm | ||||||
|  | 
 | ||||||
|  | #include "textflag.h" | ||||||
|  | 
 | ||||||
|  | // The asm code generally follows the pure Go code in decode_other.go, except | ||||||
|  | // where marked with a "!!!". | ||||||
|  | 
 | ||||||
|  | // func decode(dst, src []byte) int | ||||||
|  | // | ||||||
|  | // All local variables fit into registers. The non-zero stack size is only to | ||||||
|  | // spill registers and push args when issuing a CALL. The register allocation: | ||||||
|  | //	- R2	scratch | ||||||
|  | //	- R3	scratch | ||||||
|  | //	- R4	length or x | ||||||
|  | //	- R5	offset | ||||||
|  | //	- R6	&src[s] | ||||||
|  | //	- R7	&dst[d] | ||||||
|  | //	+ R8	dst_base | ||||||
|  | //	+ R9	dst_len | ||||||
|  | //	+ R10	dst_base + dst_len | ||||||
|  | //	+ R11	src_base | ||||||
|  | //	+ R12	src_len | ||||||
|  | //	+ R13	src_base + src_len | ||||||
|  | //	- R14	used by doCopy | ||||||
|  | //	- R15	used by doCopy | ||||||
|  | // | ||||||
|  | // The registers R8-R13 (marked with a "+") are set at the start of the | ||||||
|  | // function, and after a CALL returns, and are not otherwise modified. | ||||||
|  | // | ||||||
|  | // The d variable is implicitly R7 - R8,  and len(dst)-d is R10 - R7. | ||||||
|  | // The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. | ||||||
|  | TEXT ·decode(SB), NOSPLIT, $56-56 | ||||||
|  | 	// Initialize R6, R7 and R8-R13. | ||||||
|  | 	MOVD dst_base+0(FP), R8 | ||||||
|  | 	MOVD dst_len+8(FP), R9 | ||||||
|  | 	MOVD R8, R7 | ||||||
|  | 	MOVD R8, R10 | ||||||
|  | 	ADD  R9, R10, R10 | ||||||
|  | 	MOVD src_base+24(FP), R11 | ||||||
|  | 	MOVD src_len+32(FP), R12 | ||||||
|  | 	MOVD R11, R6 | ||||||
|  | 	MOVD R11, R13 | ||||||
|  | 	ADD  R12, R13, R13 | ||||||
|  | 
 | ||||||
|  | loop: | ||||||
|  | 	// for s < len(src) | ||||||
|  | 	CMP R13, R6 | ||||||
|  | 	BEQ end | ||||||
|  | 
 | ||||||
|  | 	// R4 = uint32(src[s]) | ||||||
|  | 	// | ||||||
|  | 	// switch src[s] & 0x03 | ||||||
|  | 	MOVBU (R6), R4 | ||||||
|  | 	MOVW  R4, R3 | ||||||
|  | 	ANDW  $3, R3 | ||||||
|  | 	MOVW  $1, R1 | ||||||
|  | 	CMPW  R1, R3 | ||||||
|  | 	BGE   tagCopy | ||||||
|  | 
 | ||||||
|  | 	// ---------------------------------------- | ||||||
|  | 	// The code below handles literal tags. | ||||||
|  | 
 | ||||||
|  | 	// case tagLiteral: | ||||||
|  | 	// x := uint32(src[s] >> 2) | ||||||
|  | 	// switch | ||||||
|  | 	MOVW $60, R1 | ||||||
|  | 	LSRW $2, R4, R4 | ||||||
|  | 	CMPW R4, R1 | ||||||
|  | 	BLS  tagLit60Plus | ||||||
|  | 
 | ||||||
|  | 	// case x < 60: | ||||||
|  | 	// s++ | ||||||
|  | 	ADD $1, R6, R6 | ||||||
|  | 
 | ||||||
|  | doLit: | ||||||
|  | 	// This is the end of the inner "switch", when we have a literal tag. | ||||||
|  | 	// | ||||||
|  | 	// We assume that R4 == x and x fits in a uint32, where x is the variable | ||||||
|  | 	// used in the pure Go decode_other.go code. | ||||||
|  | 
 | ||||||
|  | 	// length = int(x) + 1 | ||||||
|  | 	// | ||||||
|  | 	// Unlike the pure Go code, we don't need to check if length <= 0 because | ||||||
|  | 	// R4 can hold 64 bits, so the increment cannot overflow. | ||||||
|  | 	ADD $1, R4, R4 | ||||||
|  | 
 | ||||||
|  | 	// Prepare to check if copying length bytes will run past the end of dst or | ||||||
|  | 	// src. | ||||||
|  | 	// | ||||||
|  | 	// R2 = len(dst) - d | ||||||
|  | 	// R3 = len(src) - s | ||||||
|  | 	MOVD R10, R2 | ||||||
|  | 	SUB  R7, R2, R2 | ||||||
|  | 	MOVD R13, R3 | ||||||
|  | 	SUB  R6, R3, R3 | ||||||
|  | 
 | ||||||
|  | 	// !!! Try a faster technique for short (16 or fewer bytes) copies. | ||||||
|  | 	// | ||||||
|  | 	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { | ||||||
|  | 	//   goto callMemmove // Fall back on calling runtime·memmove. | ||||||
|  | 	// } | ||||||
|  | 	// | ||||||
|  | 	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s | ||||||
|  | 	// against 21 instead of 16, because it cannot assume that all of its input | ||||||
|  | 	// is contiguous in memory and so it needs to leave enough source bytes to | ||||||
|  | 	// read the next tag without refilling buffers, but Go's Decode assumes | ||||||
|  | 	// contiguousness (the src argument is a []byte). | ||||||
|  | 	CMP $16, R4 | ||||||
|  | 	BGT callMemmove | ||||||
|  | 	CMP $16, R2 | ||||||
|  | 	BLT callMemmove | ||||||
|  | 	CMP $16, R3 | ||||||
|  | 	BLT callMemmove | ||||||
|  | 
 | ||||||
|  | 	// !!! Implement the copy from src to dst as a 16-byte load and store. | ||||||
|  | 	// (Decode's documentation says that dst and src must not overlap.) | ||||||
|  | 	// | ||||||
|  | 	// This always copies 16 bytes, instead of only length bytes, but that's | ||||||
|  | 	// OK. If the input is a valid Snappy encoding then subsequent iterations | ||||||
|  | 	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a | ||||||
|  | 	// non-nil error), so the overrun will be ignored. | ||||||
|  | 	// | ||||||
|  | 	// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or | ||||||
|  | 	// 16-byte loads and stores. This technique probably wouldn't be as | ||||||
|  | 	// effective on architectures that are fussier about alignment. | ||||||
|  | 	LDP 0(R6), (R14, R15) | ||||||
|  | 	STP (R14, R15), 0(R7) | ||||||
|  | 
 | ||||||
|  | 	// d += length | ||||||
|  | 	// s += length | ||||||
|  | 	ADD R4, R7, R7 | ||||||
|  | 	ADD R4, R6, R6 | ||||||
|  | 	B   loop | ||||||
|  | 
 | ||||||
|  | callMemmove: | ||||||
|  | 	// if length > len(dst)-d || length > len(src)-s { etc } | ||||||
|  | 	CMP R2, R4 | ||||||
|  | 	BGT errCorrupt | ||||||
|  | 	CMP R3, R4 | ||||||
|  | 	BGT errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// copy(dst[d:], src[s:s+length]) | ||||||
|  | 	// | ||||||
|  | 	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push | ||||||
|  | 	// R7, R6 and R4 as arguments. Coincidentally, we also need to spill those | ||||||
|  | 	// three registers to the stack, to save local variables across the CALL. | ||||||
|  | 	MOVD R7, 8(RSP) | ||||||
|  | 	MOVD R6, 16(RSP) | ||||||
|  | 	MOVD R4, 24(RSP) | ||||||
|  | 	MOVD R7, 32(RSP) | ||||||
|  | 	MOVD R6, 40(RSP) | ||||||
|  | 	MOVD R4, 48(RSP) | ||||||
|  | 	CALL runtime·memmove(SB) | ||||||
|  | 
 | ||||||
|  | 	// Restore local variables: unspill registers from the stack and | ||||||
|  | 	// re-calculate R8-R13. | ||||||
|  | 	MOVD 32(RSP), R7 | ||||||
|  | 	MOVD 40(RSP), R6 | ||||||
|  | 	MOVD 48(RSP), R4 | ||||||
|  | 	MOVD dst_base+0(FP), R8 | ||||||
|  | 	MOVD dst_len+8(FP), R9 | ||||||
|  | 	MOVD R8, R10 | ||||||
|  | 	ADD  R9, R10, R10 | ||||||
|  | 	MOVD src_base+24(FP), R11 | ||||||
|  | 	MOVD src_len+32(FP), R12 | ||||||
|  | 	MOVD R11, R13 | ||||||
|  | 	ADD  R12, R13, R13 | ||||||
|  | 
 | ||||||
|  | 	// d += length | ||||||
|  | 	// s += length | ||||||
|  | 	ADD R4, R7, R7 | ||||||
|  | 	ADD R4, R6, R6 | ||||||
|  | 	B   loop | ||||||
|  | 
 | ||||||
|  | tagLit60Plus: | ||||||
|  | 	// !!! This fragment does the | ||||||
|  | 	// | ||||||
|  | 	// s += x - 58; if uint(s) > uint(len(src)) { etc }
 | ||||||
|  | 	// | ||||||
|  | 	// checks. In the asm version, we code it once instead of once per switch case. | ||||||
|  | 	ADD  R4, R6, R6 | ||||||
|  | 	SUB  $58, R6, R6 | ||||||
|  | 	MOVD R6, R3 | ||||||
|  | 	SUB  R11, R3, R3 | ||||||
|  | 	CMP  R12, R3 | ||||||
|  | 	BGT  errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// case x == 60: | ||||||
|  | 	MOVW $61, R1 | ||||||
|  | 	CMPW R1, R4 | ||||||
|  | 	BEQ  tagLit61 | ||||||
|  | 	BGT  tagLit62Plus | ||||||
|  | 
 | ||||||
|  | 	// x = uint32(src[s-1]) | ||||||
|  | 	MOVBU -1(R6), R4 | ||||||
|  | 	B     doLit | ||||||
|  | 
 | ||||||
|  | tagLit61: | ||||||
|  | 	// case x == 61: | ||||||
|  | 	// x = uint32(src[s-2]) | uint32(src[s-1])<<8 | ||||||
|  | 	MOVHU -2(R6), R4 | ||||||
|  | 	B     doLit | ||||||
|  | 
 | ||||||
|  | tagLit62Plus: | ||||||
|  | 	CMPW $62, R4 | ||||||
|  | 	BHI  tagLit63 | ||||||
|  | 
 | ||||||
|  | 	// case x == 62: | ||||||
|  | 	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 | ||||||
|  | 	MOVHU -3(R6), R4 | ||||||
|  | 	MOVBU -1(R6), R3 | ||||||
|  | 	ORR   R3<<16, R4 | ||||||
|  | 	B     doLit | ||||||
|  | 
 | ||||||
|  | tagLit63: | ||||||
|  | 	// case x == 63: | ||||||
|  | 	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 | ||||||
|  | 	MOVWU -4(R6), R4 | ||||||
|  | 	B     doLit | ||||||
|  | 
 | ||||||
|  | 	// The code above handles literal tags. | ||||||
|  | 	// ---------------------------------------- | ||||||
|  | 	// The code below handles copy tags. | ||||||
|  | 
 | ||||||
|  | tagCopy4: | ||||||
|  | 	// case tagCopy4: | ||||||
|  | 	// s += 5 | ||||||
|  | 	ADD $5, R6, R6 | ||||||
|  | 
 | ||||||
|  | 	// if uint(s) > uint(len(src)) { etc } | ||||||
|  | 	MOVD R6, R3 | ||||||
|  | 	SUB  R11, R3, R3 | ||||||
|  | 	CMP  R12, R3 | ||||||
|  | 	BGT  errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// length = 1 + int(src[s-5])>>2 | ||||||
|  | 	MOVD $1, R1 | ||||||
|  | 	ADD  R4>>2, R1, R4 | ||||||
|  | 
 | ||||||
|  | 	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) | ||||||
|  | 	MOVWU -4(R6), R5 | ||||||
|  | 	B     doCopy | ||||||
|  | 
 | ||||||
|  | tagCopy2: | ||||||
|  | 	// case tagCopy2: | ||||||
|  | 	// s += 3 | ||||||
|  | 	ADD $3, R6, R6 | ||||||
|  | 
 | ||||||
|  | 	// if uint(s) > uint(len(src)) { etc } | ||||||
|  | 	MOVD R6, R3 | ||||||
|  | 	SUB  R11, R3, R3 | ||||||
|  | 	CMP  R12, R3 | ||||||
|  | 	BGT  errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// length = 1 + int(src[s-3])>>2 | ||||||
|  | 	MOVD $1, R1 | ||||||
|  | 	ADD  R4>>2, R1, R4 | ||||||
|  | 
 | ||||||
|  | 	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) | ||||||
|  | 	MOVHU -2(R6), R5 | ||||||
|  | 	B     doCopy | ||||||
|  | 
 | ||||||
|  | tagCopy: | ||||||
|  | 	// We have a copy tag. We assume that: | ||||||
|  | 	//	- R3 == src[s] & 0x03 | ||||||
|  | 	//	- R4 == src[s] | ||||||
|  | 	CMP $2, R3 | ||||||
|  | 	BEQ tagCopy2 | ||||||
|  | 	BGT tagCopy4 | ||||||
|  | 
 | ||||||
|  | 	// case tagCopy1: | ||||||
|  | 	// s += 2 | ||||||
|  | 	ADD $2, R6, R6 | ||||||
|  | 
 | ||||||
|  | 	// if uint(s) > uint(len(src)) { etc } | ||||||
|  | 	MOVD R6, R3 | ||||||
|  | 	SUB  R11, R3, R3 | ||||||
|  | 	CMP  R12, R3 | ||||||
|  | 	BGT  errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) | ||||||
|  | 	MOVD  R4, R5 | ||||||
|  | 	AND   $0xe0, R5 | ||||||
|  | 	MOVBU -1(R6), R3 | ||||||
|  | 	ORR   R5<<3, R3, R5 | ||||||
|  | 
 | ||||||
|  | 	// length = 4 + int(src[s-2])>>2&0x7 | ||||||
|  | 	MOVD $7, R1 | ||||||
|  | 	AND  R4>>2, R1, R4 | ||||||
|  | 	ADD  $4, R4, R4 | ||||||
|  | 
 | ||||||
|  | doCopy: | ||||||
|  | 	// This is the end of the outer "switch", when we have a copy tag. | ||||||
|  | 	// | ||||||
|  | 	// We assume that: | ||||||
|  | 	//	- R4 == length && R4 > 0 | ||||||
|  | 	//	- R5 == offset | ||||||
|  | 
 | ||||||
|  | 	// if offset <= 0 { etc } | ||||||
|  | 	MOVD $0, R1 | ||||||
|  | 	CMP  R1, R5 | ||||||
|  | 	BLE  errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// if d < offset { etc } | ||||||
|  | 	MOVD R7, R3 | ||||||
|  | 	SUB  R8, R3, R3 | ||||||
|  | 	CMP  R5, R3 | ||||||
|  | 	BLT  errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// if length > len(dst)-d { etc } | ||||||
|  | 	MOVD R10, R3 | ||||||
|  | 	SUB  R7, R3, R3 | ||||||
|  | 	CMP  R3, R4 | ||||||
|  | 	BGT  errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
 | ||||||
|  | 	// | ||||||
|  | 	// Set: | ||||||
|  | 	//	- R14 = len(dst)-d | ||||||
|  | 	//	- R15 = &dst[d-offset] | ||||||
|  | 	MOVD R10, R14 | ||||||
|  | 	SUB  R7, R14, R14 | ||||||
|  | 	MOVD R7, R15 | ||||||
|  | 	SUB  R5, R15, R15 | ||||||
|  | 
 | ||||||
|  | 	// !!! Try a faster technique for short (16 or fewer bytes) forward copies. | ||||||
|  | 	// | ||||||
|  | 	// First, try using two 8-byte load/stores, similar to the doLit technique | ||||||
|  | 	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is | ||||||
|  | 	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores | ||||||
|  | 	// and not one 16-byte load/store, and the first store has to be before the | ||||||
|  | 	// second load, due to the overlap if offset is in the range [8, 16). | ||||||
|  | 	// | ||||||
|  | 	// if length > 16 || offset < 8 || len(dst)-d < 16 { | ||||||
|  | 	//   goto slowForwardCopy | ||||||
|  | 	// } | ||||||
|  | 	// copy 16 bytes | ||||||
|  | 	// d += length | ||||||
|  | 	CMP  $16, R4 | ||||||
|  | 	BGT  slowForwardCopy | ||||||
|  | 	CMP  $8, R5 | ||||||
|  | 	BLT  slowForwardCopy | ||||||
|  | 	CMP  $16, R14 | ||||||
|  | 	BLT  slowForwardCopy | ||||||
|  | 	MOVD 0(R15), R2 | ||||||
|  | 	MOVD R2, 0(R7) | ||||||
|  | 	MOVD 8(R15), R3 | ||||||
|  | 	MOVD R3, 8(R7) | ||||||
|  | 	ADD  R4, R7, R7 | ||||||
|  | 	B    loop | ||||||
|  | 
 | ||||||
|  | slowForwardCopy: | ||||||
|  | 	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we | ||||||
|  | 	// can still try 8-byte load stores, provided we can overrun up to 10 extra | ||||||
|  | 	// bytes. As above, the overrun will be fixed up by subsequent iterations | ||||||
|  | 	// of the outermost loop. | ||||||
|  | 	// | ||||||
|  | 	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its | ||||||
|  | 	// commentary says: | ||||||
|  | 	// | ||||||
|  | 	// ---- | ||||||
|  | 	// | ||||||
|  | 	// The main part of this loop is a simple copy of eight bytes at a time | ||||||
|  | 	// until we've copied (at least) the requested amount of bytes.  However, | ||||||
|  | 	// if d and d-offset are less than eight bytes apart (indicating a | ||||||
|  | 	// repeating pattern of length < 8), we first need to expand the pattern in | ||||||
|  | 	// order to get the correct results. For instance, if the buffer looks like | ||||||
|  | 	// this, with the eight-byte <d-offset> and <d> patterns marked as | ||||||
|  | 	// intervals: | ||||||
|  | 	// | ||||||
|  | 	//    abxxxxxxxxxxxx | ||||||
|  | 	//    [------]           d-offset | ||||||
|  | 	//      [------]         d | ||||||
|  | 	// | ||||||
|  | 	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern | ||||||
|  | 	// once, after which we can move <d> two bytes without moving <d-offset>: | ||||||
|  | 	// | ||||||
|  | 	//    ababxxxxxxxxxx | ||||||
|  | 	//    [------]           d-offset | ||||||
|  | 	//        [------]       d | ||||||
|  | 	// | ||||||
|  | 	// and repeat the exercise until the two no longer overlap. | ||||||
|  | 	// | ||||||
|  | 	// This allows us to do very well in the special case of one single byte | ||||||
|  | 	// repeated many times, without taking a big hit for more general cases. | ||||||
|  | 	// | ||||||
|  | 	// The worst case of extra writing past the end of the match occurs when | ||||||
|  | 	// offset == 1 and length == 1; the last copy will read from byte positions
 | ||||||
|  | 	// [0..7] and write to [4..11], whereas it was only supposed to write to | ||||||
|  | 	// position 1. Thus, ten excess bytes. | ||||||
|  | 	// | ||||||
|  | 	// ---- | ||||||
|  | 	// | ||||||
|  | 	// That "10 byte overrun" worst case is confirmed by Go's | ||||||
|  | 	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy | ||||||
|  | 	// and finishSlowForwardCopy algorithm. | ||||||
|  | 	// | ||||||
|  | 	// if length > len(dst)-d-10 { | ||||||
|  | 	//   goto verySlowForwardCopy | ||||||
|  | 	// } | ||||||
|  | 	SUB $10, R14, R14 | ||||||
|  | 	CMP R14, R4 | ||||||
|  | 	BGT verySlowForwardCopy | ||||||
|  | 
 | ||||||
|  | makeOffsetAtLeast8: | ||||||
|  | 	// !!! As above, expand the pattern so that offset >= 8 and we can use | ||||||
|  | 	// 8-byte load/stores. | ||||||
|  | 	// | ||||||
|  | 	// for offset < 8 { | ||||||
|  | 	//   copy 8 bytes from dst[d-offset:] to dst[d:] | ||||||
|  | 	//   length -= offset | ||||||
|  | 	//   d      += offset | ||||||
|  | 	//   offset += offset | ||||||
|  | 	//   // The two previous lines together means that d-offset, and therefore | ||||||
|  | 	//   // R15, is unchanged. | ||||||
|  | 	// } | ||||||
|  | 	CMP  $8, R5 | ||||||
|  | 	BGE  fixUpSlowForwardCopy | ||||||
|  | 	MOVD (R15), R3 | ||||||
|  | 	MOVD R3, (R7) | ||||||
|  | 	SUB  R5, R4, R4 | ||||||
|  | 	ADD  R5, R7, R7 | ||||||
|  | 	ADD  R5, R5, R5 | ||||||
|  | 	B    makeOffsetAtLeast8 | ||||||
|  | 
 | ||||||
|  | fixUpSlowForwardCopy: | ||||||
|  | 	// !!! Add length (which might be negative now) to d (implied by R7 being | ||||||
|  | 	// &dst[d]) so that d ends up at the right place when we jump back to the | ||||||
|  | 	// top of the loop. Before we do that, though, we save R7 to R2 so that, if | ||||||
|  | 	// length is positive, copying the remaining length bytes will write to the | ||||||
|  | 	// right place. | ||||||
|  | 	MOVD R7, R2 | ||||||
|  | 	ADD  R4, R7, R7 | ||||||
|  | 
 | ||||||
|  | finishSlowForwardCopy: | ||||||
|  | 	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative | ||||||
|  | 	// length means that we overrun, but as above, that will be fixed up by | ||||||
|  | 	// subsequent iterations of the outermost loop. | ||||||
|  | 	MOVD $0, R1 | ||||||
|  | 	CMP  R1, R4 | ||||||
|  | 	BLE  loop | ||||||
|  | 	MOVD (R15), R3 | ||||||
|  | 	MOVD R3, (R2) | ||||||
|  | 	ADD  $8, R15, R15 | ||||||
|  | 	ADD  $8, R2, R2 | ||||||
|  | 	SUB  $8, R4, R4 | ||||||
|  | 	B    finishSlowForwardCopy | ||||||
|  | 
 | ||||||
|  | verySlowForwardCopy: | ||||||
|  | 	// verySlowForwardCopy is a simple implementation of forward copy. In C | ||||||
|  | 	// parlance, this is a do/while loop instead of a while loop, since we know | ||||||
|  | 	// that length > 0. In Go syntax: | ||||||
|  | 	// | ||||||
|  | 	// for { | ||||||
|  | 	//   dst[d] = dst[d - offset] | ||||||
|  | 	//   d++ | ||||||
|  | 	//   length-- | ||||||
|  | 	//   if length == 0 { | ||||||
|  | 	//     break | ||||||
|  | 	//   } | ||||||
|  | 	// } | ||||||
|  | 	MOVB (R15), R3 | ||||||
|  | 	MOVB R3, (R7) | ||||||
|  | 	ADD  $1, R15, R15 | ||||||
|  | 	ADD  $1, R7, R7 | ||||||
|  | 	SUB  $1, R4, R4 | ||||||
|  | 	CBNZ R4, verySlowForwardCopy | ||||||
|  | 	B    loop | ||||||
|  | 
 | ||||||
|  | 	// The code above handles copy tags. | ||||||
|  | 	// ---------------------------------------- | ||||||
|  | 
 | ||||||
|  | end: | ||||||
|  | 	// This is the end of the "for s < len(src)". | ||||||
|  | 	// | ||||||
|  | 	// if d != len(dst) { etc } | ||||||
|  | 	CMP R10, R7 | ||||||
|  | 	BNE errCorrupt | ||||||
|  | 
 | ||||||
|  | 	// return 0 | ||||||
|  | 	MOVD $0, ret+48(FP) | ||||||
|  | 	RET | ||||||
|  | 
 | ||||||
|  | errCorrupt: | ||||||
|  | 	// return decodeErrCodeCorrupt | ||||||
|  | 	MOVD $1, R2 | ||||||
|  | 	MOVD R2, ret+48(FP) | ||||||
|  | 	RET | ||||||
							
								
								
									
										15
									
								
								vendor/github.com/golang/snappy/decode_asm.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								vendor/github.com/golang/snappy/decode_asm.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,15 @@ | ||||||
|  | // Copyright 2016 The Snappy-Go Authors. All rights reserved. | ||||||
|  | // Use of this source code is governed by a BSD-style | ||||||
|  | // license that can be found in the LICENSE file. | ||||||
|  | 
 | ||||||
//go:build !appengine && gc && !noasm && (amd64 || arm64)
// +build !appengine
// +build gc
// +build !noasm
// +build amd64 arm64

package snappy

// decode has the same semantics as in decode_other.go.
//
// The implementation is provided in assembly (decode_amd64.s /
// decode_arm64.s); //go:noescape asserts that dst and src do not escape
// through this call, so callers' buffers can stay on the stack.
//
//go:noescape
func decode(dst, src []byte) int
							
								
								
									
										115
									
								
								vendor/github.com/golang/snappy/decode_other.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										115
									
								
								vendor/github.com/golang/snappy/decode_other.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,115 @@ | ||||||
|  | // Copyright 2016 The Snappy-Go Authors. All rights reserved. | ||||||
|  | // Use of this source code is governed by a BSD-style | ||||||
|  | // license that can be found in the LICENSE file. | ||||||
|  | 
 | ||||||
|  | // +build !amd64,!arm64 appengine !gc noasm | ||||||
|  | 
 | ||||||
|  | package snappy | ||||||
|  | 
 | ||||||
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
	// d and s are the write/read offsets into dst and src; offset and length
	// describe the current copy element.
	var d, s, offset, length int
	for s < len(src) {
		// The low two bits of each tag byte select literal vs. one of the
		// three copy element encodings.
		switch src[s] & 0x03 {
		case tagLiteral:
			x := uint32(src[s] >> 2)
			switch {
			case x < 60:
				// Literal length is encoded directly in the tag byte.
				s++
			case x == 60:
				// One extra length byte follows the tag.
				s += 2
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-1])
			case x == 61:
				// Two extra little-endian length bytes.
				s += 3
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-2]) | uint32(src[s-1])<<8
			case x == 62:
				// Three extra little-endian length bytes.
				s += 4
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
			case x == 63:
				// Four extra little-endian length bytes.
				s += 5
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
			}
			length = int(x) + 1
			if length <= 0 {
				// int(x) + 1 overflowed int: reject rather than mis-copy.
				return decodeErrCodeUnsupportedLiteralLength
			}
			if length > len(dst)-d || length > len(src)-s {
				return decodeErrCodeCorrupt
			}
			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue

		case tagCopy1:
			// 2-byte copy element: 3-bit length and 11-bit offset.
			s += 2
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 4 + int(src[s-2])>>2&0x7
			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))

		case tagCopy2:
			// 3-byte copy element: 6-bit length and 16-bit offset.
			s += 3
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-3])>>2
			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)

		case tagCopy4:
			// 5-byte copy element: 6-bit length and 32-bit offset.
			s += 5
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-5])>>2
			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
		}

		// A copy must refer backwards into already-written dst and fit in the
		// remaining output.
		if offset <= 0 || d < offset || length > len(dst)-d {
			return decodeErrCodeCorrupt
		}
		// Copy from an earlier sub-slice of dst to a later sub-slice.
		// If no overlap, use the built-in copy:
		if offset >= length {
			copy(dst[d:d+length], dst[d-offset:])
			d += length
			continue
		}

		// Unlike the built-in copy function, this byte-by-byte copy always runs
		// forwards, even if the slices overlap. Conceptually, this is:
		//
		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
		//
		// We align the slices into a and b and show the compiler they are the same size.
		// This allows the loop to run without bounds checks.
		a := dst[d : d+length]
		b := dst[d-offset:]
		b = b[:len(a)]
		for i := range a {
			a[i] = b[i]
		}
		d += length
	}
	// The entire output must have been produced, or the input was corrupt.
	if d != len(dst) {
		return decodeErrCodeCorrupt
	}
	return 0
}
							
								
								
									
										289
									
								
								vendor/github.com/golang/snappy/encode.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										289
									
								
								vendor/github.com/golang/snappy/encode.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,289 @@ | ||||||
|  | // Copyright 2011 The Snappy-Go Authors. All rights reserved. | ||||||
|  | // Use of this source code is governed by a BSD-style | ||||||
|  | // license that can be found in the LICENSE file. | ||||||
|  | 
 | ||||||
|  | package snappy | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"encoding/binary" | ||||||
|  | 	"errors" | ||||||
|  | 	"io" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Encode returns the encoded form of src. The returned slice may be a sub- | ||||||
|  | // slice of dst if dst was large enough to hold the entire encoded block. | ||||||
|  | // Otherwise, a newly allocated slice will be returned. | ||||||
|  | // | ||||||
|  | // The dst and src must not overlap. It is valid to pass a nil dst. | ||||||
|  | // | ||||||
|  | // Encode handles the Snappy block format, not the Snappy stream format. | ||||||
|  | func Encode(dst, src []byte) []byte { | ||||||
|  | 	if n := MaxEncodedLen(len(src)); n < 0 { | ||||||
|  | 		panic(ErrTooLarge) | ||||||
|  | 	} else if len(dst) < n { | ||||||
|  | 		dst = make([]byte, n) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// The block starts with the varint-encoded length of the decompressed bytes. | ||||||
|  | 	d := binary.PutUvarint(dst, uint64(len(src))) | ||||||
|  | 
 | ||||||
|  | 	for len(src) > 0 { | ||||||
|  | 		p := src | ||||||
|  | 		src = nil | ||||||
|  | 		if len(p) > maxBlockSize { | ||||||
|  | 			p, src = p[:maxBlockSize], p[maxBlockSize:] | ||||||
|  | 		} | ||||||
|  | 		if len(p) < minNonLiteralBlockSize { | ||||||
|  | 			d += emitLiteral(dst[d:], p) | ||||||
|  | 		} else { | ||||||
|  | 			d += encodeBlock(dst[d:], p) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return dst[:d] | ||||||
|  | } | ||||||
|  | 
 | ||||||
// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
//
// (16-byte copy width minus the 1-byte minimum literal length, i.e. 15.)
const inputMargin = 16 - 1

// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// could be encoded with a copy tag. This is the minimum with respect to the
// algorithm used by encodeBlock, not a minimum enforced by the file format.
//
// The encoded output must start with at least a 1 byte literal, as there are
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
// from an emitCopy call in encodeBlock's main loop, would require at least
// another inputMargin bytes, for the reason above: we want any emitLiteral
// calls inside encodeBlock's main loop to use the fast path if possible, which
// requires being able to overrun by inputMargin bytes. Thus,
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
//
// The C++ code doesn't use this exact threshold, but it could, as discussed at
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
// optimization. It should not affect the encoded form. This is tested by
// TestSameEncodingAsCppShortCopies.
//
// (i.e. 17 bytes.)
const minNonLiteralBlockSize = 1 + 1 + inputMargin
|  | 
 | ||||||
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
func MaxEncodedLen(srcLen int) int {
	// The block format caps the uncompressed length at 2^32 - 1.
	const limit = 0xffffffff
	n := uint64(srcLen)
	if n > limit {
		return -1
	}
	// Worst-case expansion analysis:
	//
	// Compressed data can be defined as:
	//    compressed := item* literal*
	//    item       := literal* copy
	//
	// The trailing literal sequence has a space blowup of at most 62/60
	// since a literal of length 60 needs one tag byte + one extra byte
	// for length information.
	//
	// Item blowup is trickier to measure. Suppose the "copy" op copies
	// 4 bytes of data. Because of a special check in the encoding code,
	// we produce a 4-byte copy only if the offset is < 65536. Therefore
	// the copy op takes 3 bytes to encode, and this type of item leads
	// to at most the 62/60 blowup for representing literals.
	//
	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
	// enough, it will take 5 bytes to encode the copy op. Therefore the
	// worst case here is a one-byte literal followed by a five-byte copy.
	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
	//
	// This last factor dominates the blowup, so the final estimate is:
	n += 32 + n/6
	if n > limit {
		return -1
	}
	return int(n)
}
|  | 
 | ||||||
|  | var errClosed = errors.New("snappy: Writer is closed") | ||||||
|  | 
 | ||||||
|  | // NewWriter returns a new Writer that compresses to w. | ||||||
|  | // | ||||||
|  | // The Writer returned does not buffer writes. There is no need to Flush or | ||||||
|  | // Close such a Writer. | ||||||
|  | // | ||||||
|  | // Deprecated: the Writer returned is not suitable for many small writes, only | ||||||
|  | // for few large writes. Use NewBufferedWriter instead, which is efficient | ||||||
|  | // regardless of the frequency and shape of the writes, and remember to Close | ||||||
|  | // that Writer when done. | ||||||
|  | func NewWriter(w io.Writer) *Writer { | ||||||
|  | 	return &Writer{ | ||||||
|  | 		w:    w, | ||||||
|  | 		obuf: make([]byte, obufLen), | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewBufferedWriter returns a new Writer that compresses to w, using the | ||||||
|  | // framing format described at | ||||||
|  | // https://github.com/google/snappy/blob/master/framing_format.txt | ||||||
|  | // | ||||||
|  | // The Writer returned buffers writes. Users must call Close to guarantee all | ||||||
|  | // data has been forwarded to the underlying io.Writer. They may also call | ||||||
|  | // Flush zero or more times before calling Close. | ||||||
|  | func NewBufferedWriter(w io.Writer) *Writer { | ||||||
|  | 	return &Writer{ | ||||||
|  | 		w:    w, | ||||||
|  | 		ibuf: make([]byte, 0, maxBlockSize), | ||||||
|  | 		obuf: make([]byte, obufLen), | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
// Writer is an io.Writer that can write Snappy-compressed bytes.
//
// Writer handles the Snappy stream format, not the Snappy block format.
type Writer struct {
	// w is the underlying destination for compressed output.
	w   io.Writer
	// err is sticky: once set, Write/write/Flush/Close return it and perform
	// no further work. Reset clears it.
	err error

	// ibuf is a buffer for the incoming (uncompressed) bytes.
	//
	// Its use is optional. For backwards compatibility, Writers created by the
	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
	// therefore do not need to be Flush'ed or Close'd.
	ibuf []byte

	// obuf is a buffer for the outgoing (compressed) bytes.
	obuf []byte

	// wroteStreamHeader is whether we have written the stream header.
	wroteStreamHeader bool
}
|  | 
 | ||||||
|  | // Reset discards the writer's state and switches the Snappy writer to write to | ||||||
|  | // w. This permits reusing a Writer rather than allocating a new one. | ||||||
|  | func (w *Writer) Reset(writer io.Writer) { | ||||||
|  | 	w.w = writer | ||||||
|  | 	w.err = nil | ||||||
|  | 	if w.ibuf != nil { | ||||||
|  | 		w.ibuf = w.ibuf[:0] | ||||||
|  | 	} | ||||||
|  | 	w.wroteStreamHeader = false | ||||||
|  | } | ||||||
|  | 
 | ||||||
// Write satisfies the io.Writer interface.
//
// For buffered Writers (ibuf != nil) it accumulates p into ibuf, flushing
// full buffers as whole blocks; errors surface via the sticky w.err.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
	if w.ibuf == nil {
		// Do not buffer incoming bytes. This does not perform or compress well
		// if the caller of Writer.Write writes many small slices. This
		// behavior is therefore deprecated, but still supported for backwards
		// compatibility with code that doesn't explicitly Flush or Close.
		return w.write(p)
	}

	// The remainder of this method is based on bufio.Writer.Write from the
	// standard library.

	// While p cannot fit in the free space of ibuf, either write it through
	// directly or top up ibuf and flush it as one full block.
	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
		var n int
		if len(w.ibuf) == 0 {
			// Large write, empty buffer.
			// Write directly from p to avoid copy.
			// The error is deliberately dropped here; it is recorded in w.err
			// and checked after the loop.
			n, _ = w.write(p)
		} else {
			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
			w.ibuf = w.ibuf[:len(w.ibuf)+n]
			w.Flush()
		}
		nRet += n
		p = p[n:]
	}
	if w.err != nil {
		return nRet, w.err
	}
	// Whatever remains of p now fits in ibuf; stash it for a later Flush.
	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
	w.ibuf = w.ibuf[:len(w.ibuf)+n]
	nRet += n
	return nRet, nil
}
|  | 
 | ||||||
// write encodes p one maxBlockSize-sized chunk at a time and writes the
// framed chunks to the underlying io.Writer, returning the number of
// uncompressed bytes consumed. The first call also emits the stream header
// (magicChunk). Any I/O error is recorded in the sticky w.err.
func (w *Writer) write(p []byte) (nRet int, errRet error) {
	if w.err != nil {
		return 0, w.err
	}
	for len(p) > 0 {
		obufStart := len(magicChunk)
		if !w.wroteStreamHeader {
			w.wroteStreamHeader = true
			copy(w.obuf, magicChunk)
			// Start the write at offset 0 so the magic bytes go out with
			// this first chunk.
			obufStart = 0
		}

		// Carve at most one maxBlockSize chunk off the front of p.
		var uncompressed []byte
		if len(p) > maxBlockSize {
			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
		} else {
			uncompressed, p = p, nil
		}
		// The checksum is always over the uncompressed data.
		checksum := crc(uncompressed)

		// Compress the buffer, discarding the result if the improvement
		// isn't at least 12.5%.
		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
		chunkType := uint8(chunkTypeCompressedData)
		chunkLen := 4 + len(compressed)
		obufEnd := obufHeaderLen + len(compressed)
		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
			chunkType = chunkTypeUncompressedData
			chunkLen = 4 + len(uncompressed)
			obufEnd = obufHeaderLen
		}

		// Fill in the per-chunk header that comes before the body:
		// 1 chunk-type byte, a 3-byte little-endian chunk length, then the
		// 4-byte little-endian checksum. The header always sits right after
		// the magic bytes in obuf, regardless of obufStart.
		w.obuf[len(magicChunk)+0] = chunkType
		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)

		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
			w.err = err
			return nRet, err
		}
		// An uncompressed chunk's body lives in p, not obuf, so it needs a
		// second write.
		if chunkType == chunkTypeUncompressedData {
			if _, err := w.w.Write(uncompressed); err != nil {
				w.err = err
				return nRet, err
			}
		}
		nRet += len(uncompressed)
	}
	return nRet, nil
}
|  | 
 | ||||||
|  | // Flush flushes the Writer to its underlying io.Writer. | ||||||
|  | func (w *Writer) Flush() error { | ||||||
|  | 	if w.err != nil { | ||||||
|  | 		return w.err | ||||||
|  | 	} | ||||||
|  | 	if len(w.ibuf) == 0 { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	w.write(w.ibuf) | ||||||
|  | 	w.ibuf = w.ibuf[:0] | ||||||
|  | 	return w.err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Close calls Flush and then closes the Writer. | ||||||
|  | func (w *Writer) Close() error { | ||||||
|  | 	w.Flush() | ||||||
|  | 	ret := w.err | ||||||
|  | 	if w.err == nil { | ||||||
|  | 		w.err = errClosed | ||||||
|  | 	} | ||||||
|  | 	return ret | ||||||
|  | } | ||||||
							
								
								
									
										730
									
								
								vendor/github.com/golang/snappy/encode_amd64.s
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										730
									
								
								vendor/github.com/golang/snappy/encode_amd64.s
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,730 @@ | ||||||
|  | // Copyright 2016 The Go Authors. All rights reserved. | ||||||
|  | // Use of this source code is governed by a BSD-style | ||||||
|  | // license that can be found in the LICENSE file. | ||||||
|  | 
 | ||||||
|  | // +build !appengine | ||||||
|  | // +build gc | ||||||
|  | // +build !noasm | ||||||
|  | 
 | ||||||
|  | #include "textflag.h" | ||||||
|  | 
 | ||||||
|  | // The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a | ||||||
|  | // Go toolchain regression. See https://github.com/golang/go/issues/15426 and | ||||||
|  | // https://github.com/golang/snappy/issues/29 | ||||||
|  | // | ||||||
|  | // As a workaround, the package was built with a known good assembler, and | ||||||
|  | // those instructions were disassembled by "objdump -d" to yield the | ||||||
|  | //	4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15 | ||||||
|  | // style comments, in AT&T asm syntax. Note that rsp here is a physical | ||||||
|  | // register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). | ||||||
|  | // The instructions were then encoded as "BYTE $0x.." sequences, which assemble | ||||||
|  | // fine on Go 1.6. | ||||||
|  | 
 | ||||||
|  | // The asm code generally follows the pure Go code in encode_other.go, except | ||||||
|  | // where marked with a "!!!". | ||||||
|  | 
 | ||||||
|  | // ---------------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
//	- AX	len(lit)
//	- BX	n
//	- DX	return value
//	- DI	&dst[i]
//	- R10	&lit[0]
//
// The 24 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
	MOVQ dst_base+0(FP), DI
	MOVQ lit_base+24(FP), R10
	MOVQ lit_len+32(FP), AX
	MOVQ AX, DX
	// n = len(lit) - 1 is the value encoded in the literal tag byte(s).
	MOVL AX, BX
	SUBL $1, BX

	// Pick the 1-, 2- or 3-byte tag encoding based on n.
	CMPL BX, $60
	JLT  oneByte
	CMPL BX, $256
	JLT  twoBytes

threeBytes:
	// 0xf4 == 61<<2: literal tag whose length is in the next 2 bytes.
	MOVB $0xf4, 0(DI)
	MOVW BX, 1(DI)
	ADDQ $3, DI
	ADDQ $3, DX
	JMP  memmove

twoBytes:
	// 0xf0 == 60<<2: literal tag whose length is in the next 1 byte.
	MOVB $0xf0, 0(DI)
	MOVB BX, 1(DI)
	ADDQ $2, DI
	ADDQ $2, DX
	JMP  memmove

oneByte:
	// Short literal: n itself is packed into the tag byte as n<<2.
	SHLB $2, BX
	MOVB BX, 0(DI)
	ADDQ $1, DI
	ADDQ $1, DX

memmove:
	// The return value (tag bytes + len(lit)) is stored before the call
	// because runtime·memmove clobbers our scratch registers.
	MOVQ DX, ret+48(FP)

	// copy(dst[i:], lit)
	//
	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
	// DI, R10 and AX as arguments.
	MOVQ DI, 0(SP)
	MOVQ R10, 8(SP)
	MOVQ AX, 16(SP)
	CALL runtime·memmove(SB)
	RET
|  | 
 | ||||||
|  | // ---------------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
//	- AX	length
//	- SI	&dst[0]
//	- DI	&dst[i]
//	- R11	offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
	MOVQ dst_base+0(FP), DI
	MOVQ DI, SI
	MOVQ offset+24(FP), R11
	MOVQ length+32(FP), AX

loop0:
	// for length >= 68 { etc }
	CMPL AX, $68
	JLT  step1

	// Emit a length 64 copy, encoded as 3 bytes.
	// 0xfe == (64-1)<<2 | 2 (a 2-byte-offset copy tag).
	MOVB $0xfe, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI
	SUBL $64, AX
	JMP  loop0

step1:
	// if length > 64 { etc }
	CMPL AX, $64
	JLE  step2

	// Emit a length 60 copy, encoded as 3 bytes.
	// 0xee == (60-1)<<2 | 2; leaves a remainder of 5..8 for step2/step3.
	MOVB $0xee, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI
	SUBL $60, AX

step2:
	// if length >= 12 || offset >= 2048 { goto step3 }
	CMPL AX, $12
	JGE  step3
	CMPL R11, $2048
	JGE  step3

	// Emit the remaining copy, encoded as 2 bytes.
	// Tag byte packs: offset high bits <<5 | (length-4)<<2 | 1.
	MOVB R11, 1(DI)
	SHRL $8, R11
	SHLB $5, R11
	SUBB $4, AX
	SHLB $2, AX
	ORB  AX, R11
	ORB  $1, R11
	MOVB R11, 0(DI)
	ADDQ $2, DI

	// Return the number of bytes written.
	SUBQ SI, DI
	MOVQ DI, ret+40(FP)
	RET

step3:
	// Emit the remaining copy, encoded as 3 bytes.
	// Tag byte packs: (length-1)<<2 | 2; offset follows little-endian.
	SUBL $1, AX
	SHLB $2, AX
	ORB  $2, AX
	MOVB AX, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI

	// Return the number of bytes written.
	SUBQ SI, DI
	MOVQ DI, ret+40(FP)
	RET
|  | 
 | ||||||
|  | // ---------------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
// func extendMatch(src []byte, i, j int) int
//
// Returns the largest k such that src[i:i+(k-j)] and src[j:k] are byte-equal,
// comparing 8 bytes at a time while possible.
//
// All local variables fit into registers. The register allocation:
//	- DX	&src[0]
//	- SI	&src[j]
//	- R13	&src[len(src) - 8]
//	- R14	&src[len(src)]
//	- R15	&src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
	MOVQ src_base+0(FP), DX
	MOVQ src_len+8(FP), R14
	MOVQ i+24(FP), R15
	MOVQ j+32(FP), SI
	// Turn the indices and length into absolute pointers.
	ADDQ DX, R14
	ADDQ DX, R15
	ADDQ DX, SI
	MOVQ R14, R13
	SUBQ $8, R13

cmp8:
	// As long as we are 8 or more bytes before the end of src, we can load and
	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
	CMPQ SI, R13
	JA   cmp1
	MOVQ (R15), AX
	MOVQ (SI), BX
	CMPQ AX, BX
	JNE  bsf
	ADDQ $8, R15
	ADDQ $8, SI
	JMP  cmp8

bsf:
	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
	// the index of the first byte that differs. The BSF instruction finds the
	// least significant 1 bit, the amd64 architecture is little-endian, and
	// the shift by 3 converts a bit index to a byte index.
	XORQ AX, BX
	BSFQ BX, BX
	SHRQ $3, BX
	ADDQ BX, SI

	// Convert from &src[ret] to ret.
	SUBQ DX, SI
	MOVQ SI, ret+40(FP)
	RET

cmp1:
	// In src's tail, compare 1 byte at a time.
	CMPQ SI, R14
	JAE  extendMatchEnd
	MOVB (R15), AX
	MOVB (SI), BX
	CMPB AX, BX
	JNE  extendMatchEnd
	ADDQ $1, R15
	ADDQ $1, SI
	JMP  cmp1

extendMatchEnd:
	// Convert from &src[ret] to ret.
	SUBQ DX, SI
	MOVQ SI, ret+40(FP)
	RET
|  | 
 | ||||||
|  | // ---------------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
//	- AX	.	.
//	- BX	.	.
//	- CX	56	shift (note that amd64 shifts by non-immediates must use CX).
//	- DX	64	&src[0], tableSize
//	- SI	72	&src[s]
//	- DI	80	&dst[d]
//	- R9	88	sLimit
//	- R10	.	&src[nextEmit]
//	- R11	96	prevHash, currHash, nextHash, offset
//	- R12	104	&src[base], skip
//	- R13	.	&src[nextS], &src[len(src) - 8]
//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
//	- R15	112	candidate
//
// The second column (56, 64, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
//
// The constant 0x1e35a7bd used in the IMULL instructions below is the hash
// multiplier; each "hash(...)" comment marks one inlined hash computation.
TEXT ·encodeBlock(SB), 0, $32888-56
	MOVQ dst_base+0(FP), DI
	MOVQ src_base+24(FP), SI
	MOVQ src_len+32(FP), R14

	// shift, tableSize := uint32(32-8), 1<<8
	MOVQ $24, CX
	MOVQ $256, DX

calcShift:
	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
	//	shift--
	// }
	CMPQ DX, $16384
	JGE  varTable
	CMPQ DX, R14
	JGE  varTable
	SUBQ $1, CX
	SHLQ $1, DX
	JMP  calcShift

varTable:
	// var table [maxTableSize]uint16
	//
	// In the asm code, unlike the Go code, we can zero-initialize only the
	// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
	// writes 16 bytes, so we can do only tableSize/8 writes instead of the
	// 2048 writes that would zero-initialize all of table's 32768 bytes.
	SHRQ $3, DX
	LEAQ table-32768(SP), BX
	PXOR X0, X0

memclr:
	MOVOU X0, 0(BX)
	ADDQ  $16, BX
	SUBQ  $1, DX
	JNZ   memclr

	// !!! DX = &src[0]
	MOVQ SI, DX

	// sLimit := len(src) - inputMargin
	MOVQ R14, R9
	SUBQ $15, R9

	// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
	// change for the rest of the function.
	MOVQ CX, 56(SP)
	MOVQ DX, 64(SP)
	MOVQ R9, 88(SP)

	// nextEmit := 0
	MOVQ DX, R10

	// s := 1
	ADDQ $1, SI

	// nextHash := hash(load32(src, s), shift)
	MOVL  0(SI), R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

outer:
	// for { etc }

	// skip := 32
	MOVQ $32, R12

	// nextS := s
	MOVQ SI, R13

	// candidate := 0
	MOVQ $0, R15

inner0:
	// for { etc }

	// s := nextS
	MOVQ R13, SI

	// bytesBetweenHashLookups := skip >> 5
	MOVQ R12, R14
	SHRQ $5, R14

	// nextS = s + bytesBetweenHashLookups
	ADDQ R14, R13

	// skip += bytesBetweenHashLookups
	ADDQ R14, R12

	// if nextS > sLimit { goto emitRemainder }
	MOVQ R13, AX
	SUBQ DX, AX
	CMPQ AX, R9
	JA   emitRemainder

	// candidate = int(table[nextHash])
	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
	BYTE $0x4e
	BYTE $0x0f
	BYTE $0xb7
	BYTE $0x7c
	BYTE $0x5c
	BYTE $0x78

	// table[nextHash] = uint16(s)
	MOVQ SI, AX
	SUBQ DX, AX

	// XXX: MOVW AX, table-32768(SP)(R11*2)
	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
	BYTE $0x66
	BYTE $0x42
	BYTE $0x89
	BYTE $0x44
	BYTE $0x5c
	BYTE $0x78

	// nextHash = hash(load32(src, nextS), shift)
	MOVL  0(R13), R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

	// if load32(src, s) != load32(src, candidate) { continue } break
	MOVL 0(SI), AX
	MOVL (DX)(R15*1), BX
	CMPL AX, BX
	JNE  inner0

fourByteMatch:
	// As per the encode_other.go code:
	//
	// A 4-byte match has been found. We'll later see etc.

	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
	// on inputMargin in encode.go.
	MOVQ SI, AX
	SUBQ R10, AX
	CMPQ AX, $16
	JLE  emitLiteralFastPath

	// ----------------------------------------
	// Begin inline of the emitLiteral call.
	//
	// d += emitLiteral(dst[d:], src[nextEmit:s])

	MOVL AX, BX
	SUBL $1, BX

	CMPL BX, $60
	JLT  inlineEmitLiteralOneByte
	CMPL BX, $256
	JLT  inlineEmitLiteralTwoBytes

inlineEmitLiteralThreeBytes:
	MOVB $0xf4, 0(DI)
	MOVW BX, 1(DI)
	ADDQ $3, DI
	JMP  inlineEmitLiteralMemmove

inlineEmitLiteralTwoBytes:
	MOVB $0xf0, 0(DI)
	MOVB BX, 1(DI)
	ADDQ $2, DI
	JMP  inlineEmitLiteralMemmove

inlineEmitLiteralOneByte:
	SHLB $2, BX
	MOVB BX, 0(DI)
	ADDQ $1, DI

inlineEmitLiteralMemmove:
	// Spill local variables (registers) onto the stack; call; unspill.
	//
	// copy(dst[i:], lit)
	//
	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
	// DI, R10 and AX as arguments.
	MOVQ DI, 0(SP)
	MOVQ R10, 8(SP)
	MOVQ AX, 16(SP)
	ADDQ AX, DI              // Finish the "d +=" part of "d += emitLiteral(etc)".
	MOVQ SI, 72(SP)
	MOVQ DI, 80(SP)
	MOVQ R15, 112(SP)
	CALL runtime·memmove(SB)
	MOVQ 56(SP), CX
	MOVQ 64(SP), DX
	MOVQ 72(SP), SI
	MOVQ 80(SP), DI
	MOVQ 88(SP), R9
	MOVQ 112(SP), R15
	JMP  inner1

inlineEmitLiteralEnd:
	// End inline of the emitLiteral call.
	// ----------------------------------------

emitLiteralFastPath:
	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
	MOVB AX, BX
	SUBB $1, BX
	SHLB $2, BX
	MOVB BX, (DI)
	ADDQ $1, DI

	// !!! Implement the copy from lit to dst as a 16-byte load and store.
	// (Encode's documentation says that dst and src must not overlap.)
	//
	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
	// OK. Subsequent iterations will fix up the overrun.
	//
	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
	// 16-byte loads and stores. This technique probably wouldn't be as
	// effective on architectures that are fussier about alignment.
	MOVOU 0(R10), X0
	MOVOU X0, 0(DI)
	ADDQ  AX, DI

inner1:
	// for { etc }

	// base := s
	MOVQ SI, R12

	// !!! offset := base - candidate
	MOVQ R12, R11
	SUBQ R15, R11
	SUBQ DX, R11

	// ----------------------------------------
	// Begin inline of the extendMatch call.
	//
	// s = extendMatch(src, candidate+4, s+4)

	// !!! R14 = &src[len(src)]
	MOVQ src_len+32(FP), R14
	ADDQ DX, R14

	// !!! R13 = &src[len(src) - 8]
	MOVQ R14, R13
	SUBQ $8, R13

	// !!! R15 = &src[candidate + 4]
	ADDQ $4, R15
	ADDQ DX, R15

	// !!! s += 4
	ADDQ $4, SI

inlineExtendMatchCmp8:
	// As long as we are 8 or more bytes before the end of src, we can load and
	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
	CMPQ SI, R13
	JA   inlineExtendMatchCmp1
	MOVQ (R15), AX
	MOVQ (SI), BX
	CMPQ AX, BX
	JNE  inlineExtendMatchBSF
	ADDQ $8, R15
	ADDQ $8, SI
	JMP  inlineExtendMatchCmp8

inlineExtendMatchBSF:
	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
	// the index of the first byte that differs. The BSF instruction finds the
	// least significant 1 bit, the amd64 architecture is little-endian, and
	// the shift by 3 converts a bit index to a byte index.
	XORQ AX, BX
	BSFQ BX, BX
	SHRQ $3, BX
	ADDQ BX, SI
	JMP  inlineExtendMatchEnd

inlineExtendMatchCmp1:
	// In src's tail, compare 1 byte at a time.
	CMPQ SI, R14
	JAE  inlineExtendMatchEnd
	MOVB (R15), AX
	MOVB (SI), BX
	CMPB AX, BX
	JNE  inlineExtendMatchEnd
	ADDQ $1, R15
	ADDQ $1, SI
	JMP  inlineExtendMatchCmp1

inlineExtendMatchEnd:
	// End inline of the extendMatch call.
	// ----------------------------------------

	// ----------------------------------------
	// Begin inline of the emitCopy call.
	//
	// d += emitCopy(dst[d:], base-candidate, s-base)

	// !!! length := s - base
	MOVQ SI, AX
	SUBQ R12, AX

inlineEmitCopyLoop0:
	// for length >= 68 { etc }
	CMPL AX, $68
	JLT  inlineEmitCopyStep1

	// Emit a length 64 copy, encoded as 3 bytes.
	MOVB $0xfe, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI
	SUBL $64, AX
	JMP  inlineEmitCopyLoop0

inlineEmitCopyStep1:
	// if length > 64 { etc }
	CMPL AX, $64
	JLE  inlineEmitCopyStep2

	// Emit a length 60 copy, encoded as 3 bytes.
	MOVB $0xee, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI
	SUBL $60, AX

inlineEmitCopyStep2:
	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
	CMPL AX, $12
	JGE  inlineEmitCopyStep3
	CMPL R11, $2048
	JGE  inlineEmitCopyStep3

	// Emit the remaining copy, encoded as 2 bytes.
	MOVB R11, 1(DI)
	SHRL $8, R11
	SHLB $5, R11
	SUBB $4, AX
	SHLB $2, AX
	ORB  AX, R11
	ORB  $1, R11
	MOVB R11, 0(DI)
	ADDQ $2, DI
	JMP  inlineEmitCopyEnd

inlineEmitCopyStep3:
	// Emit the remaining copy, encoded as 3 bytes.
	SUBL $1, AX
	SHLB $2, AX
	ORB  $2, AX
	MOVB AX, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI

inlineEmitCopyEnd:
	// End inline of the emitCopy call.
	// ----------------------------------------

	// nextEmit = s
	MOVQ SI, R10

	// if s >= sLimit { goto emitRemainder }
	MOVQ SI, AX
	SUBQ DX, AX
	CMPQ AX, R9
	JAE  emitRemainder

	// As per the encode_other.go code:
	//
	// We could immediately etc.

	// x := load64(src, s-1)
	MOVQ -1(SI), R14

	// prevHash := hash(uint32(x>>0), shift)
	MOVL  R14, R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

	// table[prevHash] = uint16(s-1)
	MOVQ SI, AX
	SUBQ DX, AX
	SUBQ $1, AX

	// XXX: MOVW AX, table-32768(SP)(R11*2)
	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
	BYTE $0x66
	BYTE $0x42
	BYTE $0x89
	BYTE $0x44
	BYTE $0x5c
	BYTE $0x78

	// currHash := hash(uint32(x>>8), shift)
	SHRQ  $8, R14
	MOVL  R14, R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

	// candidate = int(table[currHash])
	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
	BYTE $0x4e
	BYTE $0x0f
	BYTE $0xb7
	BYTE $0x7c
	BYTE $0x5c
	BYTE $0x78

	// table[currHash] = uint16(s)
	ADDQ $1, AX

	// XXX: MOVW AX, table-32768(SP)(R11*2)
	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
	BYTE $0x66
	BYTE $0x42
	BYTE $0x89
	BYTE $0x44
	BYTE $0x5c
	BYTE $0x78

	// if uint32(x>>8) == load32(src, candidate) { continue }
	MOVL (DX)(R15*1), BX
	CMPL R14, BX
	JEQ  inner1

	// nextHash = hash(uint32(x>>16), shift)
	SHRQ  $8, R14
	MOVL  R14, R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

	// s++
	ADDQ $1, SI

	// break out of the inner1 for loop, i.e. continue the outer loop.
	JMP outer

emitRemainder:
	// if nextEmit < len(src) { etc }
	MOVQ src_len+32(FP), AX
	ADDQ DX, AX
	CMPQ R10, AX
	JEQ  encodeBlockEnd

	// d += emitLiteral(dst[d:], src[nextEmit:])
	//
	// Push args.
	MOVQ DI, 0(SP)
	MOVQ $0, 8(SP)   // Unnecessary, as the callee ignores it, but conservative.
	MOVQ $0, 16(SP)  // Unnecessary, as the callee ignores it, but conservative.
	MOVQ R10, 24(SP)
	SUBQ R10, AX
	MOVQ AX, 32(SP)
	MOVQ AX, 40(SP)  // Unnecessary, as the callee ignores it, but conservative.

	// Spill local variables (registers) onto the stack; call; unspill.
	MOVQ DI, 80(SP)
	CALL ·emitLiteral(SB)
	MOVQ 80(SP), DI

	// Finish the "d +=" part of "d += emitLiteral(etc)".
	ADDQ 48(SP), DI

encodeBlockEnd:
	// d = &dst[d] - &dst[0], the number of bytes written.
	MOVQ dst_base+0(FP), AX
	SUBQ AX, DI
	MOVQ DI, d+48(FP)
	RET
							
								
								
									
										722
									
								
								vendor/github.com/golang/snappy/encode_arm64.s
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										722
									
								
								vendor/github.com/golang/snappy/encode_arm64.s
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,722 @@ | ||||||
|  | // Copyright 2020 The Go Authors. All rights reserved. | ||||||
|  | // Use of this source code is governed by a BSD-style | ||||||
|  | // license that can be found in the LICENSE file. | ||||||
|  | 
 | ||||||
|  | // +build !appengine | ||||||
|  | // +build gc | ||||||
|  | // +build !noasm | ||||||
|  | 
 | ||||||
|  | #include "textflag.h" | ||||||
|  | 
 | ||||||
|  | // The asm code generally follows the pure Go code in encode_other.go, except | ||||||
|  | // where marked with a "!!!". | ||||||
|  | 
 | ||||||
|  | // ---------------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
// func emitLiteral(dst, lit []byte) int
//
// arm64 counterpart of the amd64 version above; same tag encoding.
//
// All local variables fit into registers. The register allocation:
//	- R3	len(lit)
//	- R4	n
//	- R6	return value
//	- R8	&dst[i]
//	- R10	&lit[0]
//
// The 32 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $32-56
	MOVD dst_base+0(FP), R8
	MOVD lit_base+24(FP), R10
	MOVD lit_len+32(FP), R3
	MOVD R3, R6
	// n = len(lit) - 1 is the value encoded in the literal tag byte(s).
	MOVW R3, R4
	SUBW $1, R4, R4

	// Pick the 1-, 2- or 3-byte tag encoding based on n.
	CMPW $60, R4
	BLT  oneByte
	CMPW $256, R4
	BLT  twoBytes

threeBytes:
	// 0xf4 == 61<<2: literal tag whose length is in the next 2 bytes.
	MOVD $0xf4, R2
	MOVB R2, 0(R8)
	MOVW R4, 1(R8)
	ADD  $3, R8, R8
	ADD  $3, R6, R6
	B    memmove

twoBytes:
	// 0xf0 == 60<<2: literal tag whose length is in the next 1 byte.
	MOVD $0xf0, R2
	MOVB R2, 0(R8)
	MOVB R4, 1(R8)
	ADD  $2, R8, R8
	ADD  $2, R6, R6
	B    memmove

oneByte:
	// Short literal: n itself is packed into the tag byte as n<<2.
	LSLW $2, R4, R4
	MOVB R4, 0(R8)
	ADD  $1, R8, R8
	ADD  $1, R6, R6

memmove:
	// The return value (tag bytes + len(lit)) is stored before the call
	// because runtime·memmove clobbers our scratch registers.
	MOVD R6, ret+48(FP)

	// copy(dst[i:], lit)
	//
	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
	// R8, R10 and R3 as arguments.
	MOVD R8, 8(RSP)
	MOVD R10, 16(RSP)
	MOVD R3, 24(RSP)
	CALL runtime·memmove(SB)
	RET
|  | 
 | ||||||
|  | // ---------------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
//	- R3	length
//	- R7	&dst[0]
//	- R8	&dst[i]
//	- R11	offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
	MOVD dst_base+0(FP), R8
	MOVD R8, R7
	MOVD offset+24(FP), R11
	MOVD length+32(FP), R3

loop0:
	// for length >= 68 { etc }
	CMPW $68, R3
	BLT  step1

	// Emit a length 64 copy, encoded as 3 bytes.
	MOVD $0xfe, R2     // tag byte: 63<<2 | tagCopy2.
	MOVB R2, 0(R8)
	MOVW R11, 1(R8)    // offset as little-endian uint16 (extra high bytes are overwritten).
	ADD  $3, R8, R8
	SUB  $64, R3, R3
	B    loop0

step1:
	// if length > 64 { etc }
	CMP $64, R3
	BLE step2

	// Emit a length 60 copy, encoded as 3 bytes.
	MOVD $0xee, R2     // tag byte: 59<<2 | tagCopy2.
	MOVB R2, 0(R8)
	MOVW R11, 1(R8)
	ADD  $3, R8, R8
	SUB  $60, R3, R3

step2:
	// if length >= 12 || offset >= 2048 { goto step3 }
	CMP  $12, R3
	BGE  step3
	CMPW $2048, R11
	BGE  step3

	// Emit the remaining copy, encoded as 2 bytes: a tagCopy1 op, with
	// (offset>>8)<<5 | (length-4)<<2 | tagCopy1 in the first byte and the
	// offset's low byte in the second.
	MOVB R11, 1(R8)
	LSRW $3, R11, R11
	AND  $0xe0, R11, R11
	SUB  $4, R3, R3
	LSLW $2, R3
	AND  $0xff, R3, R3
	ORRW R3, R11, R11
	ORRW $1, R11, R11
	MOVB R11, 0(R8)
	ADD  $2, R8, R8

	// Return the number of bytes written.
	SUB  R7, R8, R8
	MOVD R8, ret+40(FP)
	RET

step3:
	// Emit the remaining copy, encoded as 3 bytes: (length-1)<<2 | tagCopy2,
	// then the offset as a little-endian uint16.
	SUB  $1, R3, R3
	AND  $0xff, R3, R3
	LSLW $2, R3, R3
	ORRW $2, R3, R3
	MOVB R3, 0(R8)
	MOVW R11, 1(R8)
	ADD  $3, R8, R8

	// Return the number of bytes written.
	SUB  R7, R8, R8
	MOVD R8, ret+40(FP)
	RET
|  | 
 | ||||||
|  | // ---------------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
//	- R6	&src[0]
//	- R7	&src[j]
//	- R13	&src[len(src) - 8]
//	- R14	&src[len(src)]
//	- R15	&src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
	MOVD src_base+0(FP), R6
	MOVD src_len+8(FP), R14
	MOVD i+24(FP), R15
	MOVD j+32(FP), R7
	ADD  R6, R14, R14    // R14 = &src[len(src)]
	ADD  R6, R15, R15    // R15 = &src[i]
	ADD  R6, R7, R7      // R7  = &src[j]
	MOVD R14, R13
	SUB  $8, R13, R13    // R13 = &src[len(src)-8]

cmp8:
	// As long as we are 8 or more bytes before the end of src, we can load and
	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
	CMP  R13, R7
	BHI  cmp1
	MOVD (R15), R3
	MOVD (R7), R4
	CMP  R4, R3
	BNE  bsf
	ADD  $8, R15, R15
	ADD  $8, R7, R7
	B    cmp8

bsf:
	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
	// the index of the first byte that differs.
	// RBIT reverses the bit order, then CLZ counts the leading zeros, the
	// combination of which finds the least significant bit which is set.
	// The arm64 architecture is little-endian, and the shift by 3 converts
	// a bit index to a byte index.
	EOR  R3, R4, R4
	RBIT R4, R4
	CLZ  R4, R4
	ADD  R4>>3, R7, R7

	// Convert from &src[ret] to ret.
	SUB  R6, R7, R7
	MOVD R7, ret+40(FP)
	RET

cmp1:
	// In src's tail, compare 1 byte at a time.
	CMP  R7, R14
	BLS  extendMatchEnd
	MOVB (R15), R3
	MOVB (R7), R4
	CMP  R4, R3
	BNE  extendMatchEnd
	ADD  $1, R15, R15
	ADD  $1, R7, R7
	B    cmp1

extendMatchEnd:
	// Convert from &src[ret] to ret.
	SUB  R6, R7, R7
	MOVD R7, ret+40(FP)
	RET
|  | 
 | ||||||
|  | // ---------------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
//	- R3	.	.
//	- R4	.	.
//	- R5	64	shift
//	- R6	72	&src[0], tableSize
//	- R7	80	&src[s]
//	- R8	88	&dst[d]
//	- R9	96	sLimit
//	- R10	.	&src[nextEmit]
//	- R11	104	prevHash, currHash, nextHash, offset
//	- R12	112	&src[base], skip
//	- R13	.	&src[nextS], &src[len(src) - 8]
//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
//	- R15	120	candidate
//	- R16	.	hash constant, 0x1e35a7bd
//	- R17	.	&table
//	- .  	128	table
//
// The second column (64, 72, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 64 + 64 = 32896.
TEXT ·encodeBlock(SB), 0, $32896-56
	MOVD dst_base+0(FP), R8
	MOVD src_base+24(FP), R7
	MOVD src_len+32(FP), R14

	// shift, tableSize := uint32(32-8), 1<<8
	MOVD  $24, R5
	MOVD  $256, R6
	MOVW  $0xa7bd, R16
	MOVKW $(0x1e35<<16), R16	// R16 = 0x1e35a7bd, the hash multiplier.

calcShift:
	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
	//	shift--
	// }
	MOVD $16384, R2	// maxTableSize
	CMP  R2, R6
	BGE  varTable
	CMP  R14, R6
	BGE  varTable
	SUB  $1, R5, R5
	LSL  $1, R6, R6
	B    calcShift

varTable:
	// var table [maxTableSize]uint16
	//
	// In the asm code, unlike the Go code, we can zero-initialize only the
	// first tableSize elements. Each uint16 element is 2 bytes and each
	// iterations writes 64 bytes, so we can do only tableSize/32 writes
	// instead of the 2048 writes that would zero-initialize all of table's
	// 32768 bytes. This clear could overrun the first tableSize elements, but
	// it won't overrun the allocated stack size.
	ADD  $128, RSP, R17
	MOVD R17, R4

	// !!! R6 = &src[tableSize]
	ADD R6<<1, R17, R6

memclr:
	STP.P (ZR, ZR), 64(R4)
	STP   (ZR, ZR), -48(R4)
	STP   (ZR, ZR), -32(R4)
	STP   (ZR, ZR), -16(R4)
	CMP   R4, R6
	BHI   memclr

	// !!! R6 = &src[0]
	MOVD R7, R6

	// sLimit := len(src) - inputMargin
	MOVD R14, R9
	SUB  $15, R9, R9

	// !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't
	// change for the rest of the function.
	MOVD R5, 64(RSP)
	MOVD R6, 72(RSP)
	MOVD R9, 96(RSP)

	// nextEmit := 0
	MOVD R6, R10

	// s := 1
	ADD $1, R7, R7

	// nextHash := hash(load32(src, s), shift)
	MOVW 0(R7), R11
	MULW R16, R11, R11
	LSRW R5, R11, R11

outer:
	// for { etc }

	// skip := 32
	MOVD $32, R12

	// nextS := s
	MOVD R7, R13

	// candidate := 0
	MOVD $0, R15

inner0:
	// for { etc }

	// s := nextS
	MOVD R13, R7

	// bytesBetweenHashLookups := skip >> 5
	MOVD R12, R14
	LSR  $5, R14, R14

	// nextS = s + bytesBetweenHashLookups
	ADD R14, R13, R13

	// skip += bytesBetweenHashLookups
	ADD R14, R12, R12

	// if nextS > sLimit { goto emitRemainder }
	MOVD R13, R3
	SUB  R6, R3, R3
	CMP  R9, R3
	BHI  emitRemainder

	// candidate = int(table[nextHash])
	MOVHU 0(R17)(R11<<1), R15

	// table[nextHash] = uint16(s)
	MOVD R7, R3
	SUB  R6, R3, R3

	MOVH R3, 0(R17)(R11<<1)

	// nextHash = hash(load32(src, nextS), shift)
	MOVW 0(R13), R11
	MULW R16, R11
	LSRW R5, R11, R11

	// if load32(src, s) != load32(src, candidate) { continue } break
	MOVW 0(R7), R3
	MOVW (R6)(R15*1), R4
	CMPW R4, R3
	BNE  inner0

fourByteMatch:
	// As per the encode_other.go code:
	//
	// A 4-byte match has been found. We'll later see etc.

	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
	// on inputMargin in encode.go.
	MOVD R7, R3
	SUB  R10, R3, R3
	CMP  $16, R3
	BLE  emitLiteralFastPath

	// ----------------------------------------
	// Begin inline of the emitLiteral call.
	//
	// d += emitLiteral(dst[d:], src[nextEmit:s])

	MOVW R3, R4
	SUBW $1, R4, R4

	MOVW $60, R2
	CMPW R2, R4
	BLT  inlineEmitLiteralOneByte
	MOVW $256, R2
	CMPW R2, R4
	BLT  inlineEmitLiteralTwoBytes

inlineEmitLiteralThreeBytes:
	MOVD $0xf4, R1
	MOVB R1, 0(R8)
	MOVW R4, 1(R8)
	ADD  $3, R8, R8
	B    inlineEmitLiteralMemmove

inlineEmitLiteralTwoBytes:
	MOVD $0xf0, R1
	MOVB R1, 0(R8)
	MOVB R4, 1(R8)
	ADD  $2, R8, R8
	B    inlineEmitLiteralMemmove

inlineEmitLiteralOneByte:
	LSLW $2, R4, R4
	MOVB R4, 0(R8)
	ADD  $1, R8, R8

inlineEmitLiteralMemmove:
	// Spill local variables (registers) onto the stack; call; unspill.
	//
	// copy(dst[i:], lit)
	//
	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
	// R8, R10 and R3 as arguments.
	MOVD R8, 8(RSP)
	MOVD R10, 16(RSP)
	MOVD R3, 24(RSP)

	// Finish the "d +=" part of "d += emitLiteral(etc)".
	ADD   R3, R8, R8
	MOVD  R7, 80(RSP)
	MOVD  R8, 88(RSP)
	MOVD  R15, 120(RSP)
	CALL  runtime·memmove(SB)
	MOVD  64(RSP), R5
	MOVD  72(RSP), R6
	MOVD  80(RSP), R7
	MOVD  88(RSP), R8
	MOVD  96(RSP), R9
	MOVD  120(RSP), R15
	ADD   $128, RSP, R17
	MOVW  $0xa7bd, R16	// Re-materialize the hash constant clobbered by the call.
	MOVKW $(0x1e35<<16), R16
	B     inner1

inlineEmitLiteralEnd:
	// End inline of the emitLiteral call.
	// ----------------------------------------

emitLiteralFastPath:
	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
	MOVB R3, R4
	SUBW $1, R4, R4
	AND  $0xff, R4, R4
	LSLW $2, R4, R4
	MOVB R4, (R8)
	ADD  $1, R8, R8

	// !!! Implement the copy from lit to dst as a 16-byte load and store.
	// (Encode's documentation says that dst and src must not overlap.)
	//
	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
	// OK. Subsequent iterations will fix up the overrun.
	//
	// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
	// 16-byte loads and stores. This technique probably wouldn't be as
	// effective on architectures that are fussier about alignment.
	LDP 0(R10), (R0, R1)
	STP (R0, R1), 0(R8)
	ADD R3, R8, R8

inner1:
	// for { etc }

	// base := s
	MOVD R7, R12

	// !!! offset := base - candidate
	MOVD R12, R11
	SUB  R15, R11, R11
	SUB  R6, R11, R11

	// ----------------------------------------
	// Begin inline of the extendMatch call.
	//
	// s = extendMatch(src, candidate+4, s+4)

	// !!! R14 = &src[len(src)]
	MOVD src_len+32(FP), R14
	ADD  R6, R14, R14

	// !!! R13 = &src[len(src) - 8]
	MOVD R14, R13
	SUB  $8, R13, R13

	// !!! R15 = &src[candidate + 4]
	ADD $4, R15, R15
	ADD R6, R15, R15

	// !!! s += 4
	ADD $4, R7, R7

inlineExtendMatchCmp8:
	// As long as we are 8 or more bytes before the end of src, we can load and
	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
	CMP  R13, R7
	BHI  inlineExtendMatchCmp1
	MOVD (R15), R3
	MOVD (R7), R4
	CMP  R4, R3
	BNE  inlineExtendMatchBSF
	ADD  $8, R15, R15
	ADD  $8, R7, R7
	B    inlineExtendMatchCmp8

inlineExtendMatchBSF:
	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
	// the index of the first byte that differs.
	// RBIT reverses the bit order, then CLZ counts the leading zeros, the
	// combination of which finds the least significant bit which is set.
	// The arm64 architecture is little-endian, and the shift by 3 converts
	// a bit index to a byte index.
	EOR  R3, R4, R4
	RBIT R4, R4
	CLZ  R4, R4
	ADD  R4>>3, R7, R7
	B    inlineExtendMatchEnd

inlineExtendMatchCmp1:
	// In src's tail, compare 1 byte at a time.
	CMP  R7, R14
	BLS  inlineExtendMatchEnd
	MOVB (R15), R3
	MOVB (R7), R4
	CMP  R4, R3
	BNE  inlineExtendMatchEnd
	ADD  $1, R15, R15
	ADD  $1, R7, R7
	B    inlineExtendMatchCmp1

inlineExtendMatchEnd:
	// End inline of the extendMatch call.
	// ----------------------------------------

	// ----------------------------------------
	// Begin inline of the emitCopy call.
	//
	// d += emitCopy(dst[d:], base-candidate, s-base)

	// !!! length := s - base
	MOVD R7, R3
	SUB  R12, R3, R3

inlineEmitCopyLoop0:
	// for length >= 68 { etc }
	MOVW $68, R2
	CMPW R2, R3
	BLT  inlineEmitCopyStep1

	// Emit a length 64 copy, encoded as 3 bytes.
	MOVD $0xfe, R1
	MOVB R1, 0(R8)
	MOVW R11, 1(R8)
	ADD  $3, R8, R8
	SUBW $64, R3, R3
	B    inlineEmitCopyLoop0

inlineEmitCopyStep1:
	// if length > 64 { etc }
	MOVW $64, R2
	CMPW R2, R3
	BLE  inlineEmitCopyStep2

	// Emit a length 60 copy, encoded as 3 bytes.
	MOVD $0xee, R1
	MOVB R1, 0(R8)
	MOVW R11, 1(R8)
	ADD  $3, R8, R8
	SUBW $60, R3, R3

inlineEmitCopyStep2:
	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
	MOVW $12, R2
	CMPW R2, R3
	BGE  inlineEmitCopyStep3
	MOVW $2048, R2
	CMPW R2, R11
	BGE  inlineEmitCopyStep3

	// Emit the remaining copy, encoded as 2 bytes.
	MOVB R11, 1(R8)
	LSRW $8, R11, R11
	LSLW $5, R11, R11
	SUBW $4, R3, R3
	AND  $0xff, R3, R3
	LSLW $2, R3, R3
	ORRW R3, R11, R11
	ORRW $1, R11, R11
	MOVB R11, 0(R8)
	ADD  $2, R8, R8
	B    inlineEmitCopyEnd

inlineEmitCopyStep3:
	// Emit the remaining copy, encoded as 3 bytes.
	SUBW $1, R3, R3
	LSLW $2, R3, R3
	ORRW $2, R3, R3
	MOVB R3, 0(R8)
	MOVW R11, 1(R8)
	ADD  $3, R8, R8

inlineEmitCopyEnd:
	// End inline of the emitCopy call.
	// ----------------------------------------

	// nextEmit = s
	MOVD R7, R10

	// if s >= sLimit { goto emitRemainder }
	MOVD R7, R3
	SUB  R6, R3, R3
	CMP  R3, R9
	BLS  emitRemainder

	// As per the encode_other.go code:
	//
	// We could immediately etc.

	// x := load64(src, s-1)
	MOVD -1(R7), R14

	// prevHash := hash(uint32(x>>0), shift)
	MOVW R14, R11
	MULW R16, R11, R11
	LSRW R5, R11, R11

	// table[prevHash] = uint16(s-1)
	MOVD R7, R3
	SUB  R6, R3, R3
	SUB  $1, R3, R3

	MOVHU R3, 0(R17)(R11<<1)

	// currHash := hash(uint32(x>>8), shift)
	LSR  $8, R14, R14
	MOVW R14, R11
	MULW R16, R11, R11
	LSRW R5, R11, R11

	// candidate = int(table[currHash])
	MOVHU 0(R17)(R11<<1), R15

	// table[currHash] = uint16(s)
	ADD   $1, R3, R3
	MOVHU R3, 0(R17)(R11<<1)

	// if uint32(x>>8) == load32(src, candidate) { continue }
	MOVW (R6)(R15*1), R4
	CMPW R4, R14
	BEQ  inner1

	// nextHash = hash(uint32(x>>16), shift)
	LSR  $8, R14, R14
	MOVW R14, R11
	MULW R16, R11, R11
	LSRW R5, R11, R11

	// s++
	ADD $1, R7, R7

	// break out of the inner1 for loop, i.e. continue the outer loop.
	B outer

emitRemainder:
	// if nextEmit < len(src) { etc }
	MOVD src_len+32(FP), R3
	ADD  R6, R3, R3
	CMP  R3, R10
	BEQ  encodeBlockEnd

	// d += emitLiteral(dst[d:], src[nextEmit:])
	//
	// Push args.
	MOVD R8, 8(RSP)
	MOVD $0, 16(RSP)  // Unnecessary, as the callee ignores it, but conservative.
	MOVD $0, 24(RSP)  // Unnecessary, as the callee ignores it, but conservative.
	MOVD R10, 32(RSP)
	SUB  R10, R3, R3
	MOVD R3, 40(RSP)
	MOVD R3, 48(RSP)  // Unnecessary, as the callee ignores it, but conservative.

	// Spill local variables (registers) onto the stack; call; unspill.
	MOVD R8, 88(RSP)
	CALL ·emitLiteral(SB)
	MOVD 88(RSP), R8

	// Finish the "d +=" part of "d += emitLiteral(etc)".
	MOVD 56(RSP), R1
	ADD  R1, R8, R8

encodeBlockEnd:
	MOVD dst_base+0(FP), R3
	SUB  R3, R8, R8
	MOVD R8, d+48(FP)
	RET
							
								
								
									
										30
									
								
								vendor/github.com/golang/snappy/encode_asm.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										30
									
								
								vendor/github.com/golang/snappy/encode_asm.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,30 @@ | ||||||
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// These declarations are only compiled when the assembly implementations
// are usable: a gc toolchain targeting amd64 or arm64, not App Engine, and
// not explicitly disabled via the noasm build tag.
// +build !appengine
// +build gc
// +build !noasm
// +build amd64 arm64

package snappy

// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int

// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int

// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int

// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)
							
								
								
									
										238
									
								
								vendor/github.com/golang/snappy/encode_other.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										238
									
								
								vendor/github.com/golang/snappy/encode_other.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,238 @@ | ||||||
|  | // Copyright 2016 The Snappy-Go Authors. All rights reserved. | ||||||
|  | // Use of this source code is governed by a BSD-style | ||||||
|  | // license that can be found in the LICENSE file. | ||||||
|  | 
 | ||||||
|  | // +build !amd64,!arm64 appengine !gc noasm | ||||||
|  | 
 | ||||||
|  | package snappy | ||||||
|  | 
 | ||||||
// load32 returns the little-endian uint32 held in b[i:i+4].
func load32(b []byte, i int) uint32 {
	// Re-slice to exactly the four bytes we read, which lets the compiler
	// eliminate the bounds checks on the accesses below.
	v := b[i : i+4 : len(b)]
	var x uint32
	for k := 3; k >= 0; k-- {
		x = x<<8 | uint32(v[k])
	}
	return x
}
|  | 
 | ||||||
// load64 returns the little-endian uint64 held in b[i:i+8].
func load64(b []byte, i int) uint64 {
	// Re-slice to exactly the eight bytes we read, which lets the compiler
	// eliminate the bounds checks on the accesses below.
	v := b[i : i+8 : len(b)]
	var x uint64
	for k := 7; k >= 0; k-- {
		x = x<<8 | uint64(v[k])
	}
	return x
}
|  | 
 | ||||||
|  | // emitLiteral writes a literal chunk and returns the number of bytes written. | ||||||
|  | // | ||||||
|  | // It assumes that: | ||||||
|  | //	dst is long enough to hold the encoded bytes | ||||||
|  | //	1 <= len(lit) && len(lit) <= 65536 | ||||||
|  | func emitLiteral(dst, lit []byte) int { | ||||||
|  | 	i, n := 0, uint(len(lit)-1) | ||||||
|  | 	switch { | ||||||
|  | 	case n < 60: | ||||||
|  | 		dst[0] = uint8(n)<<2 | tagLiteral | ||||||
|  | 		i = 1 | ||||||
|  | 	case n < 1<<8: | ||||||
|  | 		dst[0] = 60<<2 | tagLiteral | ||||||
|  | 		dst[1] = uint8(n) | ||||||
|  | 		i = 2 | ||||||
|  | 	default: | ||||||
|  | 		dst[0] = 61<<2 | tagLiteral | ||||||
|  | 		dst[1] = uint8(n) | ||||||
|  | 		dst[2] = uint8(n >> 8) | ||||||
|  | 		i = 3 | ||||||
|  | 	} | ||||||
|  | 	return i + copy(dst[i:], lit) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // emitCopy writes a copy chunk and returns the number of bytes written. | ||||||
|  | // | ||||||
|  | // It assumes that: | ||||||
|  | //	dst is long enough to hold the encoded bytes | ||||||
|  | //	1 <= offset && offset <= 65535 | ||||||
|  | //	4 <= length && length <= 65535 | ||||||
|  | func emitCopy(dst []byte, offset, length int) int { | ||||||
|  | 	i := 0 | ||||||
|  | 	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The | ||||||
|  | 	// threshold for this loop is a little higher (at 68 = 64 + 4), and the | ||||||
|  | 	// length emitted down below is is a little lower (at 60 = 64 - 4), because | ||||||
|  | 	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed | ||||||
|  | 	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as | ||||||
|  | 	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as | ||||||
|  | 	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a | ||||||
|  | 	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an | ||||||
|  | 	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. | ||||||
|  | 	for length >= 68 { | ||||||
|  | 		// Emit a length 64 copy, encoded as 3 bytes. | ||||||
|  | 		dst[i+0] = 63<<2 | tagCopy2 | ||||||
|  | 		dst[i+1] = uint8(offset) | ||||||
|  | 		dst[i+2] = uint8(offset >> 8) | ||||||
|  | 		i += 3 | ||||||
|  | 		length -= 64 | ||||||
|  | 	} | ||||||
|  | 	if length > 64 { | ||||||
|  | 		// Emit a length 60 copy, encoded as 3 bytes. | ||||||
|  | 		dst[i+0] = 59<<2 | tagCopy2 | ||||||
|  | 		dst[i+1] = uint8(offset) | ||||||
|  | 		dst[i+2] = uint8(offset >> 8) | ||||||
|  | 		i += 3 | ||||||
|  | 		length -= 60 | ||||||
|  | 	} | ||||||
|  | 	if length >= 12 || offset >= 2048 { | ||||||
|  | 		// Emit the remaining copy, encoded as 3 bytes. | ||||||
|  | 		dst[i+0] = uint8(length-1)<<2 | tagCopy2 | ||||||
|  | 		dst[i+1] = uint8(offset) | ||||||
|  | 		dst[i+2] = uint8(offset >> 8) | ||||||
|  | 		return i + 3 | ||||||
|  | 	} | ||||||
|  | 	// Emit the remaining copy, encoded as 2 bytes. | ||||||
|  | 	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 | ||||||
|  | 	dst[i+1] = uint8(offset) | ||||||
|  | 	return i + 2 | ||||||
|  | } | ||||||
|  | 
 | ||||||
// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
//	0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
	// Walk both cursors forward while the bytes keep matching.
	for j < len(src) && src[i] == src[j] {
		i++
		j++
	}
	return j
}
|  | 
 | ||||||
// hash maps a 32-bit input to a table index via a multiplicative hash,
// keeping only the top (32 - shift) bits of the product.
func hash(u, shift uint32) uint32 {
	const multiplier = 0x1e35a7bd
	return (u * multiplier) >> shift
}
|  | 
 | ||||||
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
//
// It returns d, the number of bytes written to dst.
func encodeBlock(dst, src []byte) (d int) {
	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
	// The table element type is uint16, as s < sLimit and sLimit < len(src)
	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
	const (
		maxTableSize = 1 << 14
		// tableMask is redundant, but helps the compiler eliminate bounds
		// checks.
		tableMask = maxTableSize - 1
	)
	// Pick the smallest power-of-two table (>= 1<<8) that covers len(src);
	// shift shrinks in step so that hash values index exactly that table.
	shift := uint32(32 - 8)
	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
		shift--
	}
	// In Go, all array elements are zero-initialized, so there is no advantage
	// to a smaller tableSize per se. However, it matches the C++ algorithm,
	// and in the asm versions of this code, we can get away with zeroing only
	// the first tableSize elements.
	var table [maxTableSize]uint16

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	nextHash := hash(load32(src, s), shift)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := 32

		nextS := s
		candidate := 0
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			// candidate is the most recent earlier position whose 4 bytes
			// hashed to the same bucket; it may be a hash collision, so the
			// actual bytes are compared below before treating it as a match.
			candidate = int(table[nextHash&tableMask])
			table[nextHash&tableMask] = uint16(s)
			nextHash = hash(load32(src, nextS), shift)
			if load32(src, s) == load32(src, candidate) {
				break
			}
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s

			// Extend the 4-byte match as long as possible.
			//
			// This is an inlined version of:
			//	s = extendMatch(src, candidate+4, s+4)
			s += 4
			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
			}

			// The copy consumed src[base:s]; literals, if any, resume at s.
			d += emitCopy(dst[d:], base-candidate, s-base)
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load64(src, s-1)
			prevHash := hash(uint32(x>>0), shift)
			table[prevHash&tableMask] = uint16(s - 1)
			currHash := hash(uint32(x>>8), shift)
			candidate = int(table[currHash&tableMask])
			table[currHash&tableMask] = uint16(s)
			if uint32(x>>8) != load32(src, candidate) {
				// No immediate follow-on copy: resume the main search loop at
				// s+1, whose hash (x>>16) has just been precomputed.
				nextHash = hash(uint32(x>>16), shift)
				s++
				break
			}
		}
	}

emitRemainder:
	// Flush any trailing bytes that were never covered by a copy as one
	// final literal chunk.
	if nextEmit < len(src) {
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
							
								
								
									
										98
									
								
								vendor/github.com/golang/snappy/snappy.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										98
									
								
								vendor/github.com/golang/snappy/snappy.go
									
										
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							|  | @ -0,0 +1,98 @@ | ||||||
|  | // Copyright 2011 The Snappy-Go Authors. All rights reserved. | ||||||
|  | // Use of this source code is governed by a BSD-style | ||||||
|  | // license that can be found in the LICENSE file. | ||||||
|  | 
 | ||||||
|  | // Package snappy implements the Snappy compression format. It aims for very | ||||||
|  | // high speeds and reasonable compression. | ||||||
|  | // | ||||||
|  | // There are actually two Snappy formats: block and stream. They are related, | ||||||
|  | // but different: trying to decompress block-compressed data as a Snappy stream | ||||||
|  | // will fail, and vice versa. The block format is the Decode and Encode | ||||||
|  | // functions and the stream format is the Reader and Writer types. | ||||||
|  | // | ||||||
|  | // The block format, the more common case, is used when the complete size (the | ||||||
|  | // number of bytes) of the original data is known upfront, at the time | ||||||
|  | // compression starts. The stream format, also known as the framing format, is | ||||||
|  | // for when that isn't always true. | ||||||
|  | // | ||||||
|  | // The canonical, C++ implementation is at https://github.com/google/snappy and | ||||||
|  | // it only implements the block format. | ||||||
|  | package snappy // import "github.com/golang/snappy" | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"hash/crc32" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.

For literal tags:
  - If m < 60, the next 1 + m bytes are literal bytes.
  - Otherwise, let n be the little-endian unsigned integer denoted by the next
    m - 59 bytes. The next 1 + n bytes after that are literal bytes.

For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
    of the offset. The next byte is bits 0-7 of the offset.
  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
    The length is 1 + m. The offset is the little-endian unsigned integer
    denoted by the next 2 bytes.
  - For l == 3, this tag is a legacy format that is no longer issued by most
    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
    integer denoted by the next 4 bytes.
*/
const (
	tagLiteral = 0x00 // l == 0: literal bytes follow
	tagCopy1   = 0x01 // l == 1: copy with 1 extra offset byte (11-bit offset)
	tagCopy2   = 0x02 // l == 2: copy with 2 extra offset bytes (16-bit offset)
	tagCopy4   = 0x03 // l == 3: legacy copy with 4 extra offset bytes (32-bit offset)
)
|  | 
 | ||||||
const (
	// checksumSize is the size in bytes of the masked CRC checksum that
	// prefixes each data chunk's payload in the framing format.
	checksumSize = 4
	// chunkHeaderSize is the size in bytes of a chunk header: one chunk-type
	// byte followed by a 3-byte little-endian chunk length.
	chunkHeaderSize = 4
	// magicChunk is the complete stream-identifier chunk that begins every
	// framed stream: type byte 0xff, length 6 (little-endian), then magicBody.
	magicChunk = "\xff\x06\x00\x00" + magicBody
	magicBody  = "sNaPpY"

	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
	// part of the wire format per se, but some parts of the encoder assume
	// that an offset fits into a uint16.
	//
	// Also, for the framing format (Writer type instead of Encode function),
	// https://github.com/google/snappy/blob/master/framing_format.txt says
	// that "the uncompressed data in a chunk must be no longer than 65536
	// bytes".
	maxBlockSize = 65536

	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
	// hard coded to be a const instead of a variable, so that obufLen can also
	// be a const. Their equivalence is confirmed by
	// TestMaxEncodedLenOfMaxBlockSize.
	maxEncodedLenOfMaxBlockSize = 76490

	// obufHeaderLen and obufLen size the Writer's output buffer: room for the
	// magic chunk plus one chunk header and checksum, and for the largest
	// possible encoded block, respectively.
	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)
|  | 
 | ||||||
// Chunk type bytes used by the Snappy framing (stream) format. The stream
// identifier type (0xff) matches the leading byte of magicChunk.
const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)
|  | 
 | ||||||
// crcTable is the CRC-32 table for the Castagnoli polynomial, as required by
// the Snappy framing format's checksum.
var crcTable = crc32.MakeTable(crc32.Castagnoli)

// crc implements the masked checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
	// Plain CRC-32C of the input, then the spec's mask: rotate right by 15
	// bits and add a fixed constant.
	sum := crc32.Checksum(b, crcTable)
	masked := (sum >> 15) | (sum << 17)
	return masked + 0xa282ead8
}
							
								
								
									
										29
									
								
								vendor/modules.txt
									
										
									
									
										vendored
									
									
								
							
							
						
						
									
										29
									
								
								vendor/modules.txt
									
										
									
									
										vendored
									
									
								
							|  | @ -1,3 +1,29 @@ | ||||||
|  | # git.iim.gay/grufwub/fastpath v0.2.2 | ||||||
|  | ## explicit; go 1.14 | ||||||
|  | git.iim.gay/grufwub/fastpath | ||||||
|  | # git.iim.gay/grufwub/go-bufpool v0.2.1 | ||||||
|  | ## explicit; go 1.16 | ||||||
|  | git.iim.gay/grufwub/go-bufpool | ||||||
|  | # git.iim.gay/grufwub/go-bytes v0.7.0 | ||||||
|  | ## explicit; go 1.16 | ||||||
|  | git.iim.gay/grufwub/go-bytes | ||||||
|  | # git.iim.gay/grufwub/go-errors v0.2.3 | ||||||
|  | ## explicit; go 1.15 | ||||||
|  | git.iim.gay/grufwub/go-errors | ||||||
|  | # git.iim.gay/grufwub/go-hashenc v0.3.0 | ||||||
|  | ## explicit; go 1.16 | ||||||
|  | git.iim.gay/grufwub/go-hashenc | ||||||
|  | # git.iim.gay/grufwub/go-mutexes v0.5.0 | ||||||
|  | ## explicit; go 1.16 | ||||||
|  | git.iim.gay/grufwub/go-mutexes | ||||||
|  | # git.iim.gay/grufwub/go-nowish v0.3.4 | ||||||
|  | ## explicit; go 1.16 | ||||||
|  | git.iim.gay/grufwub/go-nowish | ||||||
|  | # git.iim.gay/grufwub/go-store v0.4.1 | ||||||
|  | ## explicit; go 1.16 | ||||||
|  | git.iim.gay/grufwub/go-store/kv | ||||||
|  | git.iim.gay/grufwub/go-store/storage | ||||||
|  | git.iim.gay/grufwub/go-store/util | ||||||
| # github.com/ReneKroon/ttlcache v1.7.0 | # github.com/ReneKroon/ttlcache v1.7.0 | ||||||
| ## explicit; go 1.14 | ## explicit; go 1.14 | ||||||
| github.com/ReneKroon/ttlcache | github.com/ReneKroon/ttlcache | ||||||
|  | @ -285,6 +311,9 @@ github.com/golang/geo/s2 | ||||||
| # github.com/golang/protobuf v1.5.2 | # github.com/golang/protobuf v1.5.2 | ||||||
| ## explicit; go 1.9 | ## explicit; go 1.9 | ||||||
| github.com/golang/protobuf/proto | github.com/golang/protobuf/proto | ||||||
|  | # github.com/golang/snappy v0.0.3 | ||||||
|  | ## explicit | ||||||
|  | github.com/golang/snappy | ||||||
| # github.com/google/uuid v1.3.0 | # github.com/google/uuid v1.3.0 | ||||||
| ## explicit | ## explicit | ||||||
| github.com/google/uuid | github.com/google/uuid | ||||||
|  |  | ||||||
		Loading…
	
	Add table
		Add a link
		
	
		Reference in a new issue