[feature] Clean up/uncache remote media (#407)
* Add whereNotEmptyAndNotNull
* Add GetRemoteOlderThanDays
* Add GetRemoteOlderThanDays
* Add PruneRemote to Manager interface
* Start implementing PruneRemote
* add new attachment + status to tests
* fix up and test GetRemoteOlderThan
* fix bad import
* PruneRemote: return number pruned
* add Cached column to mediaattachment
* update + test pruneRemote
* update mediaTest
* use Cached column
* upstep bun to latest version
* embed structs in mediaAttachment
* migrate mediaAttachment to new format
* don't default cached to true
* select only remote media
* update db dependencies
* step bun back to last working version
* update pruneRemote to use Cached field
* fix storage path of test attachments
* add recache logic to manager
* fix trimmed aspect ratio
* test prune and recache
* return errwithcode
* tidy up different paths for emoji vs attachment
* fix incorrect thumbnail type being stored
* expose TransportController to media processor
* implement tee-ing recached content
* add thoughts of dog to test fedi attachments
* test get remote files
* add comment on PruneRemote
* add postData cleanup to recache
* test thumbnail fetching
* add incredible diagram
* go mod tidy
* buffer pipes for recache streaming
* test for client stops reading after 1kb
* add media-remote-cache-days to config
* add cron package
* wrap logrus so it's available to cron
* start and stop cron jobs gracefully
Parent: 100f1280a6
Commit: 07727753b9
424 changed files changed, with 637100 additions and 176498 deletions.
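Before the file-by-file diff, here is a minimal, illustrative caller-side sketch (not part of the commit) of how the new pieces fit together, using only identifiers introduced below: the media-remote-cache-days setting is read through viper and config.Keys.MediaRemoteCacheDays, and the media Manager's PruneRemote uncaches remote attachments older than that many days. Error handling and wiring are trimmed; the real scheduling lives in NewManager, which registers a nightly cron job.

// Illustrative sketch only; the pruner interface below is a stand-in for the
// subset of the media Manager interface that this example needs.
package example

import (
    "context"

    "github.com/spf13/viper"
    "github.com/superseriousbusiness/gotosocial/internal/config"
)

type pruner interface {
    PruneRemote(ctx context.Context, olderThanDays int) (int, error)
}

// pruneIfConfigured runs one manual prune pass if remote caching is limited.
func pruneIfConfigured(ctx context.Context, m pruner) (int, error) {
    // media-remote-cache-days defaults to 30; 0 disables the nightly prune job.
    days := viper.GetInt(config.Keys.MediaRemoteCacheDays)
    if days == 0 {
        return 0, nil
    }
    return m.PruneRemote(ctx, days)
}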
@@ -79,15 +79,15 @@ func (m *FileServer) ServeFile(c *gin.Context) {
        return
    }

-   content, err := m.processor.FileGet(c.Request.Context(), authed, &model.GetContentRequestForm{
+   content, errWithCode := m.processor.FileGet(c.Request.Context(), authed, &model.GetContentRequestForm{
        AccountID: accountID,
        MediaType: mediaType,
        MediaSize: mediaSize,
        FileName: fileName,
    })
-   if err != nil {
-       l.Debug(err)
-       c.String(http.StatusNotFound, "404 page not found")
+   if errWithCode != nil {
+       l.Errorf(errWithCode.Error())
+       c.JSON(errWithCode.Code(), gin.H{"error": errWithCode.Safe()})
        return
    }

@@ -104,7 +104,7 @@ func (m *FileServer) ServeFile(c *gin.Context) {
    // This is mostly needed because when sharing a link to a gts-hosted file on something like mastodon, the masto servers will
    // attempt to look up the content to provide a preview of the link, and they ask for text/html.
    format, err := api.NegotiateAccept(c, api.Offer(content.ContentType))
-   if err != nil {
+   if errWithCode != nil {
        c.JSON(http.StatusNotAcceptable, gin.H{"error": err.Error()})
        return
    }
@@ -53,6 +53,7 @@ var Defaults = Values{
    MediaVideoMaxSize: 10485760, // 10mb
    MediaDescriptionMinChars: 0,
    MediaDescriptionMaxChars: 500,
+   MediaRemoteCacheDays: 30,

    StorageBackend: "local",
    StorageLocalBasePath: "/gotosocial/storage",

@@ -58,6 +58,7 @@ type KeyNames struct {
    MediaVideoMaxSize string
    MediaDescriptionMinChars string
    MediaDescriptionMaxChars string
+   MediaRemoteCacheDays string

    // storage
    StorageBackend string

@@ -138,6 +139,7 @@ var Keys = KeyNames{
    MediaVideoMaxSize: "media-video-max-size",
    MediaDescriptionMinChars: "media-description-min-chars",
    MediaDescriptionMaxChars: "media-description-max-chars",
+   MediaRemoteCacheDays: "media-remote-cache-days",

    StorageBackend: "storage-backend",
    StorageLocalBasePath: "storage-local-base-path",

@@ -51,6 +51,7 @@ type Values struct {
    MediaVideoMaxSize int
    MediaDescriptionMinChars int
    MediaDescriptionMaxChars int
+   MediaRemoteCacheDays int

    StorageBackend string
    StorageLocalBasePath string
@@ -43,7 +43,7 @@ func (suite *BasicTestSuite) TestGetAllStatuses() {
    s := []*gtsmodel.Status{}
    err := suite.db.GetAll(context.Background(), &s)
    suite.NoError(err)
-   suite.Len(s, 14)
+   suite.Len(s, 15)
}

func (suite *BasicTestSuite) TestGetAllNotNull() {

@@ -55,9 +55,8 @@ func (suite *BunDBStandardTestSuite) SetupSuite() {
}

func (suite *BunDBStandardTestSuite) SetupTest() {
+   testrig.InitTestLog()
    testrig.InitTestConfig()
-
-   testrig.InitTestLog()
    suite.db = testrig.NewTestDB()
    testrig.StandardDBSetup(suite.db, suite.testAccounts)
}
@@ -20,6 +20,7 @@ package bundb

import (
    "context"
+   "time"

    "github.com/superseriousbusiness/gotosocial/internal/db"
    "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"

@@ -48,3 +49,26 @@ func (m *mediaDB) GetAttachmentByID(ctx context.Context, id string) (*gtsmodel.M
    }
    return attachment, nil
}
+
+func (m *mediaDB) GetRemoteOlderThan(ctx context.Context, olderThan time.Time, limit int) ([]*gtsmodel.MediaAttachment, db.Error) {
+   attachments := []*gtsmodel.MediaAttachment{}
+
+   q := m.conn.
+       NewSelect().
+       Model(&attachments).
+       Where("media_attachment.cached = true").
+       Where("media_attachment.avatar = false").
+       Where("media_attachment.header = false").
+       Where("media_attachment.created_at < ?", olderThan).
+       WhereGroup(" AND ", whereNotEmptyAndNotNull("media_attachment.remote_url")).
+       Order("media_attachment.created_at DESC")
+
+   if limit != 0 {
+       q = q.Limit(limit)
+   }
+
+   if err := q.Scan(ctx); err != nil {
+       return nil, m.conn.ProcessError(err)
+   }
+   return attachments, nil
+}
internal/db/bundb/media_test.go (new file, 48 lines)

@@ -0,0 +1,48 @@
/*
   GoToSocial
   Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU Affero General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Affero General Public License for more details.

   You should have received a copy of the GNU Affero General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package bundb_test

import (
    "context"
    "testing"
    "time"

    "github.com/stretchr/testify/suite"
)

type MediaTestSuite struct {
    BunDBStandardTestSuite
}

func (suite *MediaTestSuite) TestGetAttachmentByID() {
    testAttachment := suite.testAttachments["admin_account_status_1_attachment_1"]
    attachment, err := suite.db.GetAttachmentByID(context.Background(), testAttachment.ID)
    suite.NoError(err)
    suite.NotNil(attachment)
}

func (suite *MediaTestSuite) TestGetOlder() {
    attachments, err := suite.db.GetRemoteOlderThan(context.Background(), time.Now(), 20)
    suite.NoError(err)
    suite.Len(attachments, 1)
}

func TestMediaTestSuite(t *testing.T) {
    suite.Run(t, new(MediaTestSuite))
}
internal/db/bundb/migrations/20220214175650_media_cleanup.go (new file, 172 lines)

@@ -0,0 +1,172 @@
/* GoToSocial, Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org; AGPL v3 or later (license header identical to the one above) */

package migrations

import (
    "context"
    "database/sql"
    "time"

    previousgtsmodel "github.com/superseriousbusiness/gotosocial/internal/db/bundb/migrations/20211113114307_init"
    newgtsmodel "github.com/superseriousbusiness/gotosocial/internal/db/bundb/migrations/20220214175650_media_cleanup"
    "github.com/uptrace/bun"
)

func init() {
    const batchSize = 100
    up := func(ctx context.Context, db *bun.DB) error {
        // we need to migrate media attachments into a new table
        // see section 6 here: https://www.sqlite.org/lang_altertable.html

        return db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
            // create the new media attachments table
            if _, err := tx.
                NewCreateTable().
                ModelTableExpr("new_media_attachments").
                Model(&newgtsmodel.MediaAttachment{}).
                IfNotExists().
                Exec(ctx); err != nil {
                return err
            }

            offset := time.Now()
            // migrate existing media attachments into new table
        migrateLoop:
            for {
                oldAttachments := []*previousgtsmodel.MediaAttachment{}
                err := tx.
                    NewSelect().
                    Model(&oldAttachments).
                    // subtract a millisecond from the offset just to make sure we're not getting double entries (this happens sometimes)
                    Where("media_attachment.created_at < ?", offset.Add(-1*time.Millisecond)).
                    Order("media_attachment.created_at DESC").
                    Limit(batchSize).
                    Scan(ctx)
                if err != nil && err != sql.ErrNoRows {
                    // there's been a real error
                    return err
                }

                if err == sql.ErrNoRows || len(oldAttachments) == 0 {
                    // we're finished migrating
                    break migrateLoop
                }

                // update the offset to the createdAt time of the oldest media attachment in the slice
                offset = oldAttachments[len(oldAttachments)-1].CreatedAt

                // for every old attachment, we need to make a new attachment out of it by taking the same values
                newAttachments := []*newgtsmodel.MediaAttachment{}
                for _, old := range oldAttachments {
                    new := &newgtsmodel.MediaAttachment{
                        ID:        old.ID,
                        CreatedAt: old.CreatedAt,
                        UpdatedAt: old.UpdatedAt,
                        StatusID:  old.StatusID,
                        URL:       old.URL,
                        RemoteURL: old.RemoteURL,
                        Type:      newgtsmodel.FileType(old.Type),
                        FileMeta: newgtsmodel.FileMeta{
                            Original: newgtsmodel.Original{
                                Width:  old.FileMeta.Original.Width,
                                Height: old.FileMeta.Original.Height,
                                Size:   old.FileMeta.Original.Size,
                                Aspect: old.FileMeta.Original.Aspect,
                            },
                            Small: newgtsmodel.Small{
                                Width:  old.FileMeta.Small.Width,
                                Height: old.FileMeta.Small.Height,
                                Size:   old.FileMeta.Small.Size,
                                Aspect: old.FileMeta.Small.Aspect,
                            },
                            Focus: newgtsmodel.Focus{
                                X: old.FileMeta.Focus.X,
                                Y: old.FileMeta.Focus.Y,
                            },
                        },
                        AccountID:         old.AccountID,
                        Description:       old.Description,
                        ScheduledStatusID: old.ScheduledStatusID,
                        Blurhash:          old.Blurhash,
                        Processing:        newgtsmodel.ProcessingStatus(old.Processing),
                        File: newgtsmodel.File{
                            Path:        old.File.Path,
                            ContentType: old.File.ContentType,
                            FileSize:    old.File.FileSize,
                            UpdatedAt:   old.File.UpdatedAt,
                        },
                        Thumbnail: newgtsmodel.Thumbnail{
                            Path:        old.Thumbnail.Path,
                            ContentType: old.Thumbnail.ContentType,
                            FileSize:    old.Thumbnail.FileSize,
                            UpdatedAt:   old.Thumbnail.UpdatedAt,
                            URL:         old.Thumbnail.URL,
                            RemoteURL:   old.Thumbnail.RemoteURL,
                        },
                        Avatar: old.Avatar,
                        Header: old.Header,
                        Cached: true,
                    }
                    newAttachments = append(newAttachments, new)
                }

                // insert this batch of new attachments, and then continue the loop
                if _, err := tx.
                    NewInsert().
                    Model(&newAttachments).
                    ModelTableExpr("new_media_attachments").
                    Exec(ctx); err != nil {
                    return err
                }
            }

            // we have all the data we need from the old table, so we can safely drop it now
            if _, err := tx.NewDropTable().Model(&previousgtsmodel.MediaAttachment{}).Exec(ctx); err != nil {
                return err
            }

            // rename the new table to the same name as the old table was
            if _, err := tx.QueryContext(ctx, "ALTER TABLE new_media_attachments RENAME TO media_attachments;"); err != nil {
                return err
            }

            // add an index to the new table
            if _, err := tx.
                NewCreateIndex().
                Model(&newgtsmodel.MediaAttachment{}).
                Index("media_attachments_id_idx").
                Column("id").
                Exec(ctx); err != nil {
                return err
            }

            return nil
        })
    }

    down := func(ctx context.Context, db *bun.DB) error {
        return db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
            return nil
        })
    }

    if err := Migrations.Register(up, down); err != nil {
        panic(err)
    }
}
@@ -0,0 +1,117 @@
/* GoToSocial, Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org; AGPL v3 or later (license header identical to the one above) */

// Package gtsmodel contains types used *internally* by GoToSocial and added/removed/selected from the database.
// These types should never be serialized and/or sent out via public APIs, as they contain sensitive information.
// The annotation used on these structs is for handling them via the bun-db ORM.
// See here for more info on bun model annotations: https://bun.uptrace.dev/guide/models.html
package gtsmodel

import (
    "crypto/rsa"
    "time"
)

// Account represents either a local or a remote fediverse account, gotosocial or otherwise (mastodon, pleroma, etc).
type Account struct {
    ID string `validate:"required,ulid" bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database
    CreatedAt time.Time `validate:"-" bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created
    UpdatedAt time.Time `validate:"-" bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item last updated
    Username string `validate:"required" bun:",nullzero,notnull,unique:userdomain"` // Username of the account, should just be a string of [a-zA-Z0-9_]. Can be added to domain to create the full username in the form ``[username]@[domain]`` eg., ``user_96@example.org``. Username and domain should be unique *with* each other
    Domain string `validate:"omitempty,fqdn" bun:",nullzero,unique:userdomain"` // Domain of the account, will be null if this is a local account, otherwise something like ``example.org``. Should be unique with username.
    AvatarMediaAttachmentID string `validate:"omitempty,ulid" bun:"type:CHAR(26),nullzero"` // Database ID of the media attachment, if present
    AvatarMediaAttachment *MediaAttachment `validate:"-" bun:"rel:belongs-to"` // MediaAttachment corresponding to avatarMediaAttachmentID
    AvatarRemoteURL string `validate:"omitempty,url" bun:",nullzero"` // For a non-local account, where can the header be fetched?
    HeaderMediaAttachmentID string `validate:"omitempty,ulid" bun:"type:CHAR(26),nullzero"` // Database ID of the media attachment, if present
    HeaderMediaAttachment *MediaAttachment `validate:"-" bun:"rel:belongs-to"` // MediaAttachment corresponding to headerMediaAttachmentID
    HeaderRemoteURL string `validate:"omitempty,url" bun:",nullzero"` // For a non-local account, where can the header be fetched?
    DisplayName string `validate:"-" bun:""` // DisplayName for this account. Can be empty, then just the Username will be used for display purposes.
    Fields []Field `validate:"-"` // a key/value map of fields that this account has added to their profile
    Note string `validate:"-" bun:""` // A note that this account has on their profile (ie., the account's bio/description of themselves)
    Memorial bool `validate:"-" bun:",default:false"` // Is this a memorial account, ie., has the user passed away?
    AlsoKnownAs string `validate:"omitempty,ulid" bun:"type:CHAR(26),nullzero"` // This account is associated with x account id (TODO: migrate to be AlsoKnownAsID)
    MovedToAccountID string `validate:"omitempty,ulid" bun:"type:CHAR(26),nullzero"` // This account has moved this account id in the database
    Bot bool `validate:"-" bun:",default:false"` // Does this account identify itself as a bot?
    Reason string `validate:"-" bun:""` // What reason was given for signing up when this account was created?
    Locked bool `validate:"-" bun:",default:true"` // Does this account need an approval for new followers?
    Discoverable bool `validate:"-" bun:",default:false"` // Should this account be shown in the instance's profile directory?
    Privacy Visibility `validate:"required_without=Domain,omitempty,oneof=public unlocked followers_only mutuals_only direct" bun:",nullzero"` // Default post privacy for this account
    Sensitive bool `validate:"-" bun:",default:false"` // Set posts from this account to sensitive by default?
    Language string `validate:"omitempty,bcp47_language_tag" bun:",nullzero,notnull,default:'en'"` // What language does this account post in?
    URI string `validate:"required,url" bun:",nullzero,notnull,unique"` // ActivityPub URI for this account.
    URL string `validate:"required_without=Domain,omitempty,url" bun:",nullzero,unique"` // Web URL for this account's profile
    LastWebfingeredAt time.Time `validate:"required_with=Domain" bun:"type:timestamptz,nullzero"` // Last time this account was refreshed/located with webfinger.
    InboxURI string `validate:"required_without=Domain,omitempty,url" bun:",nullzero,unique"` // Address of this account's ActivityPub inbox, for sending activity to
    OutboxURI string `validate:"required_without=Domain,omitempty,url" bun:",nullzero,unique"` // Address of this account's activitypub outbox
    FollowingURI string `validate:"required_without=Domain,omitempty,url" bun:",nullzero,unique"` // URI for getting the following list of this account
    FollowersURI string `validate:"required_without=Domain,omitempty,url" bun:",nullzero,unique"` // URI for getting the followers list of this account
    FeaturedCollectionURI string `validate:"required_without=Domain,omitempty,url" bun:",nullzero,unique"` // URL for getting the featured collection list of this account
    ActorType string `validate:"oneof=Application Group Organization Person Service" bun:",nullzero,notnull"` // What type of activitypub actor is this account?
    PrivateKey *rsa.PrivateKey `validate:"required_without=Domain"` // Privatekey for validating activitypub requests, will only be defined for local accounts
    PublicKey *rsa.PublicKey `validate:"required"` // Publickey for encoding activitypub requests, will be defined for both local and remote accounts
    PublicKeyURI string `validate:"required,url" bun:",nullzero,notnull,unique"` // Web-reachable location of this account's public key
    SensitizedAt time.Time `validate:"-" bun:"type:timestamptz,nullzero"` // When was this account set to have all its media shown as sensitive?
    SilencedAt time.Time `validate:"-" bun:"type:timestamptz,nullzero"` // When was this account silenced (eg., statuses only visible to followers, not public)?
    SuspendedAt time.Time `validate:"-" bun:"type:timestamptz,nullzero"` // When was this account suspended (eg., don't allow it to log in/post, don't accept media/posts from this account)
    HideCollections bool `validate:"-" bun:",default:false"` // Hide this account's collections
    SuspensionOrigin string `validate:"omitempty,ulid" bun:"type:CHAR(26),nullzero"` // id of the database entry that caused this account to become suspended -- can be an account ID or a domain block ID
}

// Field represents a key value field on an account, for things like pronouns, website, etc.
// VerifiedAt is optional, to be used only if Value is a URL to a webpage that contains the
// username of the user.
type Field struct {
    Name string `validate:"required"` // Name of this field.
    Value string `validate:"required"` // Value of this field.
    VerifiedAt time.Time `validate:"-" bun:",nullzero"` // This field was verified at (optional).
}

// Relationship describes a requester's relationship with another account.
type Relationship struct {
    ID string // The account id.
    Following bool // Are you following this user?
    ShowingReblogs bool // Are you receiving this user's boosts in your home timeline?
    Notifying bool // Have you enabled notifications for this user?
    FollowedBy bool // Are you followed by this user?
    Blocking bool // Are you blocking this user?
    BlockedBy bool // Is this user blocking you?
    Muting bool // Are you muting this user?
    MutingNotifications bool // Are you muting notifications from this user?
    Requested bool // Do you have a pending follow request for this user?
    DomainBlocking bool // Are you blocking this user's domain?
    Endorsed bool // Are you featuring this user on your profile?
    Note string // Your note on this account.
}

// Visibility represents the visibility granularity of a status.
type Visibility string

const (
    // VisibilityPublic means this status will be visible to everyone on all timelines.
    VisibilityPublic Visibility = "public"
    // VisibilityUnlocked means this status will be visible to everyone, but will only show on home timeline to followers, and in lists.
    VisibilityUnlocked Visibility = "unlocked"
    // VisibilityFollowersOnly means this status is viewable to followers only.
    VisibilityFollowersOnly Visibility = "followers_only"
    // VisibilityMutualsOnly means this status is visible to mutual followers only.
    VisibilityMutualsOnly Visibility = "mutuals_only"
    // VisibilityDirect means this status is visible only to mentioned recipients.
    VisibilityDirect Visibility = "direct"
    // VisibilityDefault is used when no other setting can be found.
    VisibilityDefault Visibility = VisibilityUnlocked
)
@@ -0,0 +1,116 @@
/* GoToSocial, Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org; AGPL v3 or later (license header identical to the one above) */

package gtsmodel

import (
    "time"
)

type MediaAttachment struct {
    ID string `validate:"required,ulid" bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database
    CreatedAt time.Time `validate:"-" bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created
    UpdatedAt time.Time `validate:"-" bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item last updated
    StatusID string `validate:"omitempty,ulid" bun:"type:CHAR(26),nullzero"` // ID of the status to which this is attached
    URL string `validate:"required_without=RemoteURL,omitempty,url" bun:",nullzero"` // Where can the attachment be retrieved on *this* server
    RemoteURL string `validate:"required_without=URL,omitempty,url" bun:",nullzero"` // Where can the attachment be retrieved on a remote server (empty for local media)
    Type FileType `validate:"oneof=Image Gif Audio Video Unknown" bun:",nullzero,notnull"` // Type of file (image/gif/audio/video)
    FileMeta FileMeta `validate:"required" bun:",embed:filemeta_,nullzero,notnull"` // Metadata about the file
    AccountID string `validate:"required,ulid" bun:"type:CHAR(26),nullzero,notnull"` // To which account does this attachment belong
    Account *Account `validate:"-" bun:"rel:has-one"` // Account corresponding to accountID
    Description string `validate:"-" bun:""` // Description of the attachment (for screenreaders)
    ScheduledStatusID string `validate:"omitempty,ulid" bun:"type:CHAR(26),nullzero"` // To which scheduled status does this attachment belong
    Blurhash string `validate:"required_if=Type Image,required_if=Type Gif,required_if=Type Video" bun:",nullzero"` // What is the generated blurhash of this attachment
    Processing ProcessingStatus `validate:"oneof=0 1 2 666" bun:",notnull,default:2"` // What is the processing status of this attachment
    File File `validate:"required" bun:",embed:file_,notnull,nullzero"` // metadata for the whole file
    Thumbnail Thumbnail `validate:"required" bun:",embed:thumbnail_,notnull,nullzero"` // small image thumbnail derived from a larger image, video, or audio file.
    Avatar bool `validate:"-" bun:",notnull,default:false"` // Is this attachment being used as an avatar?
    Header bool `validate:"-" bun:",notnull,default:false"` // Is this attachment being used as a header?
    Cached bool `validate:"-" bun:",notnull"` // Is this attachment currently cached by our instance?
}

// File refers to the metadata for the whole file
type File struct {
    Path string `validate:"required,file" bun:",nullzero,notnull"` // Path of the file in storage.
    ContentType string `validate:"required" bun:",nullzero,notnull"` // MIME content type of the file.
    FileSize int `validate:"required" bun:",notnull"` // File size in bytes
    UpdatedAt time.Time `validate:"-" bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // When was the file last updated.
}

// Thumbnail refers to a small image thumbnail derived from a larger image, video, or audio file.
type Thumbnail struct {
    Path string `validate:"required,file" bun:",nullzero,notnull"` // Path of the file in storage.
    ContentType string `validate:"required" bun:",nullzero,notnull"` // MIME content type of the file.
    FileSize int `validate:"required" bun:",notnull"` // File size in bytes
    UpdatedAt time.Time `validate:"-" bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // When was the file last updated.
    URL string `validate:"required_without=RemoteURL,omitempty,url" bun:",nullzero"` // What is the URL of the thumbnail on the local server
    RemoteURL string `validate:"required_without=URL,omitempty,url" bun:",nullzero"` // What is the remote URL of the thumbnail (empty for local media)
}

// ProcessingStatus refers to how far along in the processing stage the attachment is.
type ProcessingStatus int

// MediaAttachment processing states.
const (
    ProcessingStatusReceived ProcessingStatus = 0 // ProcessingStatusReceived indicates the attachment has been received and is awaiting processing. No thumbnail available yet.
    ProcessingStatusProcessing ProcessingStatus = 1 // ProcessingStatusProcessing indicates the attachment is currently being processed. Thumbnail is available but full media is not.
    ProcessingStatusProcessed ProcessingStatus = 2 // ProcessingStatusProcessed indicates the attachment has been fully processed and is ready to be served.
    ProcessingStatusError ProcessingStatus = 666 // ProcessingStatusError indicates something went wrong processing the attachment and it won't be tried again--these can be deleted.
)

// FileType refers to the file type of the media attaachment.
type FileType string

// MediaAttachment file types.
const (
    FileTypeImage FileType = "Image" // FileTypeImage is for jpegs and pngs
    FileTypeGif FileType = "Gif" // FileTypeGif is for native gifs and soundless videos that have been converted to gifs
    FileTypeAudio FileType = "Audio" // FileTypeAudio is for audio-only files (no video)
    FileTypeVideo FileType = "Video" // FileTypeVideo is for files with audio + visual
    FileTypeUnknown FileType = "Unknown" // FileTypeUnknown is for unknown file types (surprise surprise!)
)

// FileMeta describes metadata about the actual contents of the file.
type FileMeta struct {
    Original Original `validate:"required" bun:"embed:original_"`
    Small Small `bun:"embed:small_"`
    Focus Focus `bun:"embed:focus_"`
}

// Small can be used for a thumbnail of any media type
type Small struct {
    Width int `validate:"required_with=Height Size Aspect"` // width in pixels
    Height int `validate:"required_with=Width Size Aspect"` // height in pixels
    Size int `validate:"required_with=Width Height Aspect"` // size in pixels (width * height)
    Aspect float64 `validate:"required_with=Widhth Height Size"` // aspect ratio (width / height)
}

// Original can be used for original metadata for any media type
type Original struct {
    Width int `validate:"required_with=Height Size Aspect"` // width in pixels
    Height int `validate:"required_with=Width Size Aspect"` // height in pixels
    Size int `validate:"required_with=Width Height Aspect"` // size in pixels (width * height)
    Aspect float64 `validate:"required_with=Widhth Height Size"` // aspect ratio (width / height)
}

// Focus describes the 'center' of the image for display purposes.
// X and Y should each be between -1 and 1
type Focus struct {
    X float32 `validate:"omitempty,max=1,min=-1"`
    Y float32 `validate:"omitempty,max=1,min=-1"`
}
@@ -37,6 +37,20 @@ func whereEmptyOrNull(column string) func(*bun.SelectQuery) *bun.SelectQuery {
    }
}

+// whereNotEmptyAndNotNull is a convenience function to return a bun WhereGroup that specifies
+// that the given column should be NEITHER an empty string NOR null.
+//
+// Use it as follows:
+//
+//   q = q.WhereGroup(" AND ", whereNotEmptyAndNotNull("whatever_column"))
+func whereNotEmptyAndNotNull(column string) func(*bun.SelectQuery) *bun.SelectQuery {
+   return func(q *bun.SelectQuery) *bun.SelectQuery {
+       return q.
+           Where("? IS NOT NULL", bun.Ident(column)).
+           Where("? != ''", bun.Ident(column))
+   }
+}

// updateWhere parses []db.Where and adds it to the given update query.
func updateWhere(q *bun.UpdateQuery, where []db.Where) {
    for _, w := range where {
@@ -20,6 +20,7 @@ package db

import (
    "context"
+   "time"

    "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
)

@@ -28,4 +29,10 @@ import (
type Media interface {
    // GetAttachmentByID gets a single attachment by its ID
    GetAttachmentByID(ctx context.Context, id string) (*gtsmodel.MediaAttachment, Error)
+   // GetRemoteOlderThan gets limit n remote media attachments older than the given olderThan time.
+   // These will be returned in order of attachment.created_at descending (newest to oldest in other words).
+   //
+   // The selected media attachments will be those with both a URL and a RemoteURL filled in.
+   // In other words, media attachments that originated remotely, and that we currently have cached locally.
+   GetRemoteOlderThan(ctx context.Context, olderThan time.Time, limit int) ([]*gtsmodel.MediaAttachment, Error)
}
@@ -39,6 +39,8 @@ type Federator interface {
    FederatingActor() pub.FederatingActor
    // FederatingDB returns the underlying FederatingDB interface.
    FederatingDB() federatingdb.DB
+   // TransportController returns the underlying transport controller.
+   TransportController() transport.Controller

    // AuthenticateFederatedRequest can be used to check the authenticity of incoming http-signed requests for federating resources.
    // The given username will be used to create a transport for making outgoing requests. See the implementation for more detailed comments.

@@ -107,3 +109,7 @@ func (f *federator) FederatingActor() pub.FederatingActor {
func (f *federator) FederatingDB() federatingdb.DB {
    return f.federatingDB
}
+
+func (f *federator) TransportController() transport.Controller {
+   return f.transportController
+}
@@ -32,17 +32,18 @@ type MediaAttachment struct {
    URL string `validate:"required_without=RemoteURL,omitempty,url" bun:",nullzero"` // Where can the attachment be retrieved on *this* server
    RemoteURL string `validate:"required_without=URL,omitempty,url" bun:",nullzero"` // Where can the attachment be retrieved on a remote server (empty for local media)
    Type FileType `validate:"oneof=Image Gif Audio Video Unknown" bun:",nullzero,notnull"` // Type of file (image/gif/audio/video)
-   FileMeta FileMeta `validate:"required" bun:",nullzero,notnull"` // Metadata about the file
+   FileMeta FileMeta `validate:"required" bun:",embed:filemeta_,nullzero,notnull"` // Metadata about the file
    AccountID string `validate:"required,ulid" bun:"type:CHAR(26),nullzero,notnull"` // To which account does this attachment belong
    Account *Account `validate:"-" bun:"rel:has-one"` // Account corresponding to accountID
    Description string `validate:"-" bun:""` // Description of the attachment (for screenreaders)
    ScheduledStatusID string `validate:"omitempty,ulid" bun:"type:CHAR(26),nullzero"` // To which scheduled status does this attachment belong
    Blurhash string `validate:"required_if=Type Image,required_if=Type Gif,required_if=Type Video" bun:",nullzero"` // What is the generated blurhash of this attachment
    Processing ProcessingStatus `validate:"oneof=0 1 2 666" bun:",notnull,default:2"` // What is the processing status of this attachment
-   File File `validate:"required" bun:",notnull,nullzero"` // metadata for the whole file
-   Thumbnail Thumbnail `validate:"required" bun:",notnull,nullzero"` // small image thumbnail derived from a larger image, video, or audio file.
+   File File `validate:"required" bun:",embed:file_,notnull,nullzero"` // metadata for the whole file
+   Thumbnail Thumbnail `validate:"required" bun:",embed:thumbnail_,notnull,nullzero"` // small image thumbnail derived from a larger image, video, or audio file.
    Avatar bool `validate:"-" bun:",notnull,default:false"` // Is this attachment being used as an avatar?
    Header bool `validate:"-" bun:",notnull,default:false"` // Is this attachment being used as a header?
+   Cached bool `validate:"-" bun:",notnull"` // Is this attachment currently cached by our instance?
}

// File refers to the metadata for the whole file

@@ -88,9 +89,9 @@ const (

// FileMeta describes metadata about the actual contents of the file.
type FileMeta struct {
-   Original Original `validate:"required"`
-   Small Small
-   Focus Focus
+   Original Original `validate:"required" bun:"embed:original_"`
+   Small Small `bun:"embed:small_"`
+   Focus Focus `bun:"embed:focus_"`
}

// Small can be used for a thumbnail of any media type
@@ -21,11 +21,16 @@ package media
import (
    "context"
    "errors"
    "fmt"
    "runtime"
    "time"

    "codeberg.org/gruf/go-runners"
    "codeberg.org/gruf/go-store/kv"
    "github.com/robfig/cron/v3"
    "github.com/sirupsen/logrus"
    "github.com/spf13/viper"
    "github.com/superseriousbusiness/gotosocial/internal/config"
    "github.com/superseriousbusiness/gotosocial/internal/db"
)

@@ -61,6 +66,12 @@ type Manager interface {
    //
    // ai is optional and can be nil. Any additional information about the emoji provided will be put in the database.
    ProcessEmoji(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, shortcode string, id string, uri string, ai *AdditionalEmojiInfo) (*ProcessingEmoji, error)
+   // RecacheMedia refetches, reprocesses, and recaches an existing attachment that has been uncached via pruneRemote.
+   RecacheMedia(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, attachmentID string) (*ProcessingMedia, error)
+   // PruneRemote prunes all remote media cached on this instance that's older than the given amount of days.
+   // 'Pruning' in this context means removing the locally stored data of the attachment (both thumbnail and full size),
+   // and setting 'cached' to false on the associated attachment.
+   PruneRemote(ctx context.Context, olderThanDays int) (int, error)
    // NumWorkers returns the total number of workers available to this manager.
    NumWorkers() int
    // QueueSize returns the total capacity of the queue.

@@ -76,11 +87,12 @@ type Manager interface {
}

type manager struct {
-   db db.DB
-   storage *kv.KVStore
-   pool runners.WorkerPool
-   numWorkers int
-   queueSize int
+   db db.DB
+   storage *kv.KVStore
+   pool runners.WorkerPool
+   stopCronJobs func() error
+   numWorkers int
+   queueSize int
}

// NewManager returns a media manager with the given db and underlying storage.

@@ -97,8 +109,10 @@ type manager struct {
// For a 4 core machine, this will be 2 workers, and a queue length of 20.
// For a single or 2-core machine, the media manager will get 1 worker, and a queue of length 10.
func NewManager(database db.DB, storage *kv.KVStore) (Manager, error) {
-   numWorkers := runtime.NumCPU() / 2

+   // configure the worker pool
+   // make sure we always have at least 1 worker even on single-core machines
+   numWorkers := runtime.NumCPU() / 2
    if numWorkers == 0 {
        numWorkers = 1
    }

@@ -112,11 +126,61 @@ func NewManager(database db.DB, storage *kv.KVStore) (Manager, error) {
        queueSize: queueSize,
    }

    // start the worker pool
    if start := m.pool.Start(); !start {
        return nil, errors.New("could not start worker pool")
    }
    logrus.Debugf("started media manager worker pool with %d workers and queue capacity of %d", numWorkers, queueSize)

+   // start remote cache cleanup cronjob if configured
+   cacheCleanupDays := viper.GetInt(config.Keys.MediaRemoteCacheDays)
+   if cacheCleanupDays != 0 {
+       // we need a way of cancelling running jobs if the media manager is told to stop
+       pruneCtx, pruneCancel := context.WithCancel(context.Background())
+
+       // create a new cron instance and add a function to it
+       c := cron.New(cron.WithLogger(&logrusWrapper{}))
+
+       pruneFunc := func() {
+           begin := time.Now()
+           pruned, err := m.PruneRemote(pruneCtx, cacheCleanupDays)
+           if err != nil {
+               logrus.Errorf("media manager: error pruning remote cache: %s", err)
+               return
+           }
+           logrus.Infof("media manager: pruned %d remote cache entries in %s", pruned, time.Since(begin))
+       }
+
+       // run every night
+       entryID, err := c.AddFunc("@midnight", pruneFunc)
+       if err != nil {
+           pruneCancel()
+           return nil, fmt.Errorf("error starting media manager remote cache cleanup job: %s", err)
+       }
+
+       // since we're running a cron job, we should define how the manager should stop them
+       m.stopCronJobs = func() error {
+           // try to stop any jobs gracefully by waiting til they're finished
+           cronCtx := c.Stop()
+
+           select {
+           case <-cronCtx.Done():
+               logrus.Infof("media manager: cron finished jobs and stopped gracefully")
+           case <-time.After(1 * time.Minute):
+               logrus.Infof("media manager: cron didn't stop after 60 seconds, will force close")
+               break
+           }
+
+           // whether the job is finished neatly or we had to wait a minute, cancel the context on the prune job
+           pruneCancel()
+           return nil
+       }
+
+       // now start all the cron stuff we've lined up
+       c.Start()
+       logrus.Infof("started media manager remote cache cleanup job: will run next at %s", c.Entry(entryID).Next)
+   }

    return m, nil
}
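As an aside on the scheduling used above: in robfig/cron/v3 the "@midnight" spec is an alias for "0 0 * * *" (once per day at 00:00), AddFunc returns an EntryID whose next run time can be inspected via Entry(id).Next, and Stop returns a context that completes once running jobs have finished, which is what the graceful shutdown added to Stop() below relies on. A tiny standalone sketch, illustrative only and not part of the commit:

package main

import (
    "fmt"
    "time"

    "github.com/robfig/cron/v3"
)

func main() {
    c := cron.New()
    entryID, err := c.AddFunc("@midnight", func() {
        fmt.Println("pruning remote media cache...")
    })
    if err != nil {
        panic(err)
    }

    c.Start()
    fmt.Println("next run at:", c.Entry(entryID).Next)

    // Stop returns a context that is done once any running jobs have finished;
    // waiting on it gives the same graceful shutdown used by the media manager.
    time.Sleep(100 * time.Millisecond)
    <-c.Stop().Done()
}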
@@ -168,6 +232,30 @@ func (m *manager) ProcessEmoji(ctx context.Context, data DataFunc, postData Post
    return processingEmoji, nil
}
+
+func (m *manager) RecacheMedia(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, attachmentID string) (*ProcessingMedia, error) {
+   processingRecache, err := m.preProcessRecache(ctx, data, postData, attachmentID)
+   if err != nil {
+       return nil, err
+   }
+
+   logrus.Tracef("RecacheMedia: about to enqueue recache with attachmentID %s, queue length is %d", processingRecache.AttachmentID(), m.pool.Queue())
+   m.pool.Enqueue(func(innerCtx context.Context) {
+       select {
+       case <-innerCtx.Done():
+           // if the inner context is done that means the worker pool is closing, so we should just return
+           return
+       default:
+           // start loading the media already for the caller's convenience
+           if _, err := processingRecache.LoadAttachment(innerCtx); err != nil {
+               logrus.Errorf("RecacheMedia: error processing recache with attachmentID %s: %s", processingRecache.AttachmentID(), err)
+           }
+       }
+   })
+   logrus.Tracef("RecacheMedia: succesfully queued recache with attachmentID %s, queue length is %d", processingRecache.AttachmentID(), m.pool.Queue())
+
+   return processingRecache, nil
+}

func (m *manager) NumWorkers() int {
    return m.numWorkers
}

@@ -186,10 +274,15 @@ func (m *manager) ActiveWorkers() int {

func (m *manager) Stop() error {
    logrus.Info("stopping media manager worker pool")

    stopped := m.pool.Stop()
    if !stopped {
        return errors.New("could not stop media manager worker pool")
    }

+   if m.stopCronJobs != nil { // only defined if cron jobs are actually running
+       logrus.Info("stopping media manager cache cleanup jobs")
+       return m.stopCronJobs()
+   }
+
    return nil
}
@@ -31,7 +31,7 @@ import (
    "codeberg.org/gruf/go-store/kv"
    "codeberg.org/gruf/go-store/storage"
    "github.com/stretchr/testify/suite"
-   gtsmodel "github.com/superseriousbusiness/gotosocial/internal/db/bundb/migrations/20211113114307_init"
+   gtsmodel "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
    "github.com/superseriousbusiness/gotosocial/internal/media"
)

@@ -22,6 +22,7 @@ import (
    "codeberg.org/gruf/go-store/kv"
    "github.com/stretchr/testify/suite"
    "github.com/superseriousbusiness/gotosocial/internal/db"
+   gtsmodel "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
    "github.com/superseriousbusiness/gotosocial/internal/media"
    "github.com/superseriousbusiness/gotosocial/testrig"
)

@@ -29,9 +30,10 @@ import (
type MediaStandardTestSuite struct {
    suite.Suite

-   db db.DB
-   storage *kv.KVStore
-   manager media.Manager
+   db db.DB
+   storage *kv.KVStore
+   manager media.Manager
+   testAttachments map[string]*gtsmodel.MediaAttachment
}

func (suite *MediaStandardTestSuite) SetupSuite() {

@@ -45,6 +47,7 @@ func (suite *MediaStandardTestSuite) SetupSuite() {
func (suite *MediaStandardTestSuite) SetupTest() {
    testrig.StandardStorageSetup(suite.storage, "../../testrig/media")
    testrig.StandardDBSetup(suite.db, nil)
+   suite.testAttachments = testrig.NewTestAttachments()
    suite.manager = testrig.NewTestMediaManager(suite.db, suite.storage)
}
@@ -66,6 +66,9 @@ type ProcessingMedia struct {

    // track whether this media has already been put in the databse
    insertedInDB bool
+
+   // true if this is a recache, false if it's brand new media
+   recache bool
}

// AttachmentID returns the ID of the underlying media attachment without blocking processing.

@@ -93,8 +96,16 @@ func (p *ProcessingMedia) LoadAttachment(ctx context.Context) (*gtsmodel.MediaAt

    // store the result in the database before returning it
    if !p.insertedInDB {
-       if err := p.database.Put(ctx, p.attachment); err != nil {
-           return nil, err
+       if p.recache {
+           // if it's a recache we should only need to update
+           if err := p.database.UpdateByPrimaryKey(ctx, p.attachment); err != nil {
+               return nil, err
+           }
+       } else {
+           // otherwise we need to really PUT it
+           if err := p.database.Put(ctx, p.attachment); err != nil {
+               return nil, err
+           }
        }
        p.insertedInDB = true
    }

@@ -305,6 +316,7 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
    if err := p.storage.PutStream(p.attachment.File.Path, clean); err != nil {
        return fmt.Errorf("store: error storing stream: %s", err)
    }
+   p.attachment.Cached = true

    // if the original reader is a readcloser, close it since we're done with it now
    if rc, ok := reader.(io.ReadCloser); ok {

@@ -360,6 +372,7 @@ func (m *manager) preProcessMedia(ctx context.Context, data DataFunc, postData P
        Thumbnail: thumbnail,
        Avatar: false,
        Header: false,
+       Cached: false,
    }

    // check if we have additional info to add to the attachment,

@@ -418,3 +431,24 @@ func (m *manager) preProcessMedia(ctx context.Context, data DataFunc, postData P

    return processingMedia, nil
}
+
+func (m *manager) preProcessRecache(ctx context.Context, data DataFunc, postData PostDataCallbackFunc, attachmentID string) (*ProcessingMedia, error) {
+   // get the existing attachment
+   attachment, err := m.db.GetAttachmentByID(ctx, attachmentID)
+   if err != nil {
+       return nil, err
+   }
+
+   processingMedia := &ProcessingMedia{
+       attachment: attachment,
+       data: data,
+       postData: postData,
+       thumbState: int32(received),
+       fullSizeState: int32(received),
+       database: m.db,
+       storage: m.storage,
+       recache: true, // indicate it's a recache
+   }
+
+   return processingMedia, nil
+}
internal/media/pruneremote.go (new file, 96 lines)

@@ -0,0 +1,96 @@
/* GoToSocial, Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org; AGPL v3 or later (license header identical to the one above) */

package media

import (
    "context"
    "fmt"
    "time"

    "codeberg.org/gruf/go-store/storage"
    "github.com/sirupsen/logrus"
    "github.com/superseriousbusiness/gotosocial/internal/db"
    "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
)

// amount of media attachments to select at a time from the db when pruning
const selectPruneLimit = 20

func (m *manager) PruneRemote(ctx context.Context, olderThanDays int) (int, error) {
    var totalPruned int

    // convert days into a duration string
    olderThanHoursString := fmt.Sprintf("%dh", olderThanDays*24)
    // parse the duration string into a duration
    olderThanHours, err := time.ParseDuration(olderThanHoursString)
    if err != nil {
        return totalPruned, fmt.Errorf("PruneRemote: %d", err)
    }
    // 'subtract' that from the time now to give our threshold
    olderThan := time.Now().Add(-olderThanHours)
    logrus.Infof("PruneRemote: pruning media older than %s", olderThan)

    // select 20 attachments at a time and prune them
    for attachments, err := m.db.GetRemoteOlderThan(ctx, olderThan, selectPruneLimit); err == nil && len(attachments) != 0; attachments, err = m.db.GetRemoteOlderThan(ctx, olderThan, selectPruneLimit) {
        // use the age of the oldest attachment (the last one in the slice) as the next 'older than' value
        l := len(attachments)
        logrus.Tracef("PruneRemote: got %d attachments older than %s", l, olderThan)
        olderThan = attachments[l-1].CreatedAt

        // prune each attachment
        for _, attachment := range attachments {
            if err := m.PruneOne(ctx, attachment); err != nil {
                return totalPruned, err
            }
            totalPruned++
        }
    }

    // make sure we don't have a real error when we leave the loop
    if err != nil && err != db.ErrNoEntries {
        return totalPruned, err
    }

    logrus.Infof("PruneRemote: finished pruning remote media: pruned %d entries", totalPruned)
    return totalPruned, nil
}

func (m *manager) PruneOne(ctx context.Context, attachment *gtsmodel.MediaAttachment) error {
    if attachment.File.Path != "" {
        // delete the full size attachment from storage
        logrus.Tracef("PruneOne: deleting %s", attachment.File.Path)
        if err := m.storage.Delete(attachment.File.Path); err != nil && err != storage.ErrNotFound {
            return err
        }
        attachment.Cached = false
    }

    if attachment.Thumbnail.Path != "" {
        // delete the thumbnail from storage
        logrus.Tracef("PruneOne: deleting %s", attachment.Thumbnail.Path)
        if err := m.storage.Delete(attachment.Thumbnail.Path); err != nil && err != storage.ErrNotFound {
            return err
        }
        attachment.Cached = false
    }

    // update the attachment to reflect that we no longer have it cached
    return m.db.UpdateByPrimaryKey(ctx, attachment)
}
111
internal/media/pruneremote_test.go
Normal file
111
internal/media/pruneremote_test.go
Normal file
|
|
@ -0,0 +1,111 @@
/*
   GoToSocial
   Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU Affero General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Affero General Public License for more details.

   You should have received a copy of the GNU Affero General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package media_test

import (
	"bytes"
	"context"
	"io"
	"os"
	"testing"

	"codeberg.org/gruf/go-store/storage"
	"github.com/stretchr/testify/suite"
)

type PruneRemoteTestSuite struct {
	MediaStandardTestSuite
}

func (suite *PruneRemoteTestSuite) TestPruneRemote() {
	testAttachment := suite.testAttachments["remote_account_1_status_1_attachment_1"]
	suite.True(testAttachment.Cached)

	totalPruned, err := suite.manager.PruneRemote(context.Background(), 1)
	suite.NoError(err)
	suite.Equal(1, totalPruned)

	prunedAttachment, err := suite.db.GetAttachmentByID(context.Background(), testAttachment.ID)
	suite.NoError(err)

	// the media should no longer be cached
	suite.False(prunedAttachment.Cached)
}

func (suite *PruneRemoteTestSuite) TestPruneRemoteTwice() {
	totalPruned, err := suite.manager.PruneRemote(context.Background(), 1)
	suite.NoError(err)
	suite.Equal(1, totalPruned)

	// final prune should prune nothing, since the first prune already happened
	totalPrunedAgain, err := suite.manager.PruneRemote(context.Background(), 1)
	suite.NoError(err)
	suite.Equal(0, totalPrunedAgain)
}

func (suite *PruneRemoteTestSuite) TestPruneAndRecache() {
	ctx := context.Background()
	testAttachment := suite.testAttachments["remote_account_1_status_1_attachment_1"]

	totalPruned, err := suite.manager.PruneRemote(ctx, 1)
	suite.NoError(err)
	suite.Equal(1, totalPruned)

	// media should no longer be stored
	_, err = suite.storage.Get(testAttachment.File.Path)
	suite.Error(err)
	suite.ErrorIs(err, storage.ErrNotFound)
	_, err = suite.storage.Get(testAttachment.Thumbnail.Path)
	suite.Error(err)
	suite.ErrorIs(err, storage.ErrNotFound)

	// now recache the image....
	data := func(_ context.Context) (io.Reader, int, error) {
		// load bytes from a test image
		b, err := os.ReadFile("../../testrig/media/thoughtsofdog-original.jpeg")
		if err != nil {
			panic(err)
		}
		return bytes.NewBuffer(b), len(b), nil
	}
	processingRecache, err := suite.manager.RecacheMedia(ctx, data, nil, testAttachment.ID)
	suite.NoError(err)

	// synchronously load the recached attachment
	recachedAttachment, err := processingRecache.LoadAttachment(ctx)
	suite.NoError(err)
	suite.NotNil(recachedAttachment)

	// recachedAttachment should be basically the same as the old attachment
	suite.True(recachedAttachment.Cached)
	suite.Equal(testAttachment.ID, recachedAttachment.ID)
	suite.Equal(testAttachment.File.Path, recachedAttachment.File.Path)           // file should be stored in the same place
	suite.Equal(testAttachment.Thumbnail.Path, recachedAttachment.Thumbnail.Path) // as should the thumbnail
	suite.EqualValues(testAttachment.FileMeta, recachedAttachment.FileMeta)       // and the filemeta should be the same

	// recached files should be back in storage
	_, err = suite.storage.Get(recachedAttachment.File.Path)
	suite.NoError(err)
	_, err = suite.storage.Get(recachedAttachment.Thumbnail.Path)
	suite.NoError(err)
}

func TestPruneRemoteTestSuite(t *testing.T) {
	suite.Run(t, &PruneRemoteTestSuite{})
}
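Editor's note: the tests above pin down the contract of PruneRemote as added in this commit: remote attachments older than the given number of days have their original and thumbnail files removed from storage and their Cached flag set to false, while the database row is kept so the media can be fetched again on demand. The commit log also mentions a GetRemoteOlderThan query used for the selection. The manager's real implementation is not shown in this hunk, so the following is only a minimal, self-contained sketch of that prune loop; the Attachment, attachmentDB, and storeDeleter types and their method signatures are hypothetical stand-ins, not the PR's actual interfaces.

package prune

import (
	"context"
	"fmt"
	"time"
)

// Attachment is a hypothetical stand-in for gtsmodel.MediaAttachment.
type Attachment struct {
	ID        string
	RemoteURL string // non-empty for remote media
	FilePath  string
	ThumbPath string
	Cached    bool
	CreatedAt time.Time
}

// storeDeleter is a hypothetical stand-in for the storage backend.
type storeDeleter interface {
	Delete(key string) error
}

// attachmentDB is a hypothetical stand-in for the db layer.
type attachmentDB interface {
	// GetRemoteOlderThan returns cached remote attachments created before the cutoff.
	GetRemoteOlderThan(ctx context.Context, cutoff time.Time) ([]*Attachment, error)
	UpdateAttachment(ctx context.Context, a *Attachment) error
}

// pruneRemote mirrors the behaviour exercised by TestPruneRemote: it uncaches remote
// media older than olderThanDays and reports how many attachments were pruned.
func pruneRemote(ctx context.Context, db attachmentDB, store storeDeleter, olderThanDays int) (int, error) {
	cutoff := time.Now().Add(-24 * time.Hour * time.Duration(olderThanDays))

	attachments, err := db.GetRemoteOlderThan(ctx, cutoff)
	if err != nil {
		return 0, fmt.Errorf("selecting prunable attachments: %w", err)
	}

	pruned := 0
	for _, a := range attachments {
		// remove the stored original and thumbnail...
		if err := store.Delete(a.FilePath); err != nil {
			return pruned, err
		}
		if err := store.Delete(a.ThumbPath); err != nil {
			return pruned, err
		}

		// ...but keep the db row, marked as no longer cached, so the
		// file can be re-fetched (recached) later if it's requested again.
		a.Cached = false
		if err := db.UpdateAttachment(ctx, a); err != nil {
			return pruned, err
		}
		pruned++
	}

	return pruned, nil
}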
@ -23,6 +23,7 @@ import (
	"fmt"

	"github.com/h2non/filetype"
	"github.com/sirupsen/logrus"
)

// parseContentType parses the MIME content type from a file, returning it as a string in the form (eg., "image/jpeg").

@ -103,3 +104,17 @@ func ParseMediaSize(s string) (Size, error) {
	}
	return "", fmt.Errorf("%s not a recognized MediaSize", s)
}

// logrusWrapper is just a util for passing the logrus logger into the cron logging system.
type logrusWrapper struct {
}

// Info logs routine messages about cron's operation.
func (l *logrusWrapper) Info(msg string, keysAndValues ...interface{}) {
	logrus.Info("media manager cron logger: ", msg, keysAndValues)
}

// Error logs an error condition.
func (l *logrusWrapper) Error(err error, msg string, keysAndValues ...interface{}) {
	logrus.Error("media manager cron logger: ", err, msg, keysAndValues)
}
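Editor's note: logrusWrapper exists so the cron scheduler added by this commit ("add cron package", "start and stop cron jobs gracefully") can log through logrus. Its Info/Error signatures match the Logger interface of github.com/robfig/cron/v3; assuming that is the cron package in use (the diff itself does not name it), wiring the wrapper into a nightly prune could look roughly like the sketch below. scheduleNightlyPrune, remotePruner, and logrusCronLogger are illustrative names of my own, and cacheDays would come from the media-remote-cache-days config value introduced in this PR.

package prunecron

import (
	"context"
	"time"

	"github.com/robfig/cron/v3"
	"github.com/sirupsen/logrus"
)

// logrusCronLogger mirrors the logrusWrapper from the diff above:
// it satisfies cron's Logger interface by forwarding to logrus.
type logrusCronLogger struct{}

func (l *logrusCronLogger) Info(msg string, keysAndValues ...interface{}) {
	logrus.Info("media manager cron logger: ", msg, keysAndValues)
}

func (l *logrusCronLogger) Error(err error, msg string, keysAndValues ...interface{}) {
	logrus.Error("media manager cron logger: ", err, msg, keysAndValues)
}

// remotePruner is a hypothetical stand-in for the media manager's PruneRemote method.
type remotePruner interface {
	PruneRemote(ctx context.Context, olderThanDays int) (int, error)
}

// scheduleNightlyPrune registers a prune job and starts the scheduler.
func scheduleNightlyPrune(pruner remotePruner, cacheDays int) (*cron.Cron, error) {
	// pass the logger wrapper in so cron's own messages end up in the normal log
	c := cron.New(cron.WithLogger(&logrusCronLogger{}))

	if _, err := c.AddFunc("@midnight", func() {
		ctx, cancel := context.WithTimeout(context.Background(), time.Hour)
		defer cancel()

		pruned, err := pruner.PruneRemote(ctx, cacheDays)
		if err != nil {
			logrus.Errorf("error pruning remote media: %s", err)
			return
		}
		logrus.Infof("pruned %d remote media attachments", pruned)
	}); err != nil {
		return nil, err
	}

	c.Start() // cron runs its jobs in their own goroutines
	return c, nil
}

Graceful shutdown is then a matter of calling Stop, which returns a context that is done once any in-flight job has finished: stopCtx := c.Stop(); <-stopCtx.Done().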
@ -38,6 +38,6 @@ func (p *processor) MediaUpdate(ctx context.Context, authed *oauth.Auth, mediaAt
	return p.mediaProcessor.Update(ctx, authed.Account, mediaAttachmentID, form)
}

func (p *processor) FileGet(ctx context.Context, authed *oauth.Auth, form *apimodel.GetContentRequestForm) (*apimodel.Content, error) {
func (p *processor) FileGet(ctx context.Context, authed *oauth.Auth, form *apimodel.GetContentRequestForm) (*apimodel.Content, gtserror.WithCode) {
	return p.mediaProcessor.GetFile(ctx, authed.Account, form)
}
@ -19,8 +19,11 @@
package media

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"net/url"
	"strings"

	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"

@ -29,7 +32,7 @@ import (
	"github.com/superseriousbusiness/gotosocial/internal/media"
)

func (p *processor) GetFile(ctx context.Context, account *gtsmodel.Account, form *apimodel.GetContentRequestForm) (*apimodel.Content, error) {
func (p *processor) GetFile(ctx context.Context, account *gtsmodel.Account, form *apimodel.GetContentRequestForm) (*apimodel.Content, gtserror.WithCode) {
	// parse the form fields
	mediaSize, err := media.ParseMediaSize(form.MediaSize)
	if err != nil {

@ -46,74 +49,208 @@ func (p *processor) GetFile(ctx context.Context, account *gtsmodel.Account, form
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("file name %s not parseable", form.FileName))
	}
	wantedMediaID := spl[0]
	expectedAccountID := form.AccountID

	// get the account that owns the media and make sure it's not suspended
	acct, err := p.db.GetAccountByID(ctx, form.AccountID)
	acct, err := p.db.GetAccountByID(ctx, expectedAccountID)
	if err != nil {
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("account with id %s could not be selected from the db: %s", form.AccountID, err))
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("account with id %s could not be selected from the db: %s", expectedAccountID, err))
	}
	if !acct.SuspendedAt.IsZero() {
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("account with id %s is suspended", form.AccountID))
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("account with id %s is suspended", expectedAccountID))
	}

	// make sure the requesting account and the media account don't block each other
	if account != nil {
		blocked, err := p.db.IsBlocked(ctx, account.ID, form.AccountID, true)
		blocked, err := p.db.IsBlocked(ctx, account.ID, expectedAccountID, true)
		if err != nil {
			return nil, gtserror.NewErrorNotFound(fmt.Errorf("block status could not be established between accounts %s and %s: %s", form.AccountID, account.ID, err))
			return nil, gtserror.NewErrorNotFound(fmt.Errorf("block status could not be established between accounts %s and %s: %s", expectedAccountID, account.ID, err))
		}
		if blocked {
			return nil, gtserror.NewErrorNotFound(fmt.Errorf("block exists between accounts %s and %s", form.AccountID, account.ID))
			return nil, gtserror.NewErrorNotFound(fmt.Errorf("block exists between accounts %s and %s", expectedAccountID, account.ID))
		}
	}

	// the way we store emojis is a little different from the way we store other attachments,
	// so we need to take different steps depending on the media type being requested
	content := &apimodel.Content{}
	var storagePath string
	switch mediaType {
	case media.TypeEmoji:
		e := &gtsmodel.Emoji{}
		if err := p.db.GetByID(ctx, wantedMediaID, e); err != nil {
			return nil, gtserror.NewErrorNotFound(fmt.Errorf("emoji %s could not be taken from the db: %s", wantedMediaID, err))
		}
		if e.Disabled {
			return nil, gtserror.NewErrorNotFound(fmt.Errorf("emoji %s has been disabled", wantedMediaID))
		}
		switch mediaSize {
		case media.SizeOriginal:
			content.ContentType = e.ImageContentType
			content.ContentLength = int64(e.ImageFileSize)
			storagePath = e.ImagePath
		case media.SizeStatic:
			content.ContentType = e.ImageStaticContentType
			content.ContentLength = int64(e.ImageStaticFileSize)
			storagePath = e.ImageStaticPath
		default:
			return nil, gtserror.NewErrorNotFound(fmt.Errorf("media size %s not recognized for emoji", mediaSize))
		}
		return p.getEmojiContent(ctx, wantedMediaID, mediaSize)
	case media.TypeAttachment, media.TypeHeader, media.TypeAvatar:
		a, err := p.db.GetAttachmentByID(ctx, wantedMediaID)
		if err != nil {
			return nil, gtserror.NewErrorNotFound(fmt.Errorf("attachment %s could not be taken from the db: %s", wantedMediaID, err))
		return p.getAttachmentContent(ctx, account, wantedMediaID, expectedAccountID, mediaSize)
	default:
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("media type %s not recognized", mediaType))
	}
}

func (p *processor) getAttachmentContent(ctx context.Context, requestingAccount *gtsmodel.Account, wantedMediaID string, expectedAccountID string, mediaSize media.Size) (*apimodel.Content, gtserror.WithCode) {
	attachmentContent := &apimodel.Content{}
	var storagePath string

	// retrieve attachment from the database and do basic checks on it
	a, err := p.db.GetAttachmentByID(ctx, wantedMediaID)
	if err != nil {
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("attachment %s could not be taken from the db: %s", wantedMediaID, err))
	}

	if a.AccountID != expectedAccountID {
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("attachment %s is not owned by %s", wantedMediaID, expectedAccountID))
	}

	// get file information from the attachment depending on the requested media size
	switch mediaSize {
	case media.SizeOriginal:
		attachmentContent.ContentType = a.File.ContentType
		attachmentContent.ContentLength = int64(a.File.FileSize)
		storagePath = a.File.Path
	case media.SizeSmall:
		attachmentContent.ContentType = a.Thumbnail.ContentType
		attachmentContent.ContentLength = int64(a.Thumbnail.FileSize)
		storagePath = a.Thumbnail.Path
	default:
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("media size %s not recognized for attachment", mediaSize))
	}

	// if we have the media cached on our server already, we can now simply return it from storage
	if a.Cached {
		return p.streamFromStorage(storagePath, attachmentContent)
	}

	// if we don't have it cached, then we can assume two things:
	// 1. this is remote media, since local media should never be uncached
	// 2. we need to fetch it again using a transport and the media manager
	remoteMediaIRI, err := url.Parse(a.RemoteURL)
	if err != nil {
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("error parsing remote media iri %s: %s", a.RemoteURL, err))
	}

	// use an empty string as requestingUsername to use the instance account, unless the request for this
	// media has been http signed, then use the requesting account to make the request to remote server
	var requestingUsername string
	if requestingAccount != nil {
		requestingUsername = requestingAccount.Username
	}

	var data media.DataFunc
	var postDataCallback media.PostDataCallbackFunc

	if mediaSize == media.SizeSmall {
		// if it's the thumbnail that's requested then the user will have to wait a bit while we process the
		// large version and derive a thumbnail from it, so use the normal recaching procedure: fetch the media,
		// process it, then return the thumbnail data
		data = func(innerCtx context.Context) (io.Reader, int, error) {
			transport, err := p.transportController.NewTransportForUsername(innerCtx, requestingUsername)
			if err != nil {
				return nil, 0, err
			}
			return transport.DereferenceMedia(innerCtx, remoteMediaIRI)
		}
		if a.AccountID != form.AccountID {
			return nil, gtserror.NewErrorNotFound(fmt.Errorf("attachment %s is not owned by %s", wantedMediaID, form.AccountID))
	} else {
		// if it's the full-sized version being requested, we can cheat a bit by streaming data to the user as
		// it's retrieved from the remote server, using tee; this saves the user from having to wait while
		// we process the media on our side
		//
		// this looks a bit like this:
		//
		//                http fetch                      buffered pipe
		// remote server ------------> data function ----------------> api caller
		//                                   |
		//                                   | tee
		//                                   |
		//                                   ▼
		//                          instance storage

		// Buffer each end of the pipe, so that if the caller drops the connection during the flow, the tee
		// reader can continue without having to worry about tee-ing into a closed or blocked pipe.
		pipeReader, pipeWriter := io.Pipe()
		bufferedWriter := bufio.NewWriterSize(pipeWriter, int(attachmentContent.ContentLength))
		bufferedReader := bufio.NewReaderSize(pipeReader, int(attachmentContent.ContentLength))

		// the caller will read from the buffered reader, so it doesn't matter if they drop out without reading everything
		attachmentContent.Content = bufferedReader

		data = func(innerCtx context.Context) (io.Reader, int, error) {
			transport, err := p.transportController.NewTransportForUsername(innerCtx, requestingUsername)
			if err != nil {
				return nil, 0, err
			}

			readCloser, fileSize, err := transport.DereferenceMedia(innerCtx, remoteMediaIRI)
			if err != nil {
				return nil, 0, err
			}

			// everything read from the readCloser by the media manager will be written into the bufferedWriter
			teeReader := io.TeeReader(readCloser, bufferedWriter)
			return teeReader, fileSize, nil
		}
		switch mediaSize {
		case media.SizeOriginal:
			content.ContentType = a.File.ContentType
			content.ContentLength = int64(a.File.FileSize)
			storagePath = a.File.Path
		case media.SizeSmall:
			content.ContentType = a.Thumbnail.ContentType
			content.ContentLength = int64(a.Thumbnail.FileSize)
			storagePath = a.Thumbnail.Path
		default:
			return nil, gtserror.NewErrorNotFound(fmt.Errorf("media size %s not recognized for attachment", mediaSize))

		// close the pipewriter after data has been piped into it, so the reader on the other side doesn't block;
		// we don't need to close the reader here because that's the caller's responsibility
		postDataCallback = func(innerCtx context.Context) error {
			// flush the buffered writer into the buffer of the reader...
			if err := bufferedWriter.Flush(); err != nil {
				return err
			}

			// and close the underlying pipe writer
			if err := pipeWriter.Close(); err != nil {
				return err
			}

			return nil
		}
	}

	// put the media recached in the queue
	processingMedia, err := p.mediaManager.RecacheMedia(ctx, data, postDataCallback, wantedMediaID)
	if err != nil {
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("error recaching media: %s", err))
	}

	// if it's the thumbnail, stream the processed thumbnail from storage, after waiting for processing to finish
	if mediaSize == media.SizeSmall {
		// below function call blocks until all processing on the attachment has finished...
		if _, err := processingMedia.LoadAttachment(ctx); err != nil {
			return nil, gtserror.NewErrorNotFound(fmt.Errorf("error loading recached attachment: %s", err))
		}
		// ... so now we can safely return it
		return p.streamFromStorage(storagePath, attachmentContent)
	}

	return attachmentContent, nil
}

func (p *processor) getEmojiContent(ctx context.Context, wantedEmojiID string, emojiSize media.Size) (*apimodel.Content, gtserror.WithCode) {
	emojiContent := &apimodel.Content{}
	var storagePath string

	e := &gtsmodel.Emoji{}
	if err := p.db.GetByID(ctx, wantedEmojiID, e); err != nil {
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("emoji %s could not be taken from the db: %s", wantedEmojiID, err))
	}

	if e.Disabled {
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("emoji %s has been disabled", wantedEmojiID))
	}

	switch emojiSize {
	case media.SizeOriginal:
		emojiContent.ContentType = e.ImageContentType
		emojiContent.ContentLength = int64(e.ImageFileSize)
		storagePath = e.ImagePath
	case media.SizeStatic:
		emojiContent.ContentType = e.ImageStaticContentType
		emojiContent.ContentLength = int64(e.ImageStaticFileSize)
		storagePath = e.ImageStaticPath
	default:
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("media size %s not recognized for emoji", emojiSize))
	}

	return p.streamFromStorage(storagePath, emojiContent)
}

func (p *processor) streamFromStorage(storagePath string, content *apimodel.Content) (*apimodel.Content, gtserror.WithCode) {
	reader, err := p.storage.GetStream(storagePath)
	if err != nil {
		return nil, gtserror.NewErrorNotFound(fmt.Errorf("error retrieving from storage: %s", err))
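Editor's note: the full-size branch above combines io.Pipe, bufio, and io.TeeReader so that a single remote fetch feeds both the API caller and the recache step, with buffering to decouple the storage-side copy from how fast (or whether) the caller reads. The following stdlib-only sketch shows the same pattern in isolation; none of the names here are GoToSocial types, and the "remote" reader and in-memory "storage" are stand-ins for the HTTP response body and the instance's object storage.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	// pretend this is the body of the HTTP response from the remote server
	remote := strings.NewReader(strings.Repeat("media bytes! ", 100))
	contentLength := remote.Len()

	// stands in for the instance's object storage
	storage := &bytes.Buffer{}

	// buffer both ends of the pipe, as in the handler above, so the storage-side
	// copy is not tightly coupled to the pace of the caller's reads
	pipeReader, pipeWriter := io.Pipe()
	bufferedWriter := bufio.NewWriterSize(pipeWriter, contentLength)
	bufferedReader := bufio.NewReaderSize(pipeReader, contentLength)

	// "media manager" side: everything read from the remote response is tee'd
	// into the buffered writer while being copied into storage (the recache)
	go func() {
		tee := io.TeeReader(remote, bufferedWriter)
		if _, err := io.Copy(storage, tee); err != nil {
			fmt.Println("recache error:", err)
		}
		// post-data callback equivalent: flush what's buffered into the pipe and
		// close the writer so the caller's reader sees EOF instead of blocking
		bufferedWriter.Flush()
		pipeWriter.Close()
	}()

	// "API caller" side: reads the same bytes from the other end of the pipe
	served, err := io.ReadAll(bufferedReader)
	if err != nil {
		panic(err)
	}

	fmt.Printf("stored %d bytes, served %d bytes\n", storage.Len(), len(served))
}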
internal/processing/media/getfile_test.go (new normal file, 208 lines)

@ -0,0 +1,208 @@
/*
   GoToSocial
   Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU Affero General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Affero General Public License for more details.

   You should have received a copy of the GNU Affero General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package media_test

import (
	"context"
	"io"
	"path"
	"testing"
	"time"

	"github.com/stretchr/testify/suite"
	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
	"github.com/superseriousbusiness/gotosocial/internal/media"
)

type GetFileTestSuite struct {
	MediaStandardTestSuite
}

func (suite *GetFileTestSuite) TestGetRemoteFileCached() {
	ctx := context.Background()

	testAttachment := suite.testAttachments["remote_account_1_status_1_attachment_1"]
	fileName := path.Base(testAttachment.File.Path)
	requestingAccount := suite.testAccounts["local_account_1"]

	content, errWithCode := suite.mediaProcessor.GetFile(ctx, requestingAccount, &apimodel.GetContentRequestForm{
		AccountID: testAttachment.AccountID,
		MediaType: string(media.TypeAttachment),
		MediaSize: string(media.SizeOriginal),
		FileName:  fileName,
	})

	suite.NoError(errWithCode)
	suite.NotNil(content)
	b, err := io.ReadAll(content.Content)
	suite.NoError(err)

	if closer, ok := content.Content.(io.Closer); ok {
		suite.NoError(closer.Close())
	}

	suite.Equal(suite.testRemoteAttachments[testAttachment.RemoteURL].Data, b)
	suite.Equal(suite.testRemoteAttachments[testAttachment.RemoteURL].ContentType, content.ContentType)
	suite.EqualValues(len(suite.testRemoteAttachments[testAttachment.RemoteURL].Data), content.ContentLength)
}

func (suite *GetFileTestSuite) TestGetRemoteFileUncached() {
	ctx := context.Background()

	// uncache the file from local
	testAttachment := suite.testAttachments["remote_account_1_status_1_attachment_1"]
	testAttachment.Cached = false
	err := suite.db.UpdateByPrimaryKey(ctx, testAttachment)
	suite.NoError(err)
	err = suite.storage.Delete(testAttachment.File.Path)
	suite.NoError(err)
	err = suite.storage.Delete(testAttachment.Thumbnail.Path)
	suite.NoError(err)

	// now fetch it
	fileName := path.Base(testAttachment.File.Path)
	requestingAccount := suite.testAccounts["local_account_1"]

	content, errWithCode := suite.mediaProcessor.GetFile(ctx, requestingAccount, &apimodel.GetContentRequestForm{
		AccountID: testAttachment.AccountID,
		MediaType: string(media.TypeAttachment),
		MediaSize: string(media.SizeOriginal),
		FileName:  fileName,
	})

	suite.NoError(errWithCode)
	suite.NotNil(content)
	b, err := io.ReadAll(content.Content)
	suite.NoError(err)

	if closer, ok := content.Content.(io.Closer); ok {
		suite.NoError(closer.Close())
	}

	suite.Equal(suite.testRemoteAttachments[testAttachment.RemoteURL].Data, b)
	suite.Equal(suite.testRemoteAttachments[testAttachment.RemoteURL].ContentType, content.ContentType)
	suite.EqualValues(len(suite.testRemoteAttachments[testAttachment.RemoteURL].Data), content.ContentLength)
	time.Sleep(2 * time.Second) // wait a few seconds for the media manager to finish doing stuff

	// the attachment should be updated in the database
	dbAttachment, err := suite.db.GetAttachmentByID(ctx, testAttachment.ID)
	suite.NoError(err)
	suite.True(dbAttachment.Cached)

	// the file should be back in storage at the same path as before
	refreshedBytes, err := suite.storage.Get(testAttachment.File.Path)
	suite.NoError(err)
	suite.Equal(suite.testRemoteAttachments[testAttachment.RemoteURL].Data, refreshedBytes)
}

func (suite *GetFileTestSuite) TestGetRemoteFileUncachedInterrupted() {
	ctx := context.Background()

	// uncache the file from local
	testAttachment := suite.testAttachments["remote_account_1_status_1_attachment_1"]
	testAttachment.Cached = false
	err := suite.db.UpdateByPrimaryKey(ctx, testAttachment)
	suite.NoError(err)
	err = suite.storage.Delete(testAttachment.File.Path)
	suite.NoError(err)
	err = suite.storage.Delete(testAttachment.Thumbnail.Path)
	suite.NoError(err)

	// now fetch it
	fileName := path.Base(testAttachment.File.Path)
	requestingAccount := suite.testAccounts["local_account_1"]

	content, errWithCode := suite.mediaProcessor.GetFile(ctx, requestingAccount, &apimodel.GetContentRequestForm{
		AccountID: testAttachment.AccountID,
		MediaType: string(media.TypeAttachment),
		MediaSize: string(media.SizeOriginal),
		FileName:  fileName,
	})

	suite.NoError(errWithCode)
	suite.NotNil(content)

	// only read the first kilobyte and then stop
	b := make([]byte, 1024)
	_, err = content.Content.Read(b)
	suite.NoError(err)

	// close the reader
	if closer, ok := content.Content.(io.Closer); ok {
		suite.NoError(closer.Close())
	}

	time.Sleep(2 * time.Second) // wait a few seconds for the media manager to finish doing stuff

	// the attachment should still be updated in the database even though the caller hung up
	dbAttachment, err := suite.db.GetAttachmentByID(ctx, testAttachment.ID)
	suite.NoError(err)
	suite.True(dbAttachment.Cached)

	// the file should be back in storage at the same path as before
	refreshedBytes, err := suite.storage.Get(testAttachment.File.Path)
	suite.NoError(err)
	suite.Equal(suite.testRemoteAttachments[testAttachment.RemoteURL].Data, refreshedBytes)
}

func (suite *GetFileTestSuite) TestGetRemoteFileThumbnailUncached() {
	ctx := context.Background()
	testAttachment := suite.testAttachments["remote_account_1_status_1_attachment_1"]

	// fetch the existing thumbnail bytes from storage first
	thumbnailBytes, err := suite.storage.Get(testAttachment.Thumbnail.Path)
	suite.NoError(err)

	// uncache the file from local
	testAttachment.Cached = false
	err = suite.db.UpdateByPrimaryKey(ctx, testAttachment)
	suite.NoError(err)
	err = suite.storage.Delete(testAttachment.File.Path)
	suite.NoError(err)
	err = suite.storage.Delete(testAttachment.Thumbnail.Path)
	suite.NoError(err)

	// now fetch the thumbnail
	fileName := path.Base(testAttachment.File.Path)
	requestingAccount := suite.testAccounts["local_account_1"]

	content, errWithCode := suite.mediaProcessor.GetFile(ctx, requestingAccount, &apimodel.GetContentRequestForm{
		AccountID: testAttachment.AccountID,
		MediaType: string(media.TypeAttachment),
		MediaSize: string(media.SizeSmall),
		FileName:  fileName,
	})

	suite.NoError(errWithCode)
	suite.NotNil(content)
	b, err := io.ReadAll(content.Content)
	suite.NoError(err)

	if closer, ok := content.Content.(io.Closer); ok {
		suite.NoError(closer.Close())
	}

	suite.Equal(thumbnailBytes, b)
	suite.Equal("image/jpeg", content.ContentType)
	suite.EqualValues(testAttachment.Thumbnail.FileSize, content.ContentLength)
}

func TestGetFileTestSuite(t *testing.T) {
	suite.Run(t, &GetFileTestSuite{})
}
@ -27,6 +27,7 @@ import (
	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
	"github.com/superseriousbusiness/gotosocial/internal/media"
	"github.com/superseriousbusiness/gotosocial/internal/transport"
	"github.com/superseriousbusiness/gotosocial/internal/typeutils"
)

@ -36,24 +37,27 @@ type Processor interface {
	Create(ctx context.Context, account *gtsmodel.Account, form *apimodel.AttachmentRequest) (*apimodel.Attachment, error)
	// Delete deletes the media attachment with the given ID, including all files pertaining to that attachment.
	Delete(ctx context.Context, mediaAttachmentID string) gtserror.WithCode
	GetFile(ctx context.Context, account *gtsmodel.Account, form *apimodel.GetContentRequestForm) (*apimodel.Content, error)
	// GetFile retrieves a file from storage and streams it back to the caller via an io.reader embedded in *apimodel.Content.
	GetFile(ctx context.Context, account *gtsmodel.Account, form *apimodel.GetContentRequestForm) (*apimodel.Content, gtserror.WithCode)
	GetMedia(ctx context.Context, account *gtsmodel.Account, mediaAttachmentID string) (*apimodel.Attachment, gtserror.WithCode)
	Update(ctx context.Context, account *gtsmodel.Account, mediaAttachmentID string, form *apimodel.AttachmentUpdateRequest) (*apimodel.Attachment, gtserror.WithCode)
}

type processor struct {
	tc           typeutils.TypeConverter
	mediaManager media.Manager
	storage      *kv.KVStore
	db           db.DB
	tc                  typeutils.TypeConverter
	mediaManager        media.Manager
	transportController transport.Controller
	storage             *kv.KVStore
	db                  db.DB
}

// New returns a new media processor.
func New(db db.DB, tc typeutils.TypeConverter, mediaManager media.Manager, storage *kv.KVStore) Processor {
func New(db db.DB, tc typeutils.TypeConverter, mediaManager media.Manager, transportController transport.Controller, storage *kv.KVStore) Processor {
	return &processor{
		tc:           tc,
		mediaManager: mediaManager,
		storage:      storage,
		db:           db,
		tc:                  tc,
		mediaManager:        mediaManager,
		transportController: transportController,
		storage:             storage,
		db:                  db,
	}
}
internal/processing/media/media_test.go (new normal file, 125 lines)

@ -0,0 +1,125 @@
/*
   GoToSocial
   Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU Affero General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Affero General Public License for more details.

   You should have received a copy of the GNU Affero General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package media_test

import (
	"bytes"
	"io"
	"net/http"

	"codeberg.org/gruf/go-store/kv"
	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/suite"
	"github.com/superseriousbusiness/gotosocial/internal/db"
	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
	"github.com/superseriousbusiness/gotosocial/internal/media"
	mediaprocessing "github.com/superseriousbusiness/gotosocial/internal/processing/media"
	"github.com/superseriousbusiness/gotosocial/internal/transport"
	"github.com/superseriousbusiness/gotosocial/internal/typeutils"
	"github.com/superseriousbusiness/gotosocial/testrig"
)

type MediaStandardTestSuite struct {
	// standard suite interfaces
	suite.Suite
	db                  db.DB
	tc                  typeutils.TypeConverter
	storage             *kv.KVStore
	mediaManager        media.Manager
	transportController transport.Controller

	// standard suite models
	testTokens            map[string]*gtsmodel.Token
	testClients           map[string]*gtsmodel.Client
	testApplications      map[string]*gtsmodel.Application
	testUsers             map[string]*gtsmodel.User
	testAccounts          map[string]*gtsmodel.Account
	testAttachments       map[string]*gtsmodel.MediaAttachment
	testStatuses          map[string]*gtsmodel.Status
	testRemoteAttachments map[string]testrig.RemoteAttachmentFile

	// module being tested
	mediaProcessor mediaprocessing.Processor
}

func (suite *MediaStandardTestSuite) SetupSuite() {
	suite.testTokens = testrig.NewTestTokens()
	suite.testClients = testrig.NewTestClients()
	suite.testApplications = testrig.NewTestApplications()
	suite.testUsers = testrig.NewTestUsers()
	suite.testAccounts = testrig.NewTestAccounts()
	suite.testAttachments = testrig.NewTestAttachments()
	suite.testStatuses = testrig.NewTestStatuses()
	suite.testRemoteAttachments = testrig.NewTestFediAttachments("../../../testrig/media")
}

func (suite *MediaStandardTestSuite) SetupTest() {
	testrig.InitTestConfig()
	testrig.InitTestLog()

	suite.db = testrig.NewTestDB()
	suite.tc = testrig.NewTestTypeConverter(suite.db)
	suite.storage = testrig.NewTestStorage()
	suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
	suite.transportController = suite.mockTransportController()
	suite.mediaProcessor = mediaprocessing.New(suite.db, suite.tc, suite.mediaManager, suite.transportController, suite.storage)
	testrig.StandardDBSetup(suite.db, nil)
	testrig.StandardStorageSetup(suite.storage, "../../../testrig/media")
}

func (suite *MediaStandardTestSuite) TearDownTest() {
	testrig.StandardDBTeardown(suite.db)
	testrig.StandardStorageTeardown(suite.storage)
}

func (suite *MediaStandardTestSuite) mockTransportController() transport.Controller {
	do := func(req *http.Request) (*http.Response, error) {
		logrus.Debugf("received request for %s", req.URL)

		responseBytes := []byte{}
		responseType := ""
		responseLength := 0

		if attachment, ok := suite.testRemoteAttachments[req.URL.String()]; ok {
			responseBytes = attachment.Data
			responseType = attachment.ContentType
		}

		if len(responseBytes) != 0 {
			// we found something, so print what we're going to return
			logrus.Debugf("returning response %s", string(responseBytes))
		}
		responseLength = len(responseBytes)

		reader := bytes.NewReader(responseBytes)
		readCloser := io.NopCloser(reader)
		response := &http.Response{
			StatusCode:    200,
			Body:          readCloser,
			ContentLength: int64(responseLength),
			Header: http.Header{
				"content-type": {responseType},
			},
		}

		return response, nil
	}
	mockClient := testrig.NewMockHTTPClient(do)
	return testrig.NewTestTransportController(mockClient, suite.db)
}
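Editor's note: the mock transport controller above is built through testrig helpers (NewMockHTTPClient, NewTestTransportController), but the underlying idea is just an http.Client that never leaves the process. For readers who want the same trick outside the testrig, here is a minimal stdlib-only sketch; fixtureRoundTripper and newFixtureClient are illustrative names of my own, not part of the PR.

package stubclient

import (
	"bytes"
	"io"
	"net/http"
)

// fixtureRoundTripper serves canned bytes for known URLs instead of talking to the network.
type fixtureRoundTripper struct {
	fixtures map[string][]byte // URL -> response body
}

func (f *fixtureRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	body, ok := f.fixtures[req.URL.String()]
	status := http.StatusOK
	if !ok {
		status = http.StatusNotFound
	}
	return &http.Response{
		StatusCode:    status,
		Body:          io.NopCloser(bytes.NewReader(body)),
		ContentLength: int64(len(body)),
		Header:        http.Header{"Content-Type": {"image/jpeg"}},
		Request:       req,
	}, nil
}

// newFixtureClient returns an *http.Client whose requests are answered from the
// fixture map, which is the same effect the mock transport controller achieves above.
func newFixtureClient(fixtures map[string][]byte) *http.Client {
	return &http.Client{Transport: &fixtureRoundTripper{fixtures: fixtures}}
}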
@ -115,7 +115,7 @@ type Processor interface {
	BlocksGet(ctx context.Context, authed *oauth.Auth, maxID string, sinceID string, limit int) (*apimodel.BlocksResponse, gtserror.WithCode)

	// FileGet handles the fetching of a media attachment file via the fileserver.
	FileGet(ctx context.Context, authed *oauth.Auth, form *apimodel.GetContentRequestForm) (*apimodel.Content, error)
	FileGet(ctx context.Context, authed *oauth.Auth, form *apimodel.GetContentRequestForm) (*apimodel.Content, gtserror.WithCode)

	// FollowRequestsGet handles the getting of the authed account's incoming follow requests
	FollowRequestsGet(ctx context.Context, auth *oauth.Auth) ([]apimodel.Account, gtserror.WithCode)
@ -270,7 +270,7 @@ func NewProcessor(
	streamingProcessor := streaming.New(db, oauthServer)
	accountProcessor := account.New(db, tc, mediaManager, oauthServer, fromClientAPI, federator)
	adminProcessor := admin.New(db, tc, mediaManager, fromClientAPI)
	mediaProcessor := mediaProcessor.New(db, tc, mediaManager, storage)
	mediaProcessor := mediaProcessor.New(db, tc, mediaManager, federator.TransportController(), storage)
	userProcessor := user.New(db, emailSender)
	federationProcessor := federationProcessor.New(db, tc, federator, fromFederator)
	filter := visibility.NewFilter(db)
@ -52,7 +52,7 @@ func (suite *NotificationTestSuite) TestStreamNotification() {
	suite.NoError(err)

	msg := <-openStream.Messages
	suite.Equal(`{"id":"01FH57SJCMDWQGEAJ0X08CE3WV","type":"follow","created_at":"2021-10-04T10:52:36+02:00","account":{"id":"01F8MH5ZK5VRH73AKHQM6Y9VNX","username":"foss_satan","acct":"foss_satan@fossbros-anonymous.io","display_name":"big gerald","locked":false,"bot":false,"created_at":"2021-09-26T12:52:36+02:00","note":"i post about like, i dunno, stuff, or whatever!!!!","url":"http://fossbros-anonymous.io/@foss_satan","avatar":"","avatar_static":"","header":"","header_static":"","followers_count":0,"following_count":0,"statuses_count":0,"last_status_at":"","emojis":[],"fields":[]}}`, msg.Payload)
	suite.Equal(`{"id":"01FH57SJCMDWQGEAJ0X08CE3WV","type":"follow","created_at":"2021-10-04T10:52:36+02:00","account":{"id":"01F8MH5ZK5VRH73AKHQM6Y9VNX","username":"foss_satan","acct":"foss_satan@fossbros-anonymous.io","display_name":"big gerald","locked":false,"bot":false,"created_at":"2021-09-26T12:52:36+02:00","note":"i post about like, i dunno, stuff, or whatever!!!!","url":"http://fossbros-anonymous.io/@foss_satan","avatar":"","avatar_static":"","header":"","header_static":"","followers_count":0,"following_count":0,"statuses_count":1,"last_status_at":"2021-09-20T10:40:37Z","emojis":[],"fields":[]}}`, msg.Payload)
}

func TestNotificationTestSuite(t *testing.T) {
@ -84,8 +84,8 @@ func (suite *GetTestSuite) TestGetDefault() {
		suite.FailNow(err.Error())
	}

	// we only have 14 statuses in the test suite
	suite.Len(statuses, 14)
	// we only have 15 statuses in the test suite
	suite.Len(statuses, 15)

	// statuses should be sorted highest to lowest ID
	var highest string

@ -177,8 +177,8 @@ func (suite *GetTestSuite) TestGetMinID() {
		suite.FailNow(err.Error())
	}

	// we should only get 7 statuses back, since we asked for a min ID that excludes some of our entries
	suite.Len(statuses, 7)
	// we should only get 8 statuses back, since we asked for a min ID that excludes some of our entries
	suite.Len(statuses, 8)

	// statuses should be sorted highest to lowest ID
	var highest string

@ -199,8 +199,8 @@ func (suite *GetTestSuite) TestGetSinceID() {
		suite.FailNow(err.Error())
	}

	// we should only get 7 statuses back, since we asked for a since ID that excludes some of our entries
	suite.Len(statuses, 7)
	// we should only get 8 statuses back, since we asked for a since ID that excludes some of our entries
	suite.Len(statuses, 8)

	// statuses should be sorted highest to lowest ID
	var highest string

@ -221,8 +221,8 @@ func (suite *GetTestSuite) TestGetSinceIDPrepareNext() {
		suite.FailNow(err.Error())
	}

	// we should only get 7 statuses back, since we asked for a since ID that excludes some of our entries
	suite.Len(statuses, 7)
	// we should only get 8 statuses back, since we asked for a since ID that excludes some of our entries
	suite.Len(statuses, 8)

	// statuses should be sorted highest to lowest ID
	var highest string