diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7359d65fa..10a05002c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -538,23 +538,21 @@ It may be useful when testing or debugging migrations to be able to run them aga Basic steps for this: -1. Dump the Postgres database on the remote machine, and copy the dump over to your development machine. -2. Create a local Postgres container and mount the dump into it with, for example: - - ```bash - docker run -it --name postgres --network host -e POSTGRES_PASSWORD=postgres -v /path/to/db_dump:/db_dump postgres - ``` -3. Get a terminal inside the running container: - - ```bash - docker exec -it --user postgres postgres bash - ``` -4. Using that terminal, restore the dump (this will probably take a little while depending on the dump size and the specs of your machine): - - ```bash - psql -X postgres < /db_dump - ``` -5. With the Postgres container still running, run GoToSocial and point it towards the container. Use the appropriate `GTS_HOST` (and `GTS_ACCOUNT_DOMAIN`) values for the instance you dumped: +First dump the Postgres database on the remote machine, and copy the dump over to your development machine. + +Now create a local Postgres container and mount the dump into it, for example: + +```bash +docker run -it --name postgres --network host -e POSTGRES_PASSWORD=postgres -v /path/to/db_dump:/db_dump postgres +``` + +In a separate terminal window, execute a command inside the running container to load the dump into the "postgres" database: + +```bash +docker exec -it --user postgres postgres psql -X -f /db_dump postgres +``` + +With the Postgres container still running, run GoToSocial and point it towards the container. Use the appropriate `GTS_HOST` (and `GTS_ACCOUNT_DOMAIN`) values for the instance you dumped: ```bash GTS_HOST=example.org \ diff --git a/README.md b/README.md index 7739f1ad2..605f654f6 100644 --- a/README.md +++ b/README.md @@ -328,7 +328,7 @@ This is the current status of support offered by GoToSocial for different platfo Notes on 64-bit CPU feature requirements: -- x86_64 requires the SSE4.1 instruction set. (CPUs manufactured after ~2010) +- x86_64 requires the [x86-64-v2](https://en.wikipedia.org/wiki/X86-64-v2) instruction set level. (CPUs manufactured after ~2010) - ARM64 requires no specific features, ARMv8 CPUs (and later) have all required features.
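A quick way for contributors to confirm a host meets the x86-64-v2 requirement noted above is to ask the glibc dynamic loader which microarchitecture levels it detects. This is only a sketch: it assumes an x86_64 Linux host with glibc 2.33 or newer, and the loader path may differ between distros.

```bash
# List the x86-64 microarchitecture levels glibc detects on this host.
# A line such as "x86-64-v2 (supported, searched)" means the CPU meets
# the requirement described in the README note above.
/lib64/ld-linux-x86-64.so.2 --help | grep 'x86-64-v'
```

On older systems without this loader feature, checking /proc/cpuinfo for the SSSE3, SSE4.1, SSE4.2, and POPCNT flags that make up x86-64-v2 gives the same answer.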
diff --git a/go.mod b/go.mod index 8accf4398..b1d744ffa 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( codeberg.org/gruf/go-errors/v2 v2.3.2 codeberg.org/gruf/go-fastcopy v1.1.3 codeberg.org/gruf/go-fastpath/v2 v2.0.0 - codeberg.org/gruf/go-ffmpreg v0.6.11 + codeberg.org/gruf/go-ffmpreg v0.6.12 codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf codeberg.org/gruf/go-kv/v2 v2.0.7 codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f diff --git a/go.sum b/go.sum index 6b9b97d00..507d2d38f 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ codeberg.org/gruf/go-fastcopy v1.1.3 h1:Jo9VTQjI6KYimlw25PPc7YLA3Xm+XMQhaHwKnM7x codeberg.org/gruf/go-fastcopy v1.1.3/go.mod h1:GDDYR0Cnb3U/AIfGM3983V/L+GN+vuwVMvrmVABo21s= codeberg.org/gruf/go-fastpath/v2 v2.0.0 h1:iAS9GZahFhyWEH0KLhFEJR+txx1ZhMXxYzu2q5Qo9c0= codeberg.org/gruf/go-fastpath/v2 v2.0.0/go.mod h1:3pPqu5nZjpbRrOqvLyAK7puS1OfEtQvjd6342Cwz56Q= -codeberg.org/gruf/go-ffmpreg v0.6.11 h1:+lvB5Loy0KUAKfv6nOZRWHFVgN08cpHhUlYcZxL8M20= -codeberg.org/gruf/go-ffmpreg v0.6.11/go.mod h1:tGqIMh/I2cizqauxxNAN+WGkICI0j5G3xwF1uBkyw1E= +codeberg.org/gruf/go-ffmpreg v0.6.12 h1:mPdRx1TAQJQPhRkTOOHnRSY6omNCLJ7M6ajjuEMNNvE= +codeberg.org/gruf/go-ffmpreg v0.6.12/go.mod h1:tGqIMh/I2cizqauxxNAN+WGkICI0j5G3xwF1uBkyw1E= codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf h1:84s/ii8N6lYlskZjHH+DG6jyia8w2mXMZlRwFn8Gs3A= codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf/go.mod h1:zZAICsp5rY7+hxnws2V0ePrWxE0Z2Z/KXcN3p/RQCfk= codeberg.org/gruf/go-kv v1.6.5 h1:ttPf0NA8F79pDqBttSudPTVCZmGncumeNIxmeM9ztz0= diff --git a/internal/db/bundb/migrations/20250415111056_thread_all_statuses.go b/internal/db/bundb/migrations/20250415111056_thread_all_statuses.go index daf392ee6..9115dfe90 100644 --- a/internal/db/bundb/migrations/20250415111056_thread_all_statuses.go +++ b/internal/db/bundb/migrations/20250415111056_thread_all_statuses.go @@ -96,7 +96,7 @@ func init() { clear(statuses) statuses = statuses[:0] - start := time.Now() + batchStart := time.Now() // Select IDs of next // batch, paging down. @@ -106,46 +106,51 @@ func init() { Where("? IS NULL", bun.Ident("in_reply_to_id")). Where("? < ?", bun.Ident("id"), maxID). OrderExpr("? DESC", bun.Ident("id")). - Limit(100). + Limit(500). Scan(ctx); err != nil && !errors.Is(err, sql.ErrNoRows) { - return gtserror.Newf("error selecting top-level statuses: %w", err) + return gtserror.Newf("error selecting statuses: %w", err) } - // Every 50 loops, flush wal and begin new - // transaction, to avoid silly wal sizes. - if i%50 == 0 { + l := len(statuses) + if l == 0 { + // No more statuses! + // + // Transaction will be closed + // after leaving the loop. + break + + } else if i%100 == 0 { + // Begin a new transaction every + // 100 batches (~50000 statuses), + // to avoid massive commits. + + // Close existing transaction. if err := tx.Commit(); err != nil { return err } + // Try to flush the wal + // to avoid silly wal sizes. if err := doWALCheckpoint(ctx, db); err != nil { return err } + // Open new transaction. tx, err = db.BeginTx(ctx, nil) if err != nil { return err } } - // No more statuses! - l := len(statuses) - if l == 0 { - if err := tx.Commit(); err != nil { - return err - } - - log.Info(ctx, "done migrating statuses!") - break - } - - // Set next maxID value from statuses. + // Set next maxID + // value from statuses. maxID = statuses[l-1].ID - // Rethread inside the transaction. + // Rethread using the + // open transaction. 
var updatedRowsThisBatch int64 for _, status := range statuses { - n, err := sr.rethreadStatus(ctx, tx, status) + n, err := sr.rethreadStatus(ctx, tx, status, false) if err != nil { return gtserror.Newf("error rethreading status %s: %w", status.URI, err) } @@ -154,7 +159,7 @@ func init() { } // Show speed for this batch. - timeTaken := time.Since(start).Milliseconds() + timeTaken := time.Since(batchStart).Milliseconds() msPerRow := float64(timeTaken) / float64(updatedRowsThisBatch) rowsPerMs := float64(1) / float64(msPerRow) rowsPerSecond := 1000 * rowsPerMs @@ -164,94 +169,73 @@ func init() { log.Infof( ctx, - "[~%.2f%% done; ~%.0f rows/s] paging top-level statuses", + "[~%.2f%% done; ~%.0f rows/s] migrating threads", totalDone, rowsPerSecond, ) } - if err := doWALCheckpoint(ctx, db); err != nil { + // Close transaction. + if err := tx.Commit(); err != nil { return err } - // Reset max ID. - maxID = id.Highest - - // Create a temporary index on thread_id_new for stragglers. + // Create a partial index on thread_id_new to find stragglers. + // This index will be removed at the end of the migration. log.Info(ctx, "creating temporary statuses thread_id_new index") if _, err := db.NewCreateIndex(). Table("statuses"). Index("statuses_thread_id_new_idx"). Column("thread_id_new"). + Where("? = ?", bun.Ident("thread_id_new"), id.Lowest). Exec(ctx); err != nil { return gtserror.Newf("error creating new thread_id index: %w", err) } - // Open a new transaction lads. - tx, err = db.BeginTx(ctx, nil) - if err != nil { - return err - } - for i := 1; ; i++ { // Reset slice. clear(statuses) statuses = statuses[:0] - start := time.Now() + batchStart := time.Now() - // Select IDs of stragglers for - // which we haven't set thread_id yet. - if err := tx.NewSelect(). + // Get stragglers for which + // we haven't set thread ID yet. + if err := db.NewSelect(). Model(&statuses). Column("id"). Where("? = ?", bun.Ident("thread_id_new"), id.Lowest). - Limit(500). + Limit(250). Scan(ctx); err != nil && !errors.Is(err, sql.ErrNoRows) { - return gtserror.Newf("error selecting unthreaded statuses: %w", err) + return gtserror.Newf("error selecting straggler: %w", err) } - // Every 50 loops, flush wal and begin new - // transaction, to avoid silly wal sizes. - if i%50 == 0 { - if err := tx.Commit(); err != nil { - return err - } - - if err := doWALCheckpoint(ctx, db); err != nil { - return err - } - - tx, err = db.BeginTx(ctx, nil) - if err != nil { - return err - } - } - - // No more statuses! - l := len(statuses) - if l == 0 { - if err := tx.Commit(); err != nil { - return err - } - - log.Info(ctx, "done migrating statuses!") + if len(statuses) == 0 { + // No more + // statuses! break } - // Rethread inside the transaction. + // Update this batch + // inside a transaction. var updatedRowsThisBatch int64 - for _, status := range statuses { - n, err := sr.rethreadStatus(ctx, tx, status) - if err != nil { - return gtserror.Newf("error rethreading status %s: %w", status.URI, err) + if err := db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error { + for _, status := range statuses { + + n, err := sr.rethreadStatus(ctx, tx, status, true) + if err != nil { + return gtserror.Newf("error rethreading status %s: %w", status.URI, err) + } + updatedRowsThisBatch += n + updatedRowsTotal += n } - updatedRowsThisBatch += n - updatedRowsTotal += n + return nil + }); err != nil { + return err } // Show speed for this batch. 
- timeTaken := time.Since(start).Milliseconds() + timeTaken := time.Since(batchStart).Milliseconds() msPerRow := float64(timeTaken) / float64(updatedRowsThisBatch) rowsPerMs := float64(1) / float64(msPerRow) rowsPerSecond := 1000 * rowsPerMs @@ -261,11 +245,16 @@ func init() { log.Infof( ctx, - "[~%.2f%% done; ~%.0f rows/s] cleaning up stragglers", + "[~%.2f%% done; ~%.0f rows/s] migrating stragglers", totalDone, rowsPerSecond, ) } + // Try to merge everything we've done so far. + if err := doWALCheckpoint(ctx, db); err != nil { + return err + } + log.Info(ctx, "dropping temporary thread_id_new index") if _, err := db.NewDropIndex(). Index("statuses_thread_id_new_idx"). @@ -363,7 +352,7 @@ type statusRethreader struct { // rethreadStatus is the main logic handler for statusRethreader{}. this is what gets called from the migration // in order to trigger a status rethreading operation for the given status, returning total number of rows changed. -func (sr *statusRethreader) rethreadStatus(ctx context.Context, tx bun.Tx, status *oldmodel.Status) (int64, error) { +func (sr *statusRethreader) rethreadStatus(ctx context.Context, tx bun.Tx, status *oldmodel.Status, straggler bool) (int64, error) { // Zero slice and // map ptr values. @@ -405,11 +394,11 @@ func (sr *statusRethreader) rethreadStatus(ctx context.Context, tx bun.Tx, statu } // Set up-to-date values on the status. - if inReplyToID, ok := upToDateValues["in_reply_to_id"]; ok && inReplyToID != nil { - status.InReplyToID = inReplyToID.(string) + if v, ok := upToDateValues["in_reply_to_id"]; ok && v != nil { + status.InReplyToID = v.(string) } - if threadID, ok := upToDateValues["thread_id"]; ok && threadID != nil { - status.ThreadID = threadID.(string) + if v, ok := upToDateValues["thread_id"]; ok && v != nil { + status.ThreadID = v.(string) } // status and thread ID cursor @@ -463,7 +452,10 @@ func (sr *statusRethreader) rethreadStatus(ctx context.Context, tx bun.Tx, statu // Check for the case where the entire // batch of statuses is already correctly // threaded. Then we have nothing to do! - if sr.allThreaded && len(sr.threadIDs) == 1 { + // + // Skip this check for straggler statuses + // that are part of broken threads. + if !straggler && sr.allThreaded && len(sr.threadIDs) == 1 { log.Debug(ctx, "skipping just rethreaded thread") return 0, nil } diff --git a/internal/media/ffmpeg.go b/internal/media/ffmpeg.go index d98e93baf..938a10894 100644 --- a/internal/media/ffmpeg.go +++ b/internal/media/ffmpeg.go @@ -21,8 +21,6 @@ import ( "context" "encoding/json" "errors" - "os" - "path" "strconv" "strings" @@ -158,34 +156,20 @@ func ffmpeg(ctx context.Context, inpath string, outpath string, args ...string) Config: func(modcfg wazero.ModuleConfig) wazero.ModuleConfig { fscfg := wazero.NewFSConfig() - // Needs read-only access to - // /dev/urandom for some types. - urandom := &allowFiles{ - { - abs: "/dev/urandom", - flag: os.O_RDONLY, - perm: 0, - }, - } - fscfg = fscfg.WithFSMount(urandom, "/dev") + // Needs read-only access to /dev/urandom, + // required by some ffmpeg operations. + fscfg = fscfg.WithFSMount(&allowFiles{ + allowRead("/dev/urandom"), + }, "/dev") // In+out dirs are always the same (tmp), // so we can share one file system for // both + grant different perms to inpath // (read only) and outpath (read+write).
- shared := &allowFiles{ - { - abs: inpath, - flag: os.O_RDONLY, - perm: 0, - }, - { - abs: outpath, - flag: os.O_RDWR | os.O_CREATE | os.O_TRUNC, - perm: 0666, - }, - } - fscfg = fscfg.WithFSMount(shared, path.Dir(inpath)) + fscfg = fscfg.WithFSMount(&allowFiles{ + allowCreate(outpath), + allowRead(inpath), + }, tmpdir) // Set anonymous module name. modcfg = modcfg.WithName("") @@ -246,16 +230,10 @@ func ffprobe(ctx context.Context, filepath string) (*result, error) { Config: func(modcfg wazero.ModuleConfig) wazero.ModuleConfig { fscfg := wazero.NewFSConfig() - // Needs read-only access - // to file being probed. - in := &allowFiles{ - { - abs: filepath, - flag: os.O_RDONLY, - perm: 0, - }, - } - fscfg = fscfg.WithFSMount(in, path.Dir(filepath)) + // Needs read-only access to the probed file. + fscfg = fscfg.WithFSMount(&allowFiles{ + allowRead(filepath), + }, tmpdir) // Set anonymous module name. modcfg = modcfg.WithName("") diff --git a/internal/media/ffmpeg/wasm.go b/internal/media/ffmpeg/wasm.go index 1cd92f05d..f395032fa 100644 --- a/internal/media/ffmpeg/wasm.go +++ b/internal/media/ffmpeg/wasm.go @@ -21,12 +21,12 @@ package ffmpeg import ( "context" + "errors" "os" "runtime" "sync/atomic" "unsafe" - "code.superseriousbusiness.org/gotosocial/internal/log" "codeberg.org/gruf/go-ffmpreg/embed" "codeberg.org/gruf/go-ffmpreg/wasm" "github.com/tetratelabs/wazero" @@ -49,24 +49,19 @@ func initWASM(ctx context.Context) error { return nil } - var cfg wazero.RuntimeConfig - - // Allocate new runtime config, letting - // wazero determine compiler / interpreter. - cfg = wazero.NewRuntimeConfig() - - // Though still perform a check of CPU features at - // runtime to warn about slow interpreter performance. - if reason, supported := compilerSupported(); !supported { - log.Warn(ctx, "!!! WAZERO COMPILER MAY NOT BE AVAILABLE !!!"+ - " Reason: "+reason+"."+ - " Wazero will likely fall back to interpreter mode,"+ - " resulting in poor performance for media processing (and SQLite, if in use)."+ - " For more info and possible workarounds, please check:"+ - " https://docs.gotosocial.org/en/latest/getting_started/releases/#supported-platforms", - ) + // Check at runtime whether Wazero compiler support is available, + // as interpreter mode is too slow for a usable gotosocial experience. + if reason, supported := isCompilerSupported(); !supported { + return errors.New("!!! WAZERO COMPILER SUPPORT NOT AVAILABLE !!!" + + " Reason: " + reason + "." + + " Wazero in interpreter mode is too slow to use ffmpeg" + + " (this will also affect SQLite if in use)." + + " For more info and possible workarounds, please check: https://docs.gotosocial.org/en/latest/getting_started/releases/#supported-platforms") } + // Allocate new runtime compiler config. + cfg := wazero.NewRuntimeConfigCompiler() + if dir := os.Getenv("GTS_WAZERO_COMPILATION_CACHE"); dir != "" { // Use on-filesystem compilation cache given by env. cache, err := wazero.NewCompilationCacheWithDir(dir) @@ -88,7 +83,7 @@ func initWASM(ctx context.Context) error { defer func() { if err == nil && set { // Drop binary. - embed.B = nil + embed.Free() return } @@ -110,7 +105,7 @@ func initWASM(ctx context.Context) error { } // Compile ffmpreg WebAssembly into memory.
- mod, err = run.CompileModule(ctx, embed.B) + mod, err = run.CompileModule(ctx, embed.B()) if err != nil { return err } @@ -128,7 +123,7 @@ func initWASM(ctx context.Context) error { return nil } -func compilerSupported() (string, bool) { +func isCompilerSupported() (string, bool) { switch runtime.GOOS { case "linux", "android", "windows", "darwin", @@ -141,10 +136,11 @@ func compilerSupported() (string, bool) { switch runtime.GOARCH { case "amd64": // NOTE: wazero in the future may decouple the - // requirement of simd (sse4_1) from requirements + // requirement of simd (sse4_1+2) from requirements // for compiler support in the future, but even // still our module go-ffmpreg makes use of them. - return "amd64 SSE4.1 required", cpu.X86.HasSSE41 + return "amd64 x86-64-v2 required (see: https://en.wikipedia.org/wiki/X86-64-v2)", + cpu.Initialized && cpu.X86.HasSSE3 && cpu.X86.HasSSE41 && cpu.X86.HasSSE42 case "arm64": // NOTE: this particular check may change if we // later update go-ffmpreg to a version that makes diff --git a/internal/media/metadata.go b/internal/media/metadata.go index 44b1a87b6..c1fa58645 100644 --- a/internal/media/metadata.go +++ b/internal/media/metadata.go @@ -74,20 +74,28 @@ func clearMetadata(ctx context.Context, filepath string) error { // terminateExif cleans exif data from file at input path, into file // at output path, using given file extension to determine cleaning type. -func terminateExif(outpath, inpath string, ext string) error { +func terminateExif(outpath, inpath string, ext string) (err error) { + var inFile *os.File + var outFile *os.File + + // Ensure handles + // closed on return. + defer func() { + outFile.Close() + inFile.Close() + }() + // Open input file at given path. - inFile, err := os.Open(inpath) + inFile, err = openRead(inpath) if err != nil { return gtserror.Newf("error opening input file %s: %w", inpath, err) } - defer inFile.Close() - // Open output file at given path. - outFile, err := os.Create(outpath) + // Create output file at given path. + outFile, err = openWrite(outpath) if err != nil { return gtserror.Newf("error opening output file %s: %w", outpath, err) } - defer outFile.Close() // Terminate EXIF data from 'inFile' -> 'outFile'. err = terminator.TerminateInto(outFile, inFile, ext) diff --git a/internal/media/probe.go b/internal/media/probe.go index 791b6a8c2..c66254b90 100644 --- a/internal/media/probe.go +++ b/internal/media/probe.go @@ -38,8 +38,9 @@ const ( // probe will first attempt to probe the file at path using native Go code // (for performance), but falls back to using ffprobe to retrieve media details. func probe(ctx context.Context, filepath string) (*result, error) { + // Open input file at given path. - file, err := os.Open(filepath) + file, err := openRead(filepath) if err != nil { return nil, gtserror.Newf("error opening file %s: %w", filepath, err) } @@ -80,6 +81,7 @@ func probe(ctx context.Context, filepath string) (*result, error) { // probeJPEG decodes the given file as JPEG and determines // image details from the decoded JPEG using native Go code. func probeJPEG(file *os.File) (*result, error) { + // Attempt to decode JPEG, adding back hdr magic. 
cfg, err := jpeg.DecodeConfig(io.MultiReader( strings.NewReader(magicJPEG), diff --git a/internal/media/thumbnail.go b/internal/media/thumbnail.go index d9a2e522a..5fccaf5ce 100644 --- a/internal/media/thumbnail.go +++ b/internal/media/thumbnail.go @@ -24,7 +24,6 @@ import ( "image/jpeg" "image/png" "io" - "os" "strings" "code.superseriousbusiness.org/gotosocial/internal/gtserror" @@ -89,8 +88,8 @@ func generateThumb( // Default type is webp. mimeType = "image/webp" - // Generate thumb output path REPLACING extension. - if i := strings.IndexByte(filepath, '.'); i != -1 { + // Generate thumb output path REPLACING file extension. + if i := strings.LastIndexByte(filepath, '.'); i != -1 { outpath = filepath[:i] + "_thumb.webp" ext = filepath[i+1:] // old extension } else { @@ -231,7 +230,7 @@ func generateNativeThumb( error, ) { // Open input file at given path. - infile, err := os.Open(inpath) + infile, err := openRead(inpath) if err != nil { return "", gtserror.Newf("error opening input file %s: %w", inpath, err) } @@ -272,7 +271,7 @@ func generateNativeThumb( ) // Open output file at given path. - outfile, err := os.Create(outpath) + outfile, err := openWrite(outpath) if err != nil { return "", gtserror.Newf("error opening output file %s: %w", outpath, err) } @@ -313,8 +312,9 @@ func generateNativeThumb( // generateWebpBlurhash generates a blurhash for Webp at filepath. func generateWebpBlurhash(filepath string) (string, error) { + // Open the file at given path. - file, err := os.Open(filepath) + file, err := openRead(filepath) if err != nil { return "", gtserror.Newf("error opening input file %s: %w", filepath, err) } diff --git a/internal/media/util.go b/internal/media/util.go index ea52b415b..d73206434 100644 --- a/internal/media/util.go +++ b/internal/media/util.go @@ -30,14 +30,41 @@ import ( "codeberg.org/gruf/go-iotools" ) +// media processing tmpdir. +var tmpdir = os.TempDir() + // file represents one file // with the given flag and perms. type file struct { - abs string + abs string // absolute file path, including root + dir string // containing directory of abs + rel string // relative to root, i.e. trim_prefix(abs, dir) flag int perm os.FileMode } +// allowRead returns a new file{} for filepath permitted only to read. +func allowRead(filepath string) file { + return newFile(filepath, os.O_RDONLY, 0) +} + +// allowCreate returns a new file{} for filepath permitted to read / write / create. +func allowCreate(filepath string) file { + return newFile(filepath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) +} + +// newFile returns a new instance of file{} for given path and open args. +func newFile(filepath string, flag int, perms os.FileMode) file { + dir, rel := path.Split(filepath) + return file{ + abs: filepath, + rel: rel, + dir: dir, + flag: flag, + perm: perms, + } +} + // allowFiles implements fs.FS to allow // access to a specified slice of files. type allowFiles []file @@ -45,36 +72,32 @@ type allowFiles []file // Open implements fs.FS. func (af allowFiles) Open(name string) (fs.File, error) { for _, file := range af { - var ( - abs = file.abs - flag = file.flag - perm = file.perm - ) - + switch name { // Allowed to open file - // at absolute path. - if name == file.abs { - return os.OpenFile(abs, flag, perm) - } + // at absolute path, or + // relative as ffmpeg likes. + case file.abs, file.rel: + return os.OpenFile(file.abs, file.flag, file.perm) - // Check for other valid reads. - thisDir, thisFile := path.Split(file.abs) - - // Allowed to read directory itself. 
- if name == thisDir || name == "." { - return os.OpenFile(thisDir, flag, perm) - } - - // Allowed to read file - // itself (at relative path). - if name == thisFile { - return os.OpenFile(abs, flag, perm) + // Ffmpeg likes to read containing + // dir as '.'. Allow RO access here. + case ".": + return openRead(file.dir) } } - return nil, os.ErrPermission } +// openRead opens the existing file at path for reads only. +func openRead(path string) (*os.File, error) { + return os.OpenFile(path, os.O_RDONLY, 0) +} + +// openWrite opens the (new!) file at path for read / writes. +func openWrite(path string) (*os.File, error) { + return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) +} + // getExtension splits file extension from path. func getExtension(path string) string { for i := len(path) - 1; i >= 0 && path[i] != '/'; i-- { @@ -93,17 +116,24 @@ func getExtension(path string) string { // chance that Linux's sendfile syscall can be utilised for optimal // draining of data source to temporary file storage. func drainToTmp(rc io.ReadCloser) (string, error) { - defer rc.Close() + var tmp *os.File + var err error + + // Close handles + // on func return. + defer func() { + tmp.Close() + rc.Close() + }() // Open new temporary file. - tmp, err := os.CreateTemp( - os.TempDir(), + tmp, err = os.CreateTemp( + tmpdir, "gotosocial-*", ) if err != nil { return "", err } - defer tmp.Close() // Extract file path. path := tmp.Name() diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/embed/lib.go b/vendor/codeberg.org/gruf/go-ffmpreg/embed/lib.go index 7829b5524..b39d7d10e 100644 --- a/vendor/codeberg.org/gruf/go-ffmpreg/embed/lib.go +++ b/vendor/codeberg.org/gruf/go-ffmpreg/embed/lib.go @@ -1,39 +1,46 @@ package embed import ( - "bytes" "compress/gzip" _ "embed" "io" - "os" + "strings" ) func init() { var err error - if path := os.Getenv("FFMPREG_WASM"); path != "" { - // Read file into memory. - B, err = os.ReadFile(path) - if err != nil { - panic(err) - } - } - // Wrap bytes in reader. - b := bytes.NewReader(B) + r := strings.NewReader(s) // Create unzipper from reader. - gz, err := gzip.NewReader(b) + gz, err := gzip.NewReader(r) if err != nil { panic(err) } // Extract gzipped binary. - B, err = io.ReadAll(gz) + b, err := io.ReadAll(gz) if err != nil { panic(err) } + + // Set binary. + s = string(b) } +// B returns a copy of +// embedded binary data. +func B() []byte { + if s == "" { + panic("binary already dropped from memory") + } + return []byte(s) +} + +// Free will drop embedded +// binary from runtime mem. 
+func Free() { s = "" } + //go:embed ffmpreg.wasm.gz -var B []byte +var s string diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/wasm/funcs.go b/vendor/codeberg.org/gruf/go-ffmpreg/wasm/funcs.go index a809ff120..a0a199ca1 100644 --- a/vendor/codeberg.org/gruf/go-ffmpreg/wasm/funcs.go +++ b/vendor/codeberg.org/gruf/go-ffmpreg/wasm/funcs.go @@ -9,22 +9,93 @@ import ( type snapshotskey struct{} +type snapshotctx struct { + context.Context + snaps *snapshots +} + +func (ctx snapshotctx) Value(key any) any { + if _, ok := key.(snapshotskey); ok { + return ctx.snaps + } + return ctx.Context.Value(key) +} + +const ringsz uint = 8 + +type snapshots struct { + r [ringsz]struct { + eptr uint32 + snap experimental.Snapshot + } + n uint +} + +func (s *snapshots) get(envptr uint32) experimental.Snapshot { + start := (s.n % ringsz) + + for i := start; i != ^uint(0); i-- { + if s.r[i].eptr == envptr { + snap := s.r[i].snap + s.r[i].eptr = 0 + s.r[i].snap = nil + s.n = i - 1 + return snap + } + } + + for i := ringsz - 1; i > start; i-- { + if s.r[i].eptr == envptr { + snap := s.r[i].snap + s.r[i].eptr = 0 + s.r[i].snap = nil + s.n = i - 1 + return snap + } + } + + panic("snapshot not found") +} + +func (s *snapshots) set(envptr uint32, snapshot experimental.Snapshot) { + start := (s.n % ringsz) + + for i := start; i < ringsz; i++ { + switch s.r[i].eptr { + case 0, envptr: + s.r[i].eptr = envptr + s.r[i].snap = snapshot + s.n = i + return + } + } + + for i := uint(0); i < start; i++ { + switch s.r[i].eptr { + case 0, envptr: + s.r[i].eptr = envptr + s.r[i].snap = snapshot + s.n = i + return + } + } + + panic("snapshots full") +} + // withSetjmpLongjmp updates the context to contain wazero/experimental.Snapshotter{} support, // and embeds the necessary snapshots map required for later calls to Setjmp() / Longjmp(). func withSetjmpLongjmp(ctx context.Context) context.Context { - snapshots := make(map[uint32]experimental.Snapshot, 10) - ctx = experimental.WithSnapshotter(ctx) - ctx = context.WithValue(ctx, snapshotskey{}, snapshots) - return ctx + return snapshotctx{Context: experimental.WithSnapshotter(ctx), snaps: new(snapshots)} } -func getSnapshots(ctx context.Context) map[uint32]experimental.Snapshot { - v, _ := ctx.Value(snapshotskey{}).(map[uint32]experimental.Snapshot) +func getSnapshots(ctx context.Context) *snapshots { + v, _ := ctx.Value(snapshotskey{}).(*snapshots) return v } // setjmp implements the C function: setjmp(env jmp_buf) -func setjmp(ctx context.Context, mod api.Module, stack []uint64) { +func setjmp(ctx context.Context, _ api.Module, stack []uint64) { // Input arguments. envptr := api.DecodeU32(stack[0]) @@ -35,19 +106,16 @@ func setjmp(ctx context.Context, mod api.Module, stack []uint64) { // Get stored snapshots map. snapshots := getSnapshots(ctx) - if snapshots == nil { - panic("setjmp / longjmp not supported") - } // Set latest snapshot in map. - snapshots[envptr] = snapshot + snapshots.set(envptr, snapshot) // Set return. stack[0] = 0 } // longjmp implements the C function: int longjmp(env jmp_buf, value int) -func longjmp(ctx context.Context, mod api.Module, stack []uint64) { +func longjmp(ctx context.Context, _ api.Module, stack []uint64) { // Input arguments. envptr := api.DecodeU32(stack[0]) @@ -60,10 +128,7 @@ func longjmp(ctx context.Context, mod api.Module, stack []uint64) { } // Get snapshot stored in map. - snapshot := snapshots[envptr] - if snapshot == nil { - panic("must first call setjmp") - } + snapshot := snapshots.get(envptr) // Set return. 
stack[0] = 0 diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/wasm/run.go b/vendor/codeberg.org/gruf/go-ffmpreg/wasm/run.go index 7b07d851d..c247abaf0 100644 --- a/vendor/codeberg.org/gruf/go-ffmpreg/wasm/run.go +++ b/vendor/codeberg.org/gruf/go-ffmpreg/wasm/run.go @@ -53,6 +53,7 @@ func Run( modcfg = modcfg.WithStdin(args.Stdin) modcfg = modcfg.WithStdout(args.Stdout) modcfg = modcfg.WithStderr(args.Stderr) + modcfg = modcfg.WithName("") if args.Config != nil { // Pass through config fn. diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/wasm/runtime.go b/vendor/codeberg.org/gruf/go-ffmpreg/wasm/runtime.go index 328a26193..ca13bf775 100644 --- a/vendor/codeberg.org/gruf/go-ffmpreg/wasm/runtime.go +++ b/vendor/codeberg.org/gruf/go-ffmpreg/wasm/runtime.go @@ -28,6 +28,7 @@ func NewRuntime(ctx context.Context, cfg wazero.RuntimeConfig) (wazero.Runtime, // Set core features ffmpeg compiled with. cfg = cfg.WithCoreFeatures(CoreFeatures) + cfg = cfg.WithDebugInfoEnabled(false) // Instantiate runtime with prepared config. rt := wazero.NewRuntimeWithConfig(ctx, cfg) diff --git a/vendor/modules.txt b/vendor/modules.txt index ac4bab587..7d2b53ad4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -247,7 +247,7 @@ codeberg.org/gruf/go-fastcopy # codeberg.org/gruf/go-fastpath/v2 v2.0.0 ## explicit; go 1.14 codeberg.org/gruf/go-fastpath/v2 -# codeberg.org/gruf/go-ffmpreg v0.6.11 +# codeberg.org/gruf/go-ffmpreg v0.6.12 ## explicit; go 1.22.0 codeberg.org/gruf/go-ffmpreg/embed codeberg.org/gruf/go-ffmpreg/wasm