[feature] support processing of (many) more media types (#3090)
* initial work replacing our media decoding / encoding pipeline with ffprobe + ffmpeg
* specify the video codec to use when generating static image from emoji
* update go-storage library (fixes incompatibility after updating go-iotools)
* maintain image aspect ratio when generating a thumbnail for it
* update readme to show go-ffmpreg
* fix a bunch of media tests, move filesize checking to callers of media manager for more flexibility
* remove extra debug from error message
* fix up incorrect function signatures
* update PutFile to just use regular file copy, as chances are the file is on a separate partition
* fix remaining tests, remove some unneeded tests now we're working with ffmpeg/ffprobe
* update more tests, add more code comments
* add utilities to generate processed emoji / media outputs
* fix remaining tests
* add test for opus media file, add license header to utility cmds
* limit the number of concurrently available ffmpeg / ffprobe instances
* reduce number of instances
* further reduce number of instances
* fix envparsing test with configuration variables
* update docs and configuration with new media-{local,remote}-max-size variables
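The bullets above describe swapping the in-process Go image pipeline for WASM builds of ffmpeg/ffprobe with a cap on concurrent instances. The sketch below is not part of the diff; it only illustrates how the initialization pieces added in this PR fit together, and the pool sizes used here are placeholder values, not ones taken from the PR.

// Illustrative sketch only: initializing the WASM ffmpeg/ffprobe pools
// added under internal/media/ffmpeg in this PR. The pool sizes (1 each)
// are example values.
package main

import (
	"context"
	"fmt"

	"github.com/superseriousbusiness/gotosocial/internal/media/ffmpeg"
)

func main() {
	ctx := context.Background()

	// Limit how many ffmpeg / ffprobe WASM instances may run concurrently.
	if err := ffmpeg.InitFfmpeg(ctx, 1); err != nil {
		panic(err)
	}
	if err := ffmpeg.InitFfprobe(ctx, 1); err != nil {
		panic(err)
	}

	fmt.Println("ffmpeg + ffprobe WASM pools ready")
}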
internal/media/ffmpeg.go (new file, +313)
@@ -0,0 +1,313 @@
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package media

import (
	"context"
	"encoding/json"
	"errors"
	"os"
	"path"
	"strconv"
	"strings"

	"codeberg.org/gruf/go-byteutil"

	"codeberg.org/gruf/go-ffmpreg/wasm"
	_ffmpeg "github.com/superseriousbusiness/gotosocial/internal/media/ffmpeg"

	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
	"github.com/tetratelabs/wazero"
)

// ffmpegClearMetadata generates a copy (in-place) of input media with all metadata cleared.
func ffmpegClearMetadata(ctx context.Context, filepath string, ext string) error {
	// Get directory from filepath.
	dirpath := path.Dir(filepath)

	// Generate output file path with ext.
	outpath := filepath + "_cleaned." + ext

	// Clear metadata with ffmpeg.
	if err := ffmpeg(ctx, dirpath,
		"-loglevel", "error",
		"-i", filepath,
		"-map_metadata", "-1",
		"-codec", "copy",
		"-y",
		outpath,
	); err != nil {
		return err
	}

	// Move the new output file path to original location.
	if err := os.Rename(outpath, filepath); err != nil {
		return gtserror.Newf("error renaming %s: %w", outpath, err)
	}

	return nil
}

// ffmpegGenerateThumb generates a thumbnail jpeg from input media of any type, useful for any media.
func ffmpegGenerateThumb(ctx context.Context, filepath string, width, height int) (string, error) {
	// Get directory from filepath.
	dirpath := path.Dir(filepath)

	// Generate output frame file path.
	outpath := filepath + "_thumb.jpg"

	// Generate thumb with ffmpeg.
	if err := ffmpeg(ctx, dirpath,
		"-loglevel", "error",
		"-i", filepath,
		"-filter:v", "thumbnail=n=10",
		"-filter:v", "scale="+strconv.Itoa(width)+":"+strconv.Itoa(height),
		"-qscale:v", "12", // ~ 70% quality
		"-frames:v", "1",
		"-y",
		outpath,
	); err != nil {
		return "", err
	}

	return outpath, nil
}

// ffmpegGenerateStatic generates a static png from input image of any type, useful for emoji.
func ffmpegGenerateStatic(ctx context.Context, filepath string) (string, error) {
	// Get directory from filepath.
	dirpath := path.Dir(filepath)

	// Generate output static file path.
	outpath := filepath + "_static.png"

	// Generate static with ffmpeg.
	if err := ffmpeg(ctx, dirpath,
		"-loglevel", "error",
		"-i", filepath,
		"-codec:v", "png", // specifically NOT 'apng'
		"-frames:v", "1", // in case animated, only take 1 frame
		"-y",
		outpath,
	); err != nil {
		return "", err
	}

	return outpath, nil
}

// ffmpeg calls `ffmpeg [args...]` (WASM) with directory path mounted in runtime.
func ffmpeg(ctx context.Context, dirpath string, args ...string) error {
	var stderr byteutil.Buffer
	rc, err := _ffmpeg.Ffmpeg(ctx, wasm.Args{
		Stderr: &stderr,
		Args:   args,
		Config: func(modcfg wazero.ModuleConfig) wazero.ModuleConfig {
			fscfg := wazero.NewFSConfig()
			fscfg = fscfg.WithDirMount(dirpath, dirpath)
			modcfg = modcfg.WithFSConfig(fscfg)
			return modcfg
		},
	})
	if err != nil {
		return gtserror.Newf("error running: %w", err)
	} else if rc != 0 {
		return gtserror.Newf("non-zero return code %d (%s)", rc, stderr.B)
	}
	return nil
}

// ffprobe calls `ffprobe` (WASM) on filepath, returning parsed JSON output.
func ffprobe(ctx context.Context, filepath string) (*ffprobeResult, error) {
	var stdout byteutil.Buffer

	// Get directory from filepath.
	dirpath := path.Dir(filepath)

	// Run ffprobe on our given file at path.
	_, err := _ffmpeg.Ffprobe(ctx, wasm.Args{
		Stdout: &stdout,

		Args: []string{
			"-i", filepath,
			"-loglevel", "quiet",
			"-print_format", "json",
			"-show_streams",
			"-show_format",
			"-show_error",
		},

		Config: func(modcfg wazero.ModuleConfig) wazero.ModuleConfig {
			fscfg := wazero.NewFSConfig()
			fscfg = fscfg.WithReadOnlyDirMount(dirpath, dirpath)
			modcfg = modcfg.WithFSConfig(fscfg)
			return modcfg
		},
	})
	if err != nil {
		return nil, gtserror.Newf("error running: %w", err)
	}

	var result ffprobeResult

	// Unmarshal the ffprobe output as our result type.
	if err := json.Unmarshal(stdout.B, &result); err != nil {
		return nil, gtserror.Newf("error unmarshaling json: %w", err)
	}

	return &result, nil
}

// ffprobeResult contains parsed JSON data from
// result of calling `ffprobe` on a media file.
type ffprobeResult struct {
	Streams []ffprobeStream `json:"streams"`
	Format  *ffprobeFormat  `json:"format"`
	Error   *ffprobeError   `json:"error"`
}

// ImageMeta extracts image metadata contained within ffprobe'd media result streams.
func (res *ffprobeResult) ImageMeta() (width int, height int, err error) {
	for _, stream := range res.Streams {
		if stream.Width > width {
			width = stream.Width
		}
		if stream.Height > height {
			height = stream.Height
		}
	}
	if width == 0 || height == 0 {
		err = errors.New("invalid image stream(s)")
	}
	return
}

// VideoMeta extracts video metadata contained within ffprobe'd media result streams.
func (res *ffprobeResult) VideoMeta() (width, height int, framerate float32, err error) {
	for _, stream := range res.Streams {
		if stream.Width > width {
			width = stream.Width
		}
		if stream.Height > height {
			height = stream.Height
		}
		if fr := stream.GetFrameRate(); fr > 0 {
			if framerate == 0 || fr < framerate {
				framerate = fr
			}
		}
	}
	if width == 0 || height == 0 || framerate == 0 {
		err = errors.New("invalid video stream(s)")
	}
	return
}

type ffprobeStream struct {
	CodecName    string `json:"codec_name"`
	AvgFrameRate string `json:"avg_frame_rate"`
	Width        int    `json:"width"`
	Height       int    `json:"height"`
	// + unused fields.
}

// GetFrameRate calculates float32 framerate value from stream json string.
func (str *ffprobeStream) GetFrameRate() float32 {
	if str.AvgFrameRate != "" {
		var (
			// numerator
			num float32

			// denominator
			den float32
		)

		// Check for a provided inequality, i.e. numerator / denominator.
		if p := strings.SplitN(str.AvgFrameRate, "/", 2); len(p) == 2 {
			n, _ := strconv.ParseFloat(p[0], 32)
			d, _ := strconv.ParseFloat(p[1], 32)
			num, den = float32(n), float32(d)
		} else {
			n, _ := strconv.ParseFloat(p[0], 32)
			num = float32(n)
		}

		return num / den
	}
	return 0
}

type ffprobeFormat struct {
	Filename   string `json:"filename"`
	FormatName string `json:"format_name"`
	Duration   string `json:"duration"`
	BitRate    string `json:"bit_rate"`
	// + unused fields
}

// GetFileType determines file type and extension to use for media data.
func (fmt *ffprobeFormat) GetFileType() (gtsmodel.FileType, string) {
	switch fmt.FormatName {
	case "mov,mp4,m4a,3gp,3g2,mj2":
		return gtsmodel.FileTypeVideo, "mp4"
	case "apng":
		return gtsmodel.FileTypeImage, "apng"
	case "png_pipe":
		return gtsmodel.FileTypeImage, "png"
	case "image2", "jpeg_pipe":
		return gtsmodel.FileTypeImage, "jpeg"
	case "webp_pipe":
		return gtsmodel.FileTypeImage, "webp"
	case "gif":
		return gtsmodel.FileTypeImage, "gif"
	case "mp3":
		return gtsmodel.FileTypeAudio, "mp3"
	case "ogg":
		return gtsmodel.FileTypeAudio, "ogg"
	default:
		return gtsmodel.FileTypeUnknown, fmt.FormatName
	}
}

// GetDuration calculates float32 framerate value from format json string.
func (fmt *ffprobeFormat) GetDuration() float32 {
	if fmt.Duration != "" {
		dur, _ := strconv.ParseFloat(fmt.Duration, 32)
		return float32(dur)
	}
	return 0
}

// GetBitRate calculates uint64 bitrate value from format json string.
func (fmt *ffprobeFormat) GetBitRate() uint64 {
	if fmt.BitRate != "" {
		r, _ := strconv.ParseUint(fmt.BitRate, 10, 64)
		return r
	}
	return 0
}

type ffprobeError struct {
	Code   int    `json:"code"`
	String string `json:"string"`
}

func (err *ffprobeError) Error() string {
	return err.String + " (" + strconv.Itoa(err.Code) + ")"
}
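As a quick worked example of the parsing helpers above (not part of the diff, and it would have to live inside package media since the types are unexported): an avg_frame_rate of "30000/1001" parses to roughly 29.97 fps, and a format_name of "ogg" maps to an audio attachment with the "ogg" extension.

// Illustrative only: how the ffprobe helpers above interpret typical output.
func exampleFfprobeHelpers() {
	str := ffprobeStream{AvgFrameRate: "30000/1001"}
	fmt.Printf("%.2f fps\n", str.GetFrameRate()) // 29.97 fps

	format := ffprobeFormat{FormatName: "ogg", Duration: "12.34", BitRate: "96000"}
	fileType, ext := format.GetFileType() // gtsmodel.FileTypeAudio, "ogg"
	fmt.Println(fileType, ext, format.GetDuration(), format.GetBitRate())
}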
internal/media/ffmpeg/cache.go (new file, +46)
@@ -0,0 +1,46 @@
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ffmpeg

import (
	"os"

	"github.com/tetratelabs/wazero"
)

// shared WASM compilation cache.
var cache wazero.CompilationCache

func initCache() {
	if cache != nil {
		return
	}

	if dir := os.Getenv("WAZERO_COMPILATION_CACHE"); dir != "" {
		var err error

		// Use on-filesystem compilation cache given by env.
		cache, err = wazero.NewCompilationCacheWithDir(dir)
		if err != nil {
			panic(err)
		}
	} else {
		// Use in-memory compilation cache.
		cache = wazero.NewCompilationCache()
	}
}
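cache.go above lets the compiled WASM modules be cached on disk by setting WAZERO_COMPILATION_CACHE before the pools are initialized. A minimal sketch (not part of the diff; the directory path is a placeholder):

// Illustrative only: persist wazero's compiled-module cache across restarts
// by pointing WAZERO_COMPILATION_CACHE (read in initCache above) at a
// directory before initializing the pools.
if err := os.Setenv("WAZERO_COMPILATION_CACHE", "/tmp/wazero-cache"); err != nil {
	panic(err)
}
if err := ffmpeg.InitFfmpeg(context.Background(), 1); err != nil {
	panic(err)
}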
internal/media/ffmpeg/ffmpeg.go (new file, +92)
@@ -0,0 +1,92 @@
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ffmpeg

import (
	"context"

	ffmpeglib "codeberg.org/gruf/go-ffmpreg/embed/ffmpeg"
	"codeberg.org/gruf/go-ffmpreg/util"
	"codeberg.org/gruf/go-ffmpreg/wasm"

	"github.com/tetratelabs/wazero"
	"github.com/tetratelabs/wazero/api"
	"github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1"
)

// InitFfmpeg initializes the ffmpeg WebAssembly instance pool,
// with given maximum limiting the number of concurrent instances.
func InitFfmpeg(ctx context.Context, max int) error {
	initCache() // ensure compilation cache initialized
	return ffmpegPool.Init(ctx, max)
}

// Ffmpeg runs the given arguments with an instance of ffmpeg.
func Ffmpeg(ctx context.Context, args wasm.Args) (uint32, error) {
	return ffmpegPool.Run(ctx, args)
}

var ffmpegPool = wasmInstancePool{
	inst: wasm.Instantiator{

		// WASM module name.
		Module: "ffmpeg",

		// Per-instance WebAssembly runtime (with shared cache).
		Runtime: func(ctx context.Context) wazero.Runtime {

			// Prepare config with cache.
			cfg := wazero.NewRuntimeConfig()
			cfg = cfg.WithCoreFeatures(ffmpeglib.CoreFeatures)
			cfg = cfg.WithCompilationCache(cache)

			// Instantiate runtime with our config.
			rt := wazero.NewRuntimeWithConfig(ctx, cfg)

			// Prepare default "env" host module.
			env := rt.NewHostModuleBuilder("env")
			env = env.NewFunctionBuilder().
				WithGoModuleFunction(
					api.GoModuleFunc(util.Wasm_Tempnam),
					[]api.ValueType{api.ValueTypeI32, api.ValueTypeI32},
					[]api.ValueType{api.ValueTypeI32},
				).
				Export("tempnam")

			// Instantiate "env" module in our runtime.
			_, err := env.Instantiate(context.Background())
			if err != nil {
				panic(err)
			}

			// Instantiate the wasi snapshot preview 1 in runtime.
			_, err = wasi_snapshot_preview1.Instantiate(ctx, rt)
			if err != nil {
				panic(err)
			}

			return rt
		},

		// Per-run module configuration.
		Config: wazero.NewModuleConfig,

		// Embedded WASM.
		Source: ffmpeglib.B,
	},
}
internal/media/ffmpeg/ffprobe.go (new file, +92)
@@ -0,0 +1,92 @@
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ffmpeg

import (
	"context"

	ffprobelib "codeberg.org/gruf/go-ffmpreg/embed/ffprobe"
	"codeberg.org/gruf/go-ffmpreg/util"
	"codeberg.org/gruf/go-ffmpreg/wasm"

	"github.com/tetratelabs/wazero"
	"github.com/tetratelabs/wazero/api"
	"github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1"
)

// InitFfprobe initializes the ffprobe WebAssembly instance pool,
// with given maximum limiting the number of concurrent instances.
func InitFfprobe(ctx context.Context, max int) error {
	initCache() // ensure compilation cache initialized
	return ffprobePool.Init(ctx, max)
}

// Ffprobe runs the given arguments with an instance of ffprobe.
func Ffprobe(ctx context.Context, args wasm.Args) (uint32, error) {
	return ffprobePool.Run(ctx, args)
}

var ffprobePool = wasmInstancePool{
	inst: wasm.Instantiator{

		// WASM module name.
		Module: "ffprobe",

		// Per-instance WebAssembly runtime (with shared cache).
		Runtime: func(ctx context.Context) wazero.Runtime {

			// Prepare config with cache.
			cfg := wazero.NewRuntimeConfig()
			cfg = cfg.WithCoreFeatures(ffprobelib.CoreFeatures)
			cfg = cfg.WithCompilationCache(cache)

			// Instantiate runtime with our config.
			rt := wazero.NewRuntimeWithConfig(ctx, cfg)

			// Prepare default "env" host module.
			env := rt.NewHostModuleBuilder("env")
			env = env.NewFunctionBuilder().
				WithGoModuleFunction(
					api.GoModuleFunc(util.Wasm_Tempnam),
					[]api.ValueType{api.ValueTypeI32, api.ValueTypeI32},
					[]api.ValueType{api.ValueTypeI32},
				).
				Export("tempnam")

			// Instantiate "env" module in our runtime.
			_, err := env.Instantiate(context.Background())
			if err != nil {
				panic(err)
			}

			// Instantiate the wasi snapshot preview 1 in runtime.
			_, err = wasi_snapshot_preview1.Instantiate(ctx, rt)
			if err != nil {
				panic(err)
			}

			return rt
		},

		// Per-run module configuration.
		Config: wazero.NewModuleConfig,

		// Embedded WASM.
		Source: ffprobelib.B,
	},
}
internal/media/ffmpeg/pool.go (new file, +75)
@@ -0,0 +1,75 @@
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ffmpeg

import (
	"context"

	"codeberg.org/gruf/go-ffmpreg/wasm"
)

// wasmInstancePool wraps a wasm.Instantiator{} and a
// channel of wasm.Instance{}s to provide a concurrency
// safe pool of WebAssembly module instances capable of
// compiling new instances on-the-fly, with a predetermined
// maximum number of concurrent instances at any one time.
type wasmInstancePool struct {
	inst wasm.Instantiator
	pool chan *wasm.Instance
}

func (p *wasmInstancePool) Init(ctx context.Context, sz int) error {
	p.pool = make(chan *wasm.Instance, sz)
	for i := 0; i < sz; i++ {
		inst, err := p.inst.New(ctx)
		if err != nil {
			return err
		}
		p.pool <- inst
	}
	return nil
}

func (p *wasmInstancePool) Run(ctx context.Context, args wasm.Args) (uint32, error) {
	var inst *wasm.Instance

	select {
	// Context canceled.
	case <-ctx.Done():
		return 0, ctx.Err()

	// Acquire instance.
	case inst = <-p.pool:

		// Ensure instance is
		// ready for running.
		if inst.IsClosed() {
			var err error
			inst, err = p.inst.New(ctx)
			if err != nil {
				return 0, err
			}
		}
	}

	// Release instance to pool on end.
	defer func() { p.pool <- inst }()

	// Pass args to instance.
	return inst.Run(ctx, args)
}
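wasmInstancePool above bounds concurrency with a buffered channel: Run blocks until an instance is free or the context is canceled. The sketch below (not part of the diff) shows the effect through the exported wrappers, assuming the pool was initialized with a size of 2; the "-version" argument is only a harmless placeholder invocation.

// Illustrative only: with InitFfmpeg(ctx, 2), at most two ffmpeg WASM
// instances execute at once; the remaining goroutines block inside Run()
// on the pool channel until an instance is returned or ctx is canceled.
var wg sync.WaitGroup
for i := 0; i < 8; i++ {
	wg.Add(1)
	go func() {
		defer wg.Done()
		_, _ = ffmpeg.Ffmpeg(ctx, wasm.Args{
			Args: []string{"-version"},
		})
	}()
}
wg.Wait()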
@@ -1,189 +0,0 @@
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package media

import (
	"bufio"
	"image"
	"image/color"
	"image/draw"
	"image/jpeg"
	"image/png"
	"io"
	"sync"

	"github.com/buckket/go-blurhash"
	"github.com/disintegration/imaging"
	"github.com/superseriousbusiness/gotosocial/internal/iotools"

	// import to init webp encode/decoding.
	_ "golang.org/x/image/webp"
)

var (
	// pngEncoder provides our global PNG encoding with
	// specified compression level, and memory pooled buffers.
	pngEncoder = png.Encoder{
		CompressionLevel: png.DefaultCompression,
		BufferPool:       &pngEncoderBufferPool{},
	}

	// jpegBufferPool is a memory pool
	// of byte buffers for JPEG encoding.
	jpegBufferPool sync.Pool
)

// gtsImage is a thin wrapper around the standard library image
// interface to provide our own useful helper functions for image
// size and aspect ratio calculations, streamed encoding to various
// types, and creating reduced size thumbnail images.
type gtsImage struct{ image image.Image }

// blankImage generates a blank image of given dimensions.
func blankImage(width int, height int) *gtsImage {
	// create a rectangle with the same dimensions as the video
	img := image.NewRGBA(image.Rect(0, 0, width, height))

	// fill the rectangle with our desired fill color.
	draw.Draw(img, img.Bounds(), &image.Uniform{
		color.RGBA{42, 43, 47, 0},
	}, image.Point{}, draw.Src)

	return &gtsImage{image: img}
}

// decodeImage will decode image from reader stream and return image wrapped in our own gtsImage{} type.
func decodeImage(r io.Reader, opts ...imaging.DecodeOption) (*gtsImage, error) {
	img, err := imaging.Decode(r, opts...)
	if err != nil {
		return nil, err
	}
	return &gtsImage{image: img}, nil
}

// Width returns the image width in pixels.
func (m *gtsImage) Width() int {
	return m.image.Bounds().Size().X
}

// Height returns the image height in pixels.
func (m *gtsImage) Height() int {
	return m.image.Bounds().Size().Y
}

// Size returns the total number of image pixels.
func (m *gtsImage) Size() int {
	return m.image.Bounds().Size().X *
		m.image.Bounds().Size().Y
}

// AspectRatio returns the image ratio of width:height.
func (m *gtsImage) AspectRatio() float32 {

	// note: we cast bounds to float64 to prevent truncation
	// and only at the end aspect ratio do we cast to float32
	// (as the sizes are likely to be much larger than ratio).
	return float32(float64(m.image.Bounds().Size().X) /
		float64(m.image.Bounds().Size().Y))
}

// Thumbnail returns a small sized copy of gtsImage{}, limited to 512x512 if not small enough.
func (m *gtsImage) Thumbnail() *gtsImage {
	const (
		// max thumb
		// dimensions.
		maxWidth  = 512
		maxHeight = 512
	)

	// Check the receiving image is within max thumnail bounds.
	if m.Width() <= maxWidth && m.Height() <= maxHeight {
		return &gtsImage{image: imaging.Clone(m.image)}
	}

	// Image is too large, needs to be resized to thumbnail max.
	img := imaging.Fit(m.image, maxWidth, maxHeight, imaging.Linear)
	return &gtsImage{image: img}
}

// Blurhash calculates the blurhash for the receiving image data.
func (m *gtsImage) Blurhash() (string, error) {
	// for generating blurhashes, it's more cost effective to
	// lose detail since it's blurry, so make a tiny version.
	tiny := imaging.Resize(m.image, 32, 0, imaging.NearestNeighbor)

	// Encode blurhash from resized version
	return blurhash.Encode(4, 3, tiny)
}

// ToJPEG creates a new streaming JPEG encoder from receiving image, and a size ptr
// which stores the number of bytes written during the image encoding process.
func (m *gtsImage) ToJPEG(opts *jpeg.Options) io.Reader {
	return iotools.StreamWriteFunc(func(w io.Writer) error {
		// Get encoding buffer
		bw := getJPEGBuffer(w)

		// Encode JPEG to buffered writer.
		err := jpeg.Encode(bw, m.image, opts)

		// Replace buffer.
		//
		// NOTE: jpeg.Encode() already
		// performs a bufio.Writer.Flush().
		putJPEGBuffer(bw)

		return err
	})
}

// ToPNG creates a new streaming PNG encoder from receiving image, and a size ptr
// which stores the number of bytes written during the image encoding process.
func (m *gtsImage) ToPNG() io.Reader {
	return iotools.StreamWriteFunc(func(w io.Writer) error {
		return pngEncoder.Encode(w, m.image)
	})
}

// getJPEGBuffer fetches a reset JPEG encoding buffer from global JPEG buffer pool.
func getJPEGBuffer(w io.Writer) *bufio.Writer {
	v := jpegBufferPool.Get()
	if v == nil {
		v = bufio.NewWriter(nil)
	}
	buf := v.(*bufio.Writer)
	buf.Reset(w)
	return buf
}

// putJPEGBuffer resets the given bufio writer and places in global JPEG buffer pool.
func putJPEGBuffer(buf *bufio.Writer) {
	buf.Reset(nil)
	jpegBufferPool.Put(buf)
}

// pngEncoderBufferPool implements png.EncoderBufferPool.
type pngEncoderBufferPool sync.Pool

func (p *pngEncoderBufferPool) Get() *png.EncoderBuffer {
	buf, _ := (*sync.Pool)(p).Get().(*png.EncoderBuffer)
	return buf
}

func (p *pngEncoderBufferPool) Put(buf *png.EncoderBuffer) {
	(*sync.Pool)(p).Put(buf)
}
@@ -314,21 +314,26 @@ func (m *Manager) RefreshEmoji(

// Since this is a refresh we will end up storing new images at new
// paths, so we should wrap closer to delete old paths at completion.
wrapped := func(ctx context.Context) (io.ReadCloser, int64, error) {
wrapped := func(ctx context.Context) (io.ReadCloser, error) {

// Call original data func.
rc, sz, err := data(ctx)
// Call original func.
rc, err := data(ctx)
if err != nil {
return nil, 0, err
return nil, err
}

// Wrap closer to cleanup old data.
c := iotools.CloserFunc(func() error {
// Cast as separated reader / closer types.
rct, ok := rc.(*iotools.ReadCloserType)

// First try close original.
if rc.Close(); err != nil {
return err
}
if !ok {
// Allocate new read closer type.
rct = new(iotools.ReadCloserType)
rct.Reader = rc
rct.Closer = rc
}

// Wrap underlying io.Closer type to cleanup old data.
rct.Closer = iotools.CloserCallback(rct.Closer, func() {

// Remove any *old* emoji image file path now stream is closed.
if err := m.state.Storage.Delete(ctx, oldPath); err != nil &&
@@ -341,12 +346,9 @@ func (m *Manager) RefreshEmoji(
!storage.IsNotFound(err) {
log.Errorf(ctx, "error deleting old static emoji %s from storage: %v", shortcodeDomain, err)
}

return nil
})

// Return newly wrapped readcloser and size.
return iotools.ReadCloser(rc, c), sz, nil
return rct, nil
}

// Use a new ID to create a new path
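The RefreshEmoji hunks above swap iotools.CloserFunc for wrapping the existing closer with iotools.CloserCallback, so old files are only deleted once the caller has finished reading the new stream. A minimal sketch of that wrapping pattern, assuming only the iotools helpers already visible in the hunk:

// Minimal sketch of the pattern used above: ensure rc exposes separate
// Reader/Closer parts, then attach a callback that runs after Close().
rct, ok := rc.(*iotools.ReadCloserType)
if !ok {
	rct = new(iotools.ReadCloserType)
	rct.Reader = rc
	rct.Closer = rc
}
rct.Closer = iotools.CloserCallback(rct.Closer, func() {
	// cleanup of now-stale storage paths happens here,
	// after the underlying stream has been closed.
})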
@@ -1,211 +0,0 @@
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package media

/*
	The code in this file is taken from the following source:
	https://github.com/google/wuffs/blob/414a011491ff513b86d8694c5d71800f3cb5a715/script/strip-png-ancillary-chunks.go

	It presents a workaround for this issue: https://github.com/golang/go/issues/43382

	The license for the copied code is reproduced below:

	Copyright 2021 The Wuffs Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

	https://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

// strip-png-ancillary-chunks.go copies PNG data from stdin to stdout, removing
// any ancillary chunks.
//
// Specification-compliant PNG decoders are required to honor critical chunks
// but may ignore ancillary (non-critical) chunks. Stripping out ancillary
// chunks before decoding should mean that different PNG decoders will agree on
// the decoded output regardless of which ancillary chunk types they choose to
// honor. Specifically, some PNG decoders may implement color and gamma
// correction but not all do.
//
// This program will strip out all ancillary chunks, but it should be
// straightforward to copy-paste-and-modify it to strip out only certain chunk
// types (e.g. only "tRNS" transparency chunks).
//
// --------
//
// A PNG file consists of an 8-byte magic identifier and then a series of
// chunks. Each chunk is:
//
// - a 4-byte uint32 payload length N.
// - a 4-byte chunk type (e.g. "gAMA" for gamma correction metadata).
// - an N-byte payload.
// - a 4-byte CRC-32 checksum of the previous (N + 4) bytes, including the
// chunk type but excluding the payload length.
//
// Chunk types consist of 4 ASCII letters. The upper-case / lower-case bit of
// the first letter denote critical or ancillary chunks: "IDAT" and "PLTE" are
// critical, "gAMA" and "tEXt" are ancillary. See
// https://www.w3.org/TR/2003/REC-PNG-20031110/#5Chunk-naming-conventions
//
// --------

import (
	"encoding/binary"
	"io"
)

const (
	chunkTypeIHDR = 0x49484452
	chunkTypePLTE = 0x504C5445
	chunkTypeIDAT = 0x49444154
	chunkTypeIEND = 0x49454E44
	chunkTypeTRNS = 0x74524e53
)

func isNecessaryChunkType(chunkType uint32) bool {
	switch chunkType {
	case chunkTypeIHDR:
		return true
	case chunkTypePLTE:
		return true
	case chunkTypeIDAT:
		return true
	case chunkTypeIEND:
		return true
	case chunkTypeTRNS:
		return true
	}
	return false
}

// pngAncillaryChunkStripper wraps another io.Reader to strip ancillary chunks,
// if the data is in the PNG file format. If the data isn't PNG, it is passed
// through unmodified.
type pngAncillaryChunkStripper struct {
	// Reader is the wrapped io.Reader.
	Reader io.Reader

	// stickyErr is the first error returned from the wrapped io.Reader.
	stickyErr error

	// buffer[rIndex:wIndex] holds data read from the wrapped io.Reader that
	// wasn't passed through yet.
	buffer [8]byte
	rIndex int
	wIndex int

	// pending and discard is the number of remaining bytes for (and whether to
	// discard or pass through) the current chunk-in-progress.
	pending int64
	discard bool

	// notPNG is set true if the data stream doesn't start with the 8-byte PNG
	// magic identifier. If true, the wrapped io.Reader's data (including the
	// first up-to-8 bytes) is passed through without modification.
	notPNG bool

	// seenMagic is whether we've seen the 8-byte PNG magic identifier.
	seenMagic bool
}

// Read implements io.Reader.
func (r *pngAncillaryChunkStripper) Read(p []byte) (int, error) {
	for {
		// If the wrapped io.Reader returned a non-nil error, drain r.buffer
		// (what data we have) and return that error (if fully drained).
		if r.stickyErr != nil {
			n := copy(p, r.buffer[r.rIndex:r.wIndex])
			r.rIndex += n
			if r.rIndex < r.wIndex {
				return n, nil
			}
			return n, r.stickyErr
		}

		// Handle trivial requests, including draining our buffer.
		if len(p) == 0 {
			return 0, nil
		} else if r.rIndex < r.wIndex {
			n := copy(p, r.buffer[r.rIndex:r.wIndex])
			r.rIndex += n
			return n, nil
		}

		// From here onwards, our buffer is drained: r.rIndex == r.wIndex.

		// Handle non-PNG input.
		if r.notPNG {
			return r.Reader.Read(p)
		}

		// Continue processing any PNG chunk that's in progress, whether
		// discarding it or passing it through.
		for r.pending > 0 {
			if int64(len(p)) > r.pending {
				p = p[:r.pending]
			}
			n, err := r.Reader.Read(p)
			r.pending -= int64(n)
			r.stickyErr = err
			if r.discard {
				continue
			}
			return n, err
		}

		// We're either expecting the 8-byte PNG magic identifier or the 4-byte
		// PNG chunk length + 4-byte PNG chunk type. Either way, read 8 bytes.
		r.rIndex = 0
		r.wIndex, r.stickyErr = io.ReadFull(r.Reader, r.buffer[:8])
		if r.stickyErr != nil {
			// Undo io.ReadFull converting io.EOF to io.ErrUnexpectedEOF.
			if r.stickyErr == io.ErrUnexpectedEOF {
				r.stickyErr = io.EOF
			}
			continue
		}

		// Process those 8 bytes, either:
		// - a PNG chunk (if we've already seen the PNG magic identifier),
		// - the PNG magic identifier itself (if the input is a PNG) or
		// - something else (if it's not a PNG).
		//nolint:gocritic
		if r.seenMagic {
			// The number of pending bytes is equal to (N + 4) because of the 4
			// byte trailer, a checksum.
			r.pending = int64(binary.BigEndian.Uint32(r.buffer[:4])) + 4
			chunkType := binary.BigEndian.Uint32(r.buffer[4:])
			r.discard = !isNecessaryChunkType(chunkType)
			if r.discard {
				r.rIndex = r.wIndex
			}
		} else if string(r.buffer[:8]) == "\x89PNG\x0D\x0A\x1A\x0A" {
			r.seenMagic = true
		} else {
			r.notPNG = true
		}
	}
}
@@ -18,16 +18,10 @@
package media

import (
"bytes"
"context"
"io"
"slices"

"codeberg.org/gruf/go-bytesize"
errorsv2 "codeberg.org/gruf/go-errors/v2"
"codeberg.org/gruf/go-runners"
"github.com/h2non/filetype"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -125,19 +119,8 @@ func (p *ProcessingEmoji) load(ctx context.Context) (
// full-size media attachment details.
//
// This will update p.emoji as it goes.
if err = p.store(ctx); err != nil {
return err
}

// Finish processing by reloading media into
// memory to get dimension and generate a thumb.
//
// This will update p.emoji as it goes.
if err = p.finish(ctx); err != nil {
return err //nolint:revive
}

return nil
err = p.store(ctx)
return err
})
emoji = p.emoji
return
@@ -147,80 +130,66 @@ func (p *ProcessingEmoji) load(ctx context.Context) (
// and updates the underlying attachment fields as necessary. It will then stream
// bytes from p's reader directly into storage so that it can be retrieved later.
func (p *ProcessingEmoji) store(ctx context.Context) error {
// Load media from provided data fun
rc, sz, err := p.dataFn(ctx)
// Load media from data func.
rc, err := p.dataFn(ctx)
if err != nil {
return gtserror.Newf("error executing data function: %w", err)
}

var (
// predfine temporary media
// file path variables so we
// can remove them on error.
temppath string
staticpath string
)

defer func() {
// Ensure data reader gets closed on return.
if err := rc.Close(); err != nil {
log.Errorf(ctx, "error closing data reader: %v", err)
if err := remove(temppath, staticpath); err != nil {
log.Errorf(ctx, "error(s) cleaning up files: %v", err)
}
}()

var maxSize bytesize.Size

if p.emoji.IsLocal() {
// this is a local emoji upload
maxSize = config.GetMediaEmojiLocalMaxSize()
} else {
// this is a remote incoming emoji
maxSize = config.GetMediaEmojiRemoteMaxSize()
}

// Check that provided size isn't beyond max. We check beforehand
// so that we don't attempt to stream the emoji into storage if not needed.
if sz > 0 && sz > int64(maxSize) {
sz := bytesize.Size(sz) // improves log readability
return gtserror.Newf("given emoji size %s greater than max allowed %s", sz, maxSize)
}

// Prepare to read bytes from
// file header or magic number.
fileSize := int(sz)
hdrBuf := newHdrBuf(fileSize)

// Read into buffer as much as possible.
//
// UnexpectedEOF means we couldn't read up to the
// given size, but we may still have read something.
//
// EOF means we couldn't read anything at all.
//
// Any other error likely means the connection messed up.
//
// In other words, rather counterintuitively, we
// can only proceed on no error or unexpected error!
n, err := io.ReadFull(rc, hdrBuf)
// Drain reader to tmp file
// (this reader handles close).
temppath, err = drainToTmp(rc)
if err != nil {
if err != io.ErrUnexpectedEOF {
return gtserror.Newf("error reading first bytes of incoming media: %w", err)
}

// Initial file size was misreported, so we didn't read
// fully into hdrBuf. Reslice it to the size we did read.
hdrBuf = hdrBuf[:n]
fileSize = n
p.emoji.ImageFileSize = fileSize
return gtserror.Newf("error draining data to tmp: %w", err)
}

// Parse file type info from header buffer.
// This should only ever error if the buffer
// is empty (ie., the attachment is 0 bytes).
info, err := filetype.Match(hdrBuf)
// Pass input file through ffprobe to
// parse further metadata information.
result, err := ffprobe(ctx, temppath)
if err != nil {
return gtserror.Newf("error parsing file type: %w", err)
return gtserror.Newf("error ffprobing data: %w", err)
}

// Ensure supported emoji img type.
if !slices.Contains(SupportedEmojiMIMETypes, info.MIME.Value) {
return gtserror.Newf("unsupported emoji filetype: %s", info.Extension)
switch {
// No errors parsing data.
case result.Error == nil:

// Data type unhandleable by ffprobe.
case result.Error.Code == -1094995529:
log.Warn(ctx, "unsupported data type")
return nil

default:
return gtserror.Newf("ffprobe error: %w", err)
}

// Recombine header bytes with remaining stream
r := io.MultiReader(bytes.NewReader(hdrBuf), rc)
var ext string

// Set media type from ffprobe format data.
fileType, ext := result.Format.GetFileType()
if fileType != gtsmodel.FileTypeImage {
return gtserror.Newf("unsupported emoji filetype: %s (%s)", fileType, ext)
}

// Generate a static image from input emoji path.
staticpath, err = ffmpegGenerateStatic(ctx, temppath)
if err != nil {
return gtserror.Newf("error generating emoji static: %w", err)
}

var pathID string
if p.newPathID != "" {
@@ -244,95 +213,50 @@ func (p *ProcessingEmoji) store(ctx context.Context) error {
string(TypeEmoji),
string(SizeOriginal),
pathID,
info.Extension,
ext,
)

// File shouldn't already exist in storage at this point,
// but we do a check as it's worth logging / cleaning up.
if have, _ := p.mgr.state.Storage.Has(ctx, p.emoji.ImagePath); have {
log.Warnf(ctx, "emoji already exists at: %s", p.emoji.ImagePath)

// Attempt to remove existing emoji at storage path (might be broken / out-of-date)
if err := p.mgr.state.Storage.Delete(ctx, p.emoji.ImagePath); err != nil {
return gtserror.Newf("error removing emoji %s from storage: %v", p.emoji.ImagePath, err)
}
}

// Write the final image reader stream to our storage.
sz, err = p.mgr.state.Storage.PutStream(ctx, p.emoji.ImagePath, r)
// Copy temporary file into storage at path.
filesz, err := p.mgr.state.Storage.PutFile(ctx,
p.emoji.ImagePath,
temppath,
)
if err != nil {
return gtserror.Newf("error writing emoji to storage: %w", err)
}

// Perform final size check in case none was
// given previously, or size was mis-reported.
// (error here will later perform p.cleanup()).
if sz > int64(maxSize) {
sz := bytesize.Size(sz) // improves log readability
return gtserror.Newf("written emoji size %s greater than max allowed %s", sz, maxSize)
// Copy static emoji file into storage at path.
staticsz, err := p.mgr.state.Storage.PutFile(ctx,
p.emoji.ImageStaticPath,
staticpath,
)
if err != nil {
return gtserror.Newf("error writing static to storage: %w", err)
}

// Set final determined file sizes.
p.emoji.ImageFileSize = int(filesz)
p.emoji.ImageStaticFileSize = int(staticsz)

// Fill in remaining emoji data now it's stored.
p.emoji.ImageURL = uris.URIForAttachment(
instanceAccID,
string(TypeEmoji),
string(SizeOriginal),
pathID,
info.Extension,
ext,
)
p.emoji.ImageContentType = info.MIME.Value
p.emoji.ImageFileSize = int(sz)

// Get mimetype for the file container
// type, falling back to generic data.
p.emoji.ImageContentType = getMimeType(ext)

// We can now consider this cached.
p.emoji.Cached = util.Ptr(true)

return nil
}

func (p *ProcessingEmoji) finish(ctx context.Context) error {
// Get a stream to the original file for further processing.
rc, err := p.mgr.state.Storage.GetStream(ctx, p.emoji.ImagePath)
if err != nil {
return gtserror.Newf("error loading file from storage: %w", err)
}
defer rc.Close()

// Decode the image from storage.
staticImg, err := decodeImage(rc)
if err != nil {
return gtserror.Newf("error decoding image: %w", err)
}

// staticImg should be in-memory by
// now so we're done with storage.
if err := rc.Close(); err != nil {
return gtserror.Newf("error closing file: %w", err)
}

// Static img shouldn't exist in storage at this point,
// but we do a check as it's worth logging / cleaning up.
if have, _ := p.mgr.state.Storage.Has(ctx, p.emoji.ImageStaticPath); have {
log.Warnf(ctx, "static emoji already exists at: %s", p.emoji.ImageStaticPath)

// Attempt to remove existing thumbnail (might be broken / out-of-date).
if err := p.mgr.state.Storage.Delete(ctx, p.emoji.ImageStaticPath); err != nil {
return gtserror.Newf("error removing static emoji %s from storage: %v", p.emoji.ImageStaticPath, err)
}
}

// Create emoji PNG encoder stream.
enc := staticImg.ToPNG()

// Stream-encode the PNG static emoji image into our storage driver.
sz, err := p.mgr.state.Storage.PutStream(ctx, p.emoji.ImageStaticPath, enc)
if err != nil {
return gtserror.Newf("error stream-encoding static emoji to storage: %w", err)
}

// Set final written thumb size.
p.emoji.ImageStaticFileSize = int(sz)

return nil
}

// cleanup will remove any traces of processing emoji from storage,
// and perform any other necessary cleanup steps after failure.
func (p *ProcessingEmoji) cleanup(ctx context.Context) {
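The magic number -1094995529 checked in the hunks above is FFmpeg's AVERROR_INVALIDDATA, i.e. data that ffprobe could not parse. A tiny sketch (not part of the diff) of where that value comes from: it is the negated "INDA" FourCC used by FFmpeg's FFERRTAG macro.

// Illustrative only: -1094995529 == AVERROR_INVALIDDATA, the negated
// little-endian FourCC "INDA" (FFERRTAG('I','N','D','A') in FFmpeg).
tag := uint32('I') | uint32('N')<<8 | uint32('D')<<16 | uint32('A')<<24
fmt.Println(-int32(tag)) // prints -1094995529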
@@ -18,18 +18,12 @@
package media

import (
"bytes"
"cmp"
"context"
"image/jpeg"
"io"
"time"

errorsv2 "codeberg.org/gruf/go-errors/v2"
"codeberg.org/gruf/go-runners"
terminator "codeberg.org/superseriousbusiness/exif-terminator"
"github.com/disintegration/imaging"
"github.com/h2non/filetype"

"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
@@ -145,19 +139,8 @@ func (p *ProcessingMedia) load(ctx context.Context) (
// full-size media attachment details.
//
// This will update p.media as it goes.
if err = p.store(ctx); err != nil {
return err
}

// Finish processing by reloading media into
// memory to get dimension and generate a thumb.
//
// This will update p.media as it goes.
if err = p.finish(ctx); err != nil {
return err //nolint:revive
}

return nil
err = p.store(ctx)
return err
})
media = p.media
return
@ -167,89 +150,224 @@ func (p *ProcessingMedia) load(ctx context.Context) (
|
|||
// and updates the underlying attachment fields as necessary. It will then stream
|
||||
// bytes from p's reader directly into storage so that it can be retrieved later.
|
||||
func (p *ProcessingMedia) store(ctx context.Context) error {
|
||||
// Load media from provided data fun
|
||||
rc, sz, err := p.dataFn(ctx)
|
||||
// Load media from data func.
|
||||
rc, err := p.dataFn(ctx)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error executing data function: %w", err)
|
||||
}
|
||||
|
||||
var (
|
||||
// predfine temporary media
|
||||
// file path variables so we
|
||||
// can remove them on error.
|
||||
temppath string
|
||||
thumbpath string
|
||||
)
|
||||
|
||||
defer func() {
|
||||
// Ensure data reader gets closed on return.
|
||||
if err := rc.Close(); err != nil {
|
||||
log.Errorf(ctx, "error closing data reader: %v", err)
|
||||
if err := remove(temppath, thumbpath); err != nil {
|
||||
log.Errorf(ctx, "error(s) cleaning up files: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Assume we're given correct file
|
||||
// size, we can overwrite this later
|
||||
// once we know THE TRUTH.
|
||||
fileSize := int(sz)
|
||||
p.media.File.FileSize = fileSize
|
||||
|
||||
// Prepare to read bytes from
|
||||
// file header or magic number.
|
||||
hdrBuf := newHdrBuf(fileSize)
|
||||
|
||||
// Read into buffer as much as possible.
|
||||
//
|
||||
// UnexpectedEOF means we couldn't read up to the
|
||||
// given size, but we may still have read something.
|
||||
//
|
||||
// EOF means we couldn't read anything at all.
|
||||
//
|
||||
// Any other error likely means the connection messed up.
|
||||
//
|
||||
// In other words, rather counterintuitively, we
|
||||
// can only proceed on no error or unexpected error!
|
||||
n, err := io.ReadFull(rc, hdrBuf)
|
||||
// Drain reader to tmp file
|
||||
// (this reader handles close).
|
||||
temppath, err = drainToTmp(rc)
|
||||
if err != nil {
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
return gtserror.Newf("error reading first bytes of incoming media: %w", err)
|
||||
return gtserror.Newf("error draining data to tmp: %w", err)
|
||||
}
|
||||
|
||||
// Pass input file through ffprobe to
|
||||
// parse further metadata information.
|
||||
result, err := ffprobe(ctx, temppath)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error ffprobing data: %w", err)
|
||||
}
|
||||
|
||||
switch {
|
||||
// No errors parsing data.
|
||||
case result.Error == nil:
|
||||
|
||||
// Data type unhandleable by ffprobe.
|
||||
case result.Error.Code == -1094995529:
|
||||
log.Warn(ctx, "unsupported data type")
|
||||
return nil
|
||||
|
||||
default:
|
||||
return gtserror.Newf("ffprobe error: %w", err)
|
||||
}
|
||||
|
||||
var ext string
|
||||
|
||||
// Set the media type from ffprobe format data.
|
||||
p.media.Type, ext = result.Format.GetFileType()
|
||||
if p.media.Type == gtsmodel.FileTypeUnknown {
|
||||
|
||||
// Return early (deleting file)
|
||||
// for unhandled file types.
|
||||
return nil
|
||||
}
|
||||
|
||||
switch p.media.Type {
|
||||
case gtsmodel.FileTypeImage:
|
||||
// Pass file through ffmpeg clearing
|
||||
// any excess metadata (e.g. EXIF).
|
||||
if err := ffmpegClearMetadata(ctx,
|
||||
temppath, ext,
|
||||
); err != nil {
|
||||
return gtserror.Newf("error cleaning metadata: %w", err)
|
||||
}
|
||||
|
||||
// Initial file size was misreported, so we didn't read
|
||||
// fully into hdrBuf. Reslice it to the size we did read.
|
||||
hdrBuf = hdrBuf[:n]
|
||||
fileSize = n
|
||||
p.media.File.FileSize = fileSize
|
||||
}
|
||||
// Extract image metadata from streams.
|
||||
width, height, err := result.ImageMeta()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.media.FileMeta.Original.Width = width
|
||||
p.media.FileMeta.Original.Height = height
|
||||
p.media.FileMeta.Original.Size = (width * height)
|
||||
p.media.FileMeta.Original.Aspect = float32(width) / float32(height)
|
||||
|
||||
// Parse file type info from header buffer.
|
||||
// This should only ever error if the buffer
|
||||
// is empty (ie., the attachment is 0 bytes).
|
||||
info, err := filetype.Match(hdrBuf)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error parsing file type: %w", err)
|
||||
}
|
||||
// Determine thumbnail dimensions to use.
|
||||
thumbWidth, thumbHeight := thumbSize(width, height)
|
||||
p.media.FileMeta.Small.Width = thumbWidth
|
||||
p.media.FileMeta.Small.Height = thumbHeight
|
||||
p.media.FileMeta.Small.Size = (thumbWidth * thumbHeight)
|
||||
p.media.FileMeta.Small.Aspect = float32(thumbWidth) / float32(thumbHeight)
|
||||
|
||||
// Recombine header bytes with remaining stream
|
||||
r := io.MultiReader(bytes.NewReader(hdrBuf), rc)
|
||||
// Generate a thumbnail image from input image path.
|
||||
thumbpath, err = ffmpegGenerateThumb(ctx, temppath,
|
||||
thumbWidth,
|
||||
thumbHeight,
|
||||
)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error generating image thumb: %w", err)
|
||||
}
|
||||
|
||||
// Assume we'll put
|
||||
// this file in storage.
|
||||
store := true
|
||||
case gtsmodel.FileTypeVideo:
|
||||
// Pass file through ffmpeg clearing
|
||||
// any excess metadata (e.g. EXIF).
|
||||
if err := ffmpegClearMetadata(ctx,
|
||||
temppath, ext,
|
||||
); err != nil {
|
||||
return gtserror.Newf("error cleaning metadata: %w", err)
|
||||
}
|
||||
|
||||
switch info.Extension {
|
||||
case "mp4":
|
||||
// No problem.
|
||||
// Extract video metadata we can from streams.
|
||||
width, height, framerate, err := result.VideoMeta()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.media.FileMeta.Original.Width = width
|
||||
p.media.FileMeta.Original.Height = height
|
||||
p.media.FileMeta.Original.Size = (width * height)
|
||||
p.media.FileMeta.Original.Aspect = float32(width) / float32(height)
|
||||
p.media.FileMeta.Original.Framerate = &framerate
|
||||
|
||||
case "gif":
|
||||
// No problem
|
||||
// Extract total duration from format.
|
||||
duration := result.Format.GetDuration()
|
||||
p.media.FileMeta.Original.Duration = &duration
|
||||
|
||||
case "jpg", "jpeg", "png", "webp":
|
||||
if fileSize > 0 {
|
||||
// A file size was provided so we can clean
|
||||
// exif data from image as we're streaming it.
|
||||
r, err = terminator.Terminate(r, fileSize, info.Extension)
|
||||
// Extract total bitrate from format.
|
||||
bitrate := result.Format.GetBitRate()
|
||||
p.media.FileMeta.Original.Bitrate = &bitrate
|
||||
|
||||
// Determine thumbnail dimensions to use.
|
||||
thumbWidth, thumbHeight := thumbSize(width, height)
|
||||
p.media.FileMeta.Small.Width = thumbWidth
|
||||
p.media.FileMeta.Small.Height = thumbHeight
|
||||
p.media.FileMeta.Small.Size = (thumbWidth * thumbHeight)
|
||||
p.media.FileMeta.Small.Aspect = float32(thumbWidth) / float32(thumbHeight)
|
||||
|
||||
// Extract a thumbnail frame from input video path.
|
||||
thumbpath, err = ffmpegGenerateThumb(ctx, temppath,
|
||||
thumbWidth,
|
||||
thumbHeight,
|
||||
)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error extracting video frame: %w", err)
|
||||
}
|
||||
|
||||
case gtsmodel.FileTypeAudio:
|
||||
// Extract total duration from format.
|
||||
duration := result.Format.GetDuration()
|
||||
p.media.FileMeta.Original.Duration = &duration
|
||||
|
||||
// Extract total bitrate from format.
|
||||
bitrate := result.Format.GetBitRate()
|
||||
p.media.FileMeta.Original.Bitrate = &bitrate
|
||||
|
||||
// Extract image metadata from streams (if any),
|
||||
// this will only exist for embedded album art.
|
||||
width, height, _ := result.ImageMeta()
|
||||
if width > 0 && height > 0 {
|
||||
|
||||
// Determine thumbnail dimensions to use.
|
||||
thumbWidth, thumbHeight := thumbSize(width, height)
|
||||
p.media.FileMeta.Small.Width = thumbWidth
|
||||
p.media.FileMeta.Small.Height = thumbHeight
|
||||
p.media.FileMeta.Small.Size = (thumbWidth * thumbHeight)
|
||||
p.media.FileMeta.Small.Aspect = float32(thumbWidth) / float32(thumbHeight)
|
||||
|
||||
// Generate a thumbnail image from input image path.
|
||||
thumbpath, err = ffmpegGenerateThumb(ctx, temppath,
|
||||
thumbWidth,
|
||||
thumbHeight,
|
||||
)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error cleaning exif data: %w", err)
|
||||
return gtserror.Newf("error generating image thumb: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
// The file is not a supported format that we can process, so we can't do much with it.
|
||||
log.Warnf(ctx, "unsupported media extension '%s'; not caching locally", info.Extension)
|
||||
store = false
|
||||
log.Warnf(ctx, "unsupported type: %s (%s)", p.media.Type, result.Format.FormatName)
|
||||
return nil
|
||||
}
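ffmpegGenerateThumb itself isn't shown in this hunk. Conceptually it scales the input down to the precomputed thumbnail size and writes out a single frame. A hedged sketch of what such a helper could look like, assuming an ffmpeg(ctx, dir, args ...string) error wrapper around the embedded ffmpeg (the filter arguments and output naming here are assumptions, not the PR's actual code):

	// ffmpegGenerateThumbSketch is an illustrative stand-in for the
	// real helper: scale the input to the given thumb dimensions and
	// write a single JPEG frame alongside the input file.
	func ffmpegGenerateThumbSketch(ctx context.Context, filepath string, width, height int) (string, error) {
		outpath := filepath + "_thumb.jpg"
		err := ffmpeg(ctx, path.Dir(filepath),
			"-loglevel", "error",
			"-i", filepath,
			"-filter:v", fmt.Sprintf("scale=%d:%d", width, height),
			"-an",          // drop any audio streams
			"-frames:v", "1", // emit exactly one output frame
			"-y",
			outpath,
		)
		return outpath, err
	}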
|
||||
|
||||
// Calculate final media attachment file path.
|
||||
p.media.File.Path = uris.StoragePathForAttachment(
|
||||
p.media.AccountID,
|
||||
string(TypeAttachment),
|
||||
string(SizeOriginal),
|
||||
p.media.ID,
|
||||
ext,
|
||||
)
|
||||
|
||||
// Copy temporary file into storage at path.
|
||||
filesz, err := p.mgr.state.Storage.PutFile(ctx,
|
||||
p.media.File.Path,
|
||||
temppath,
|
||||
)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error writing media to storage: %w", err)
|
||||
}
|
||||
|
||||
// Set final determined file size.
|
||||
p.media.File.FileSize = int(filesz)
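PutFile copies the temporary file into storage at the given path and reports the number of bytes written. For a plain filesystem backend a copy (rather than an os.Rename) is the safe choice, since the temp file may live on a different filesystem and rename would fail with EXDEV. A minimal, illustrative sketch of such a copy (imports: io, os; not the actual go-storage implementation):

	func copyFileInto(dstpath, srcpath string) (int64, error) {
		src, err := os.Open(srcpath)
		if err != nil {
			return 0, err
		}
		defer src.Close()

		dst, err := os.Create(dstpath)
		if err != nil {
			return 0, err
		}

		// io.Copy lets the runtime use sendfile /
		// copy_file_range where the platform supports it.
		n, err := io.Copy(dst, src)
		if err != nil {
			_ = dst.Close()
			return n, err
		}

		return n, dst.Close()
	}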
|
||||
|
||||
if thumbpath != "" {
|
||||
// A thumbnail was generated; only in that case
|
||||
// do we need thumbnail storage and a blurhash
|
||||
// (e.g. audio without album art has neither).
|
||||
|
||||
if p.media.Blurhash == "" {
|
||||
// Generate blurhash (if not already) from thumbnail.
|
||||
p.media.Blurhash, err = generateBlurhash(thumbpath)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error generating thumb blurhash: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy thumbnail file into storage at path.
|
||||
thumbsz, err := p.mgr.state.Storage.PutFile(ctx,
|
||||
p.media.Thumbnail.Path,
|
||||
thumbpath,
|
||||
)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error writing thumb to storage: %w", err)
|
||||
}
|
||||
|
||||
// Set final determined thumbnail size.
|
||||
p.media.Thumbnail.FileSize = int(thumbsz)
|
||||
}
|
||||
|
||||
// Fill in correct attachment
|
||||
|
|
@ -259,194 +377,17 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
|
|||
string(TypeAttachment),
|
||||
string(SizeOriginal),
|
||||
p.media.ID,
|
||||
info.Extension,
|
||||
ext,
|
||||
)
|
||||
|
||||
// Prefer discovered MIME, fallback to generic data stream.
|
||||
mime := cmp.Or(info.MIME.Value, "application/octet-stream")
|
||||
p.media.File.ContentType = mime
|
||||
|
||||
// Calculate final media attachment file path.
|
||||
p.media.File.Path = uris.StoragePathForAttachment(
|
||||
p.media.AccountID,
|
||||
string(TypeAttachment),
|
||||
string(SizeOriginal),
|
||||
p.media.ID,
|
||||
info.Extension,
|
||||
)
|
||||
|
||||
// We should only try to store the file if it's
|
||||
// a format we can keep processing, otherwise be
|
||||
// a bit cheeky: don't store it and let users
|
||||
// click through to the remote server instead.
|
||||
if !store {
|
||||
return nil
|
||||
}
|
||||
|
||||
// File shouldn't already exist in storage at this point,
|
||||
// but we do a check as it's worth logging / cleaning up.
|
||||
if have, _ := p.mgr.state.Storage.Has(ctx, p.media.File.Path); have {
|
||||
log.Warnf(ctx, "media already exists at: %s", p.media.File.Path)
|
||||
|
||||
// Attempt to remove existing media at storage path (might be broken / out-of-date)
|
||||
if err := p.mgr.state.Storage.Delete(ctx, p.media.File.Path); err != nil {
|
||||
return gtserror.Newf("error removing media %s from storage: %v", p.media.File.Path, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Write the final reader stream to our storage driver.
|
||||
sz, err = p.mgr.state.Storage.PutStream(ctx, p.media.File.Path, r)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error writing media to storage: %w", err)
|
||||
}
|
||||
|
||||
// Set actual written size
|
||||
// as authoritative file size.
|
||||
p.media.File.FileSize = int(sz)
|
||||
// Get mimetype for the file container
|
||||
// type, falling back to generic data.
|
||||
p.media.File.ContentType = getMimeType(ext)
|
||||
|
||||
// We can now consider this cached.
|
||||
p.media.Cached = util.Ptr(true)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ProcessingMedia) finish(ctx context.Context) error {
|
||||
// Nothing else to do if
|
||||
// media was not cached.
|
||||
if !*p.media.Cached {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get a stream to the original file for further processing.
|
||||
rc, err := p.mgr.state.Storage.GetStream(ctx, p.media.File.Path)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error loading file from storage: %w", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
// fullImg is the processed version of
|
||||
// the original (stripped + reoriented).
|
||||
var fullImg *gtsImage
|
||||
|
||||
// Depending on the content type, we
|
||||
// can do various types of decoding.
|
||||
switch p.media.File.ContentType {
|
||||
|
||||
// .jpeg, .gif, .webp image type
|
||||
case mimeImageJpeg, mimeImageGif, mimeImageWebp:
|
||||
fullImg, err = decodeImage(rc,
|
||||
imaging.AutoOrientation(true),
|
||||
)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error decoding image: %w", err)
|
||||
}
|
||||
|
||||
// Mark as no longer unknown type now
|
||||
// we know for sure we can decode it.
|
||||
p.media.Type = gtsmodel.FileTypeImage
|
||||
|
||||
// .png image (requires ancillary chunk stripping)
|
||||
case mimeImagePng:
|
||||
fullImg, err = decodeImage(
|
||||
&pngAncillaryChunkStripper{Reader: rc},
|
||||
imaging.AutoOrientation(true),
|
||||
)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error decoding image: %w", err)
|
||||
}
|
||||
|
||||
// Mark as no longer unknown type now
|
||||
// we know for sure we can decode it.
|
||||
p.media.Type = gtsmodel.FileTypeImage
|
||||
|
||||
// .mp4 video type
|
||||
case mimeVideoMp4:
|
||||
video, err := decodeVideoFrame(rc)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error decoding video: %w", err)
|
||||
}
|
||||
|
||||
// Set video frame as image.
|
||||
fullImg = video.frame
|
||||
|
||||
// Set video metadata in attachment info.
|
||||
p.media.FileMeta.Original.Duration = &video.duration
|
||||
p.media.FileMeta.Original.Framerate = &video.framerate
|
||||
p.media.FileMeta.Original.Bitrate = &video.bitrate
|
||||
|
||||
// Mark as no longer unknown type now
|
||||
// we know for sure we can decode it.
|
||||
p.media.Type = gtsmodel.FileTypeVideo
|
||||
}
|
||||
|
||||
// fullImg should be in-memory by
|
||||
// now so we're done with storage.
|
||||
if err := rc.Close(); err != nil {
|
||||
return gtserror.Newf("error closing file: %w", err)
|
||||
}
|
||||
|
||||
// Set full-size dimensions in attachment info.
|
||||
p.media.FileMeta.Original.Width = fullImg.Width()
|
||||
p.media.FileMeta.Original.Height = fullImg.Height()
|
||||
p.media.FileMeta.Original.Size = fullImg.Size()
|
||||
p.media.FileMeta.Original.Aspect = fullImg.AspectRatio()
|
||||
|
||||
// Get smaller thumbnail image
|
||||
thumbImg := fullImg.Thumbnail()
|
||||
|
||||
// Garbage collector, you may
|
||||
// now take our large son.
|
||||
fullImg = nil
|
||||
|
||||
// Only generate blurhash
|
||||
// from thumb if necessary.
|
||||
if p.media.Blurhash == "" {
|
||||
hash, err := thumbImg.Blurhash()
|
||||
if err != nil {
|
||||
return gtserror.Newf("error generating blurhash: %w", err)
|
||||
}
|
||||
|
||||
// Set the attachment blurhash.
|
||||
p.media.Blurhash = hash
|
||||
}
|
||||
|
||||
// Thumbnail shouldn't exist in storage at this point,
|
||||
// but we do a check as it's worth logging / cleaning up.
|
||||
if have, _ := p.mgr.state.Storage.Has(ctx, p.media.Thumbnail.Path); have {
|
||||
log.Warnf(ctx, "thumbnail already exists at: %s", p.media.Thumbnail.Path)
|
||||
|
||||
// Attempt to remove existing thumbnail (might be broken / out-of-date).
|
||||
if err := p.mgr.state.Storage.Delete(ctx, p.media.Thumbnail.Path); err != nil {
|
||||
return gtserror.Newf("error removing thumbnail %s from storage: %v", p.media.Thumbnail.Path, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create a thumbnail JPEG encoder stream.
|
||||
enc := thumbImg.ToJPEG(&jpeg.Options{
|
||||
|
||||
// Good enough for
|
||||
// a thumbnail.
|
||||
Quality: 70,
|
||||
})
|
||||
|
||||
// Stream-encode the JPEG thumbnail image into our storage driver.
|
||||
sz, err := p.mgr.state.Storage.PutStream(ctx, p.media.Thumbnail.Path, enc)
|
||||
if err != nil {
|
||||
return gtserror.Newf("error stream-encoding thumbnail to storage: %w", err)
|
||||
}
|
||||
|
||||
// Set final written thumb size.
|
||||
p.media.Thumbnail.FileSize = int(sz)
|
||||
|
||||
// Set thumbnail dimensions in attachment info.
|
||||
p.media.FileMeta.Small = gtsmodel.Small{
|
||||
Width: thumbImg.Width(),
|
||||
Height: thumbImg.Height(),
|
||||
Size: thumbImg.Size(),
|
||||
Aspect: thumbImg.AspectRatio(),
|
||||
}
|
||||
|
||||
// Finally set the attachment as finished processing.
|
||||
p.media.Processing = gtsmodel.ProcessingStatusProcessed
|
||||
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -24,12 +24,13 @@ import (
|
|||
"io"
|
||||
"net/url"
|
||||
|
||||
"github.com/superseriousbusiness/gotosocial/internal/config"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/log"
|
||||
)
|
||||
|
||||
type DereferenceMedia func(ctx context.Context, iri *url.URL) (io.ReadCloser, int64, error)
|
||||
type DereferenceMedia func(ctx context.Context, iri *url.URL, maxsz int64) (io.ReadCloser, error)
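The dereferencer now receives a maximum-size hint instead of reporting a file size back to the caller. A hedged sketch of an implementation honouring maxsz by capping the response body (plain net/http shown purely for illustration; GoToSocial's real transport layer differs):

	import (
		"context"
		"io"
		"net/http"
		"net/url"
	)

	func dereferenceMedia(ctx context.Context, iri *url.URL, maxsz int64) (io.ReadCloser, error) {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, iri.String(), nil)
		if err != nil {
			return nil, err
		}

		rsp, err := http.DefaultClient.Do(req)
		if err != nil {
			return nil, err
		}

		// Cap reads at maxsz bytes while still allowing
		// the underlying response body to be closed.
		return struct {
			io.Reader
			io.Closer
		}{io.LimitReader(rsp.Body, maxsz), rsp.Body}, nil
	}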
|
||||
|
||||
// RefetchEmojis iterates through remote emojis (for the given domain, or all if domain is empty string).
|
||||
//
|
||||
|
|
@ -48,6 +49,9 @@ func (m *Manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM
|
|||
refetchIDs []string
|
||||
)
|
||||
|
||||
// Get max supported remote emoji media size.
|
||||
maxsz := config.GetMediaEmojiRemoteMaxSize()
|
||||
|
||||
// page through emojis 20 at a time, looking for those with missing images
|
||||
for {
|
||||
// Fetch next block of emojis from database
|
||||
|
|
@ -107,8 +111,8 @@ func (m *Manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM
|
|||
continue
|
||||
}
|
||||
|
||||
dataFunc := func(ctx context.Context) (reader io.ReadCloser, fileSize int64, err error) {
|
||||
return dereferenceMedia(ctx, emojiImageIRI)
|
||||
dataFunc := func(ctx context.Context) (reader io.ReadCloser, err error) {
|
||||
return dereferenceMedia(ctx, emojiImageIRI, int64(maxsz))
|
||||
}
|
||||
|
||||
processingEmoji, err := m.RefreshEmoji(ctx, emoji, dataFunc, AdditionalEmojiInfo{
|
||||
|
|
|
|||
|
(binary image test fixtures regenerated; file sizes changed)
BIN
internal/media/test/test-opus-original.opus
Normal file
BIN
internal/media/test/test-opus-processed.opus
Normal file
(further binary image test fixtures regenerated; file sizes changed)
|
|
@ -144,4 +144,4 @@ type AdditionalEmojiInfo struct {
|
|||
}
|
||||
|
||||
// DataFunc represents a function used to retrieve the raw bytes of a piece of media.
|
||||
type DataFunc func(ctx context.Context) (reader io.ReadCloser, fileSize int64, err error)
|
||||
type DataFunc func(ctx context.Context) (reader io.ReadCloser, err error)
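Under the new signature a DataFunc no longer reports a size up front; size limits are applied elsewhere (e.g. via the maxsz passed to the dereferencer and the limited-reader check in drainToTmp). The simplest possible DataFunc is just opening a local file (path purely illustrative):

	var data DataFunc = func(ctx context.Context) (io.ReadCloser, error) {
		return os.Open("/tmp/example-emoji.png")
	}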
|
||||
|
|
|
|||
|
|
@ -17,25 +17,161 @@
|
|||
|
||||
package media
|
||||
|
||||
// newHdrBuf returns a buffer of suitable size to
|
||||
// read bytes from a file header or magic number.
|
||||
//
|
||||
// File header is *USUALLY* 261 bytes at the start
|
||||
// of a file; magic number can be much less than
|
||||
// that (just a few bytes).
|
||||
//
|
||||
// To cover both cases, this function returns a buffer
|
||||
// suitable for whichever is smallest: the first 261
|
||||
// bytes of the file, or the whole file.
|
||||
//
|
||||
// See:
|
||||
//
|
||||
// - https://en.wikipedia.org/wiki/File_format#File_header
|
||||
// - https://github.com/h2non/filetype.
|
||||
func newHdrBuf(fileSize int) []byte {
|
||||
bufSize := 261
|
||||
if fileSize > 0 && fileSize < bufSize {
|
||||
bufSize = fileSize
|
||||
import (
|
||||
"cmp"
|
||||
"errors"
|
||||
"fmt"
|
||||
"image"
|
||||
"image/jpeg"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"codeberg.org/gruf/go-bytesize"
|
||||
"codeberg.org/gruf/go-iotools"
|
||||
"codeberg.org/gruf/go-mimetypes"
|
||||
"github.com/buckket/go-blurhash"
|
||||
"github.com/disintegration/imaging"
|
||||
)
|
||||
|
||||
// thumbSize returns the dimensions to use for an input
|
||||
// image of given width / height, for its outgoing thumbnail.
|
||||
// This maintains the original image aspect ratio.
|
||||
func thumbSize(width, height int) (int, int) {
|
||||
const (
|
||||
maxThumbWidth = 512
|
||||
maxThumbHeight = 512
|
||||
)
|
||||
switch {
|
||||
// Simplest case, within bounds!
|
||||
case width < maxThumbWidth &&
|
||||
height < maxThumbHeight:
|
||||
return width, height
|
||||
|
||||
// Width is larger side.
|
||||
case width > height:
|
||||
p := float32(width) / float32(maxThumbWidth)
|
||||
return maxThumbWidth, int(float32(height) / p)
|
||||
|
||||
// Height is larger side.
|
||||
case height > width:
|
||||
p := float32(height) / float32(maxThumbHeight)
|
||||
return int(float32(width) / p), maxThumbHeight
|
||||
|
||||
// Square.
|
||||
default:
|
||||
return maxThumbWidth, maxThumbHeight
|
||||
}
|
||||
return make([]byte, bufSize)
|
||||
}
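A quick worked example of the thumbSize scaling above (a sketch, not code from the PR): for a 1920x1080 input the width is the larger side, so p = 1920 / 512 = 3.75 and the thumbnail comes out at 512x288, preserving the 16:9 aspect ratio.

	w, h := thumbSize(1920, 1080)
	// p = 1920 / 512 = 3.75
	// w = 512, h = int(1080 / 3.75) = 288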
|
||||
|
||||
// jpegDecode decodes the JPEG at filepath into parsed image.Image.
|
||||
func jpegDecode(filepath string) (image.Image, error) {
|
||||
// Open the file at given path.
|
||||
file, err := os.Open(filepath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Decode image from file.
|
||||
img, err := jpeg.Decode(file)
|
||||
|
||||
// Done with file.
|
||||
_ = file.Close()
|
||||
|
||||
return img, err
|
||||
}
|
||||
|
||||
// generateBlurhash generates a blurhash for JPEG at filepath.
|
||||
func generateBlurhash(filepath string) (string, error) {
|
||||
// Decode JPEG file at given path.
|
||||
img, err := jpegDecode(filepath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// For blurhash generation it's more cost-effective to
|
||||
// work from a tiny version; detail is lost to the blur anyway.
|
||||
tiny := imaging.Resize(img, 64, 64, imaging.NearestNeighbor)
|
||||
|
||||
// Drop the larger image
|
||||
// ref as soon as possible
|
||||
// to allow GC to claim.
|
||||
img = nil //nolint
|
||||
|
||||
// Generate blurhash for thumbnail.
|
||||
return blurhash.Encode(4, 3, tiny)
|
||||
}
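Illustrative usage, mirroring how store() calls it on the freshly generated thumbnail (the path is hypothetical):

	hash, err := generateBlurhash("/tmp/gotosocial-123456789_thumb.jpg")
	if err != nil {
		// handle / wrap error
	}
	// hash is a short ASCII string describing the
	// thumbnail's overall colour layout.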
|
||||
|
||||
// getMimeType returns a suitable mimetype for file extension.
|
||||
func getMimeType(ext string) string {
|
||||
const defaultType = "application/octet-stream"
|
||||
return cmp.Or(mimetypes.MimeTypes[ext], defaultType)
|
||||
}
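For example (assuming the go-mimetypes table contains the usual entries):

	_ = getMimeType("png") // "image/png"
	_ = getMimeType("mp4") // "video/mp4"
	_ = getMimeType("xyz") // "application/octet-stream" (fallback for unknown extensions)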
|
||||
|
||||
// drainToTmp drains data from given reader into a new temp file
|
||||
// and closes it, returning the path of the resulting temp file.
|
||||
//
|
||||
// Note that this function specifically attempts to unwrap the
|
||||
// io.ReadCloser as far as possible to its underlying type, to
|
||||
// maximise the chance that Linux's sendfile syscall can be used
|
||||
// to drain the data source into temporary file storage optimally.
|
||||
func drainToTmp(rc io.ReadCloser) (string, error) {
|
||||
tmp, err := os.CreateTemp(os.TempDir(), "gotosocial-*")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Close readers
|
||||
// on func return.
|
||||
defer tmp.Close()
|
||||
defer rc.Close()
|
||||
|
||||
// Extract file path.
|
||||
path := tmp.Name()
|
||||
|
||||
// Limited reader (if any).
|
||||
var lr *io.LimitedReader
|
||||
var limit int64
|
||||
|
||||
// Reader type to use
|
||||
// for draining to tmp.
|
||||
rd := (io.Reader)(rc)
|
||||
|
||||
// Check if reader is actually wrapped,
|
||||
// (as our http client wraps close func).
|
||||
rct, ok := rc.(*iotools.ReadCloserType)
|
||||
if ok {
|
||||
|
||||
// Get unwrapped.
|
||||
rd = rct.Reader
|
||||
|
||||
// Extract limited reader if wrapped.
|
||||
lr, limit = iotools.GetReaderLimit(rd)
|
||||
}
|
||||
|
||||
// Drain reader into tmp.
|
||||
_, err = tmp.ReadFrom(rd)
|
||||
if err != nil {
|
||||
return path, err
|
||||
}
|
||||
|
||||
// Check to see if limit was reached,
|
||||
// (produces more useful error messages).
|
||||
if lr != nil && !iotools.AtEOF(lr.R) {
|
||||
return path, fmt.Errorf("reached read limit %s", bytesize.Size(limit))
|
||||
}
|
||||
|
||||
return path, nil
|
||||
}
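A sketch of how drainToTmp and remove (below) fit together when caching incoming data; rc here stands in for the io.ReadCloser returned by a DataFunc:

	// Drain the incoming stream to a temporary file.
	temppath, err := drainToTmp(rc)
	if err != nil {
		_ = remove(temppath) // drop any partial temp file
		return gtserror.Newf("error draining data to tmp: %w", err)
	}

	// Clean up the temp file once processing is done.
	defer func() {
		if err := remove(temppath); err != nil {
			log.Errorf(ctx, "error removing temp file: %v", err)
		}
	}()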
|
||||
|
||||
// remove removes each of the given paths, skipping any that are empty.
|
||||
func remove(paths ...string) error {
|
||||
var errs []error
|
||||
for _, path := range paths {
|
||||
if path != "" {
|
||||
if err := os.Remove(path); err != nil {
|
||||
errs = append(errs, fmt.Errorf("error removing %s: %w", path, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,141 +0,0 @@
|
|||
// GoToSocial
|
||||
// Copyright (C) GoToSocial Authors admin@gotosocial.org
|
||||
// SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package media
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/abema/go-mp4"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/iotools"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/log"
|
||||
)
|
||||
|
||||
type gtsVideo struct {
|
||||
frame *gtsImage
|
||||
duration float32 // in seconds
|
||||
bitrate uint64
|
||||
framerate float32
|
||||
}
|
||||
|
||||
// decodeVideoFrame decodes and returns an image from a single frame in the given video stream.
|
||||
// (note: currently this only returns a blank image resized to fit video dimensions).
|
||||
func decodeVideoFrame(r io.Reader) (*gtsVideo, error) {
|
||||
// Check if video stream supports
|
||||
// seeking, usually when *os.File.
|
||||
rsc, ok := r.(io.ReadSeekCloser)
|
||||
if !ok {
|
||||
var err error
|
||||
|
||||
// Store stream to temporary location
|
||||
// in order that we can get seek-reads.
|
||||
rsc, err = iotools.TempFileSeeker(r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating temp file seeker: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// Ensure temp. read seeker closed.
|
||||
if err := rsc.Close(); err != nil {
|
||||
log.Errorf(nil, "error closing temp file seeker: %s", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// probe the video file to extract useful metadata from it; for methodology, see:
|
||||
// https://github.com/abema/go-mp4/blob/7d8e5a7c5e644e0394261b0cf72fef79ce246d31/mp4tool/probe/probe.go#L85-L154
|
||||
info, err := mp4.Probe(rsc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error during mp4 probe: %w", err)
|
||||
}
|
||||
|
||||
var (
|
||||
width int
|
||||
height int
|
||||
videoBitrate uint64
|
||||
audioBitrate uint64
|
||||
video gtsVideo
|
||||
)
|
||||
|
||||
for _, tr := range info.Tracks {
|
||||
if tr.AVC == nil {
|
||||
// audio track
|
||||
if br := tr.Samples.GetBitrate(tr.Timescale); br > audioBitrate {
|
||||
audioBitrate = br
|
||||
} else if br := info.Segments.GetBitrate(tr.TrackID, tr.Timescale); br > audioBitrate {
|
||||
audioBitrate = br
|
||||
}
|
||||
|
||||
if d := float64(tr.Duration) / float64(tr.Timescale); d > float64(video.duration) {
|
||||
video.duration = float32(d)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// video track
|
||||
if w := int(tr.AVC.Width); w > width {
|
||||
width = w
|
||||
}
|
||||
|
||||
if h := int(tr.AVC.Height); h > height {
|
||||
height = h
|
||||
}
|
||||
|
||||
if br := tr.Samples.GetBitrate(tr.Timescale); br > videoBitrate {
|
||||
videoBitrate = br
|
||||
} else if br := info.Segments.GetBitrate(tr.TrackID, tr.Timescale); br > videoBitrate {
|
||||
videoBitrate = br
|
||||
}
|
||||
|
||||
if d := float64(tr.Duration) / float64(tr.Timescale); d > float64(video.duration) {
|
||||
video.framerate = float32(len(tr.Samples)) / float32(d)
|
||||
video.duration = float32(d)
|
||||
}
|
||||
}
|
||||
|
||||
// overall bitrate should be audio + video combined
|
||||
// (since they're both playing at the same time)
|
||||
video.bitrate = audioBitrate + videoBitrate
|
||||
|
||||
// Check for empty video metadata.
|
||||
var empty []string
|
||||
if width == 0 {
|
||||
empty = append(empty, "width")
|
||||
}
|
||||
if height == 0 {
|
||||
empty = append(empty, "height")
|
||||
}
|
||||
if video.duration == 0 {
|
||||
empty = append(empty, "duration")
|
||||
}
|
||||
if video.framerate == 0 {
|
||||
empty = append(empty, "framerate")
|
||||
}
|
||||
if video.bitrate == 0 {
|
||||
empty = append(empty, "bitrate")
|
||||
}
|
||||
if len(empty) > 0 {
|
||||
return nil, fmt.Errorf("error determining video metadata: %v", empty)
|
||||
}
|
||||
|
||||
// Create new empty "frame" image.
|
||||
// TODO: decode frame from video file.
|
||||
video.frame = blankImage(width, height)
|
||||
|
||||
return &video, nil
|
||||
}
|
||||