[bugfix] Update exif-terminator (fix png issue) (#2391)

* [bugfix] Update exif-terminator (fix png issue)

* bump exif terminator

* fix tests
tobi 2023-11-30 10:50:28 +01:00 committed by GitHub
commit 0108463e7b
21 changed files with 752 additions and 830 deletions


@@ -1,47 +0,0 @@
/*
exif-terminator
Copyright (C) 2022 SuperSeriousBusiness admin@gotosocial.org
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package terminator
import "fmt"
var logger ErrorLogger
func init() {
logger = &defaultErrorLogger{}
}
// ErrorLogger denotes a generic error logging function.
type ErrorLogger interface {
Error(args ...interface{})
}
type defaultErrorLogger struct{}
func (d *defaultErrorLogger) Error(args ...interface{}) {
fmt.Println(args...)
}
// SetErrorLogger allows a user of the exif-terminator library
// to set the logger that will be used for error logging.
//
// If it is not set, the default error logger will be used, which
// just prints errors to stdout.
func SetErrorLogger(errorLogger ErrorLogger) {
logger = errorLogger
}
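For context, the file deleted above was the library's only logging hook: consumers could supply anything implementing ErrorLogger, otherwise errors went to stdout. A minimal sketch of how that hook would have been used (the myLogger type is hypothetical, and the exif-terminator import path is assumed):

package main

import (
	"log"

	terminator "codeberg.org/superseriousbusiness/exif-terminator"
)

// myLogger is a hypothetical adapter satisfying terminator.ErrorLogger.
type myLogger struct{}

func (l *myLogger) Error(args ...interface{}) {
	log.Println(args...)
}

func main() {
	// Route the library's error output through the standard logger
	// instead of the default stdout printer.
	terminator.SetErrorLogger(&myLogger{})
}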


@@ -19,10 +19,9 @@
package terminator
import (
"encoding/binary"
"io"
pngstructure "github.com/dsoprea/go-png-image-structure/v2"
pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2"
)
type pngVisitor struct {
@@ -45,49 +44,50 @@ func (v *pngVisitor) split(data []byte, atEOF bool) (int, []byte, error) {
}
}
// check if the splitter has any new chunks in it that we haven't written yet
chunkSlice := v.ps.Chunks()
// Check if the splitter now has
// any new chunks in it for us.
chunkSlice, err := v.ps.Chunks()
if err != nil {
return advance, token, err
}
// Write each chunk by passing it
// through our custom write func,
// which strips out exif and fixes
// the CRC of each chunk.
chunks := chunkSlice.Chunks()
for i, chunk := range chunks {
// look through all the chunks in the splitter
if i > v.lastWrittenChunk {
// we've got a chunk we haven't written yet! write it...
if err := v.writeChunk(chunk); err != nil {
return advance, token, err
}
// then remove the data
chunk.Data = chunk.Data[:0]
// and update
v.lastWrittenChunk = i
if i <= v.lastWrittenChunk {
// Skip already
// written chunks.
continue
}
// Write this new chunk.
if err := v.writeChunk(chunk); err != nil {
return advance, token, err
}
v.lastWrittenChunk = i
// Zero data; here you
// go garbage collector.
chunk.Data = nil
}
return advance, token, err
}
func (v *pngVisitor) writeChunk(chunk *pngstructure.Chunk) error {
if err := binary.Write(v.writer, binary.BigEndian, chunk.Length); err != nil {
return err
}
if _, err := v.writer.Write([]byte(chunk.Type)); err != nil {
return err
}
if chunk.Type == pngstructure.EXifChunkType {
blank := make([]byte, len(chunk.Data))
if _, err := v.writer.Write(blank); err != nil {
return err
}
} else {
if _, err := v.writer.Write(chunk.Data); err != nil {
return err
}
// Replace exif data
// with zero bytes.
clear(chunk.Data)
}
if err := binary.Write(v.writer, binary.BigEndian, chunk.Crc); err != nil {
return err
}
// Fix CRC of each chunk.
chunk.UpdateCrc32()
return nil
// finally, write chunk to writer.
_, err := chunk.WriteTo(v.writer)
return err
}
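The heart of the fix is visible in writeChunk: once the eXIf payload has been zeroed with clear(), the chunk's stored CRC no longer matches, so UpdateCrc32 must run before the chunk is written with WriteTo. A standalone sketch of that invariant using the vendored pngstructure types (the payload bytes are made up):

package main

import (
	"fmt"

	pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2"
)

func main() {
	// A made-up eXIf chunk with a few payload bytes.
	chunk := &pngstructure.Chunk{
		Type:   pngstructure.EXifChunkType,
		Data:   []byte{0x01, 0x02, 0x03, 0x04},
		Length: 4,
	}
	chunk.UpdateCrc32()
	fmt.Println(chunk.CheckCrc32()) // true: CRC matches the original payload

	// Zero the payload, as the visitor above does...
	clear(chunk.Data)
	fmt.Println(chunk.CheckCrc32()) // false: the stored CRC is now stale

	// ...so recompute it before the chunk is written out.
	chunk.UpdateCrc32()
	fmt.Println(chunk.CheckCrc32()) // true again
}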


@@ -25,29 +25,34 @@ import (
"fmt"
"io"
pngstructure "github.com/dsoprea/go-png-image-structure/v2"
jpegstructure "github.com/superseriousbusiness/go-jpeg-image-structure/v2"
pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2"
)
func Terminate(in io.Reader, fileSize int, mediaType string) (io.Reader, error) {
// to avoid keeping too much stuff in memory we want to pipe data directly
// To avoid keeping too much stuff
// in memory we want to pipe data
// directly to the reader.
pipeReader, pipeWriter := io.Pipe()
// we don't know ahead of time how long segments might be: they could be as large as
the file itself, so unfortunately we need to allocate a buffer here that's as large
// as the file
// We don't know ahead of time how long
// segments might be: they could be as
// large as the file itself, so we need
// a buffer with generous overhead.
scanner := bufio.NewScanner(in)
scanner.Buffer([]byte{}, fileSize)
var err error
switch mediaType {
case "image/jpeg", "jpeg", "jpg":
err = terminateJpeg(scanner, pipeWriter, fileSize)
case "image/webp", "webp":
err = terminateWebp(scanner, pipeWriter)
case "image/png", "png":
// for pngs we need to skip the header bytes, so read them in
// and check we're really dealing with a png here
// For pngs we need to skip the header bytes, so read
// them in and check we're really dealing with a png.
header := make([]byte, len(pngstructure.PngSignature))
if _, headerError := in.Read(header); headerError != nil {
err = headerError
@@ -67,68 +72,87 @@ func Terminate(in io.Reader, fileSize int, mediaType string) (io.Reader, error)
return pipeReader, err
}
func terminateJpeg(scanner *bufio.Scanner, writer io.WriteCloser, expectedFileSize int) error {
// jpeg visitor is where the spicy hack of streaming the de-exifed data is contained
func terminateJpeg(scanner *bufio.Scanner, writer *io.PipeWriter, expectedFileSize int) error {
v := &jpegVisitor{
writer: writer,
expectedFileSize: expectedFileSize,
}
// provide the visitor to the splitter so that it triggers on every section scan
// Provide the visitor to the splitter so
// that it triggers on every section scan.
js := jpegstructure.NewJpegSplitter(v)
// the visitor also needs to read back the list of segments: for this it needs
// to know what jpeg splitter it's attached to, so give it a pointer to the splitter
// The visitor also needs to read back the
// list of segments: for this it needs to
// know what jpeg splitter it's attached to,
// so give it a pointer to the splitter.
v.js = js
// use the jpeg splitters 'split' function, which satisfies the bufio.SplitFunc interface
// Jpeg visitor's 'split' function
// satisfies bufio.SplitFunc{}.
scanner.Split(js.Split)
scanAndClose(scanner, writer)
go scanAndClose(scanner, writer)
return nil
}
func terminateWebp(scanner *bufio.Scanner, writer io.WriteCloser) error {
func terminateWebp(scanner *bufio.Scanner, writer *io.PipeWriter) error {
v := &webpVisitor{
writer: writer,
}
// use the webp visitor's 'split' function, which satisfies the bufio.SplitFunc interface
// Webp visitor's 'split' function
// satisfies bufio.SplitFunc{}.
scanner.Split(v.split)
scanAndClose(scanner, writer)
go scanAndClose(scanner, writer)
return nil
}
func terminatePng(scanner *bufio.Scanner, writer io.WriteCloser) error {
func terminatePng(scanner *bufio.Scanner, writer *io.PipeWriter) error {
ps := pngstructure.NewPngSplitter()
// Don't bother checking CRC;
// we're overwriting it anyway.
ps.DoCheckCrc(false)
v := &pngVisitor{
ps: ps,
writer: writer,
lastWrittenChunk: -1,
}
// use the png visitor's 'split' function, which satisfies the bufio.SplitFunc interface
// Png visitor's 'split' function
// satisfies bufio.SplitFunc{}.
scanner.Split(v.split)
scanAndClose(scanner, writer)
go scanAndClose(scanner, writer)
return nil
}
func scanAndClose(scanner *bufio.Scanner, writer io.WriteCloser) {
// scan asynchronously until there's nothing left to scan, and then close the writer
// so that the reader on the other side knows that we're done
//
// due to the nature of io.Pipe, writing won't actually work
// until the pipeReader starts being read by the caller, which
// is why we do this asynchronously
go func() {
defer writer.Close()
for scanner.Scan() {
}
if scanner.Err() != nil {
logger.Error(scanner.Err())
}
// scanAndClose scans through the given scanner until there's
// nothing left to scan, and then closes the writer so that the
// reader on the other side of the pipe knows that we're done.
//
// Any error encountered when scanning will be logged by terminator.
//
// Due to the nature of io.Pipe, writing won't actually work
// until the pipeReader starts being read by the caller, which
// is why this function should always be called asynchronously.
func scanAndClose(scanner *bufio.Scanner, writer *io.PipeWriter) {
var err error
defer func() {
// Always close writer, using returned
// scanner error (if any). If err is nil
// then the standard io.EOF will be used.
// (this will not overwrite existing).
writer.CloseWithError(err)
}()
for scanner.Scan() {
}
// Set error on return.
err = scanner.Err()
}
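Because Terminate hands back the read end of an io.Pipe, and scanAndClose now runs in its own goroutine, no data is actually processed until the caller drains the returned reader. A rough usage sketch (file names are made up and the exif-terminator import path is assumed):

package main

import (
	"io"
	"os"

	terminator "codeberg.org/superseriousbusiness/exif-terminator"
)

func main() {
	in, err := os.Open("photo.png") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer in.Close()

	stat, err := in.Stat()
	if err != nil {
		panic(err)
	}

	// Terminate returns immediately; the EXIF-stripped bytes only
	// flow through the pipe while we keep reading from `cleaned`.
	cleaned, err := terminator.Terminate(in, int(stat.Size()), "image/png")
	if err != nil {
		panic(err)
	}

	out, err := os.Create("photo-clean.png") // hypothetical output file
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// Draining the reader drives the scan; scanner errors surface here.
	if _, err := io.Copy(out, cleaned); err != nil {
		panic(err)
	}
}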


@@ -0,0 +1,9 @@
MIT LICENSE
Copyright 2020 Dustin Oprea
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -0,0 +1,81 @@
package pngstructure
import (
"bytes"
"fmt"
"encoding/binary"
)
type ChunkDecoder struct {
}
func NewChunkDecoder() *ChunkDecoder {
return new(ChunkDecoder)
}
func (cd *ChunkDecoder) Decode(c *Chunk) (decoded interface{}, err error) {
switch c.Type {
case "IHDR":
return cd.decodeIHDR(c)
}
// We don't decode this type.
return nil, nil
}
type ChunkIHDR struct {
Width uint32
Height uint32
BitDepth uint8
ColorType uint8
CompressionMethod uint8
FilterMethod uint8
InterlaceMethod uint8
}
func (ihdr *ChunkIHDR) String() string {
return fmt.Sprintf("IHDR<WIDTH=(%d) HEIGHT=(%d) DEPTH=(%d) COLOR-TYPE=(%d) COMP-METHOD=(%d) FILTER-METHOD=(%d) INTRLC-METHOD=(%d)>",
ihdr.Width, ihdr.Height, ihdr.BitDepth, ihdr.ColorType, ihdr.CompressionMethod, ihdr.FilterMethod, ihdr.InterlaceMethod,
)
}
func (cd *ChunkDecoder) decodeIHDR(c *Chunk) (*ChunkIHDR, error) {
var (
b = bytes.NewBuffer(c.Data)
ihdr = new(ChunkIHDR)
readf = func(data interface{}) error {
return binary.Read(b, binary.BigEndian, data)
}
)
if err := readf(&ihdr.Width); err != nil {
return nil, err
}
if err := readf(&ihdr.Height); err != nil {
return nil, err
}
if err := readf(&ihdr.BitDepth); err != nil {
return nil, err
}
if err := readf(&ihdr.ColorType); err != nil {
return nil, err
}
if err := readf(&ihdr.CompressionMethod); err != nil {
return nil, err
}
if err := readf(&ihdr.FilterMethod); err != nil {
return nil, err
}
if err := readf(&ihdr.InterlaceMethod); err != nil {
return nil, err
}
return ihdr, nil
}
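For reference, an IHDR payload is 13 bytes: width and height as big-endian uint32s followed by five single-byte fields. A small sketch of feeding one through the decoder above (the dimensions are arbitrary):

package main

import (
	"encoding/binary"
	"fmt"

	pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2"
)

func main() {
	// Build a fake 800x600, 8-bit truecolour IHDR payload.
	data := make([]byte, 0, 13)
	data = binary.BigEndian.AppendUint32(data, 800) // width
	data = binary.BigEndian.AppendUint32(data, 600) // height
	data = append(data, 8, 2, 0, 0, 0)              // depth, colour type, compression, filter, interlace

	chunk := &pngstructure.Chunk{Type: "IHDR", Data: data, Length: uint32(len(data))}

	decoded, err := pngstructure.NewChunkDecoder().Decode(chunk)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded) // IHDR<WIDTH=(800) HEIGHT=(600) DEPTH=(8) COLOR-TYPE=(2) ...>
}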


@@ -0,0 +1,85 @@
package pngstructure
import (
"bufio"
"bytes"
"image"
"io"
"os"
"image/png"
riimage "github.com/dsoprea/go-utility/v2/image"
)
// PngMediaParser knows how to parse a PNG stream.
type PngMediaParser struct {
}
// NewPngMediaParser returns a new `PngMediaParser`.
func NewPngMediaParser() riimage.MediaParser {
return new(PngMediaParser)
}
// Parse parses a PNG stream given an `io.ReadSeeker`.
func (pmp *PngMediaParser) Parse(
rs io.ReadSeeker,
size int,
) (riimage.MediaContext, error) {
ps := NewPngSplitter()
if err := ps.readHeader(rs); err != nil {
return nil, err
}
s := bufio.NewScanner(rs)
// Since each segment can be any
// size, our buffer must be allowed
// to grow as large as the file.
buffer := []byte{}
s.Buffer(buffer, size)
s.Split(ps.Split)
for s.Scan() {
}
if err := s.Err(); err != nil {
return nil, err
}
return ps.Chunks()
}
// ParseFile parses a PNG stream given a file-path.
func (pmp *PngMediaParser) ParseFile(filepath string) (riimage.MediaContext, error) {
f, err := os.Open(filepath)
if err != nil {
return nil, err
}
defer f.Close()
stat, err := f.Stat()
if err != nil {
return nil, err
}
size := stat.Size()
return pmp.Parse(f, int(size))
}
// ParseBytes parses a PNG stream given a byte-slice.
func (pmp *PngMediaParser) ParseBytes(data []byte) (riimage.MediaContext, error) {
br := bytes.NewReader(data)
return pmp.Parse(br, len(data))
}
// LooksLikeFormat returns a boolean indicating
// whether the stream looks like a PNG image.
func (pmp *PngMediaParser) LooksLikeFormat(data []byte) bool {
return bytes.Equal(data[:len(PngSignature)], PngSignature[:])
}
// GetImage returns an image.Image-compatible struct.
func (pmp *PngMediaParser) GetImage(r io.Reader) (img image.Image, err error) {
return png.Decode(r)
}
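A short sketch of how this parser is typically driven, together with the ChunkSlice API shown further down in this diff (the input path is hypothetical):

package main

import (
	"fmt"

	pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2"
)

func main() {
	pmp := pngstructure.NewPngMediaParser()

	// Parse a PNG from disk; the path is hypothetical.
	mc, err := pmp.ParseFile("exif.png")
	if err != nil {
		panic(err)
	}

	// The returned MediaContext is a *ChunkSlice underneath.
	cs := mc.(*pngstructure.ChunkSlice)
	for _, c := range cs.Chunks() {
		fmt.Println(c) // Chunk<OFFSET=(...) LENGTH=(...) TYPE=[...] CRC=(...)>
	}

	// Look for an eXIf chunk, if the image carries one.
	if exifChunk, err := cs.FindExif(); err == nil {
		fmt.Println("found eXIf chunk of", exifChunk.Length, "bytes")
	}
}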


@@ -0,0 +1,386 @@
package pngstructure
import (
"bytes"
"errors"
"fmt"
"io"
"encoding/binary"
"hash/crc32"
"github.com/dsoprea/go-exif/v3"
exifcommon "github.com/dsoprea/go-exif/v3/common"
riimage "github.com/dsoprea/go-utility/v2/image"
)
var (
PngSignature = [8]byte{137, 'P', 'N', 'G', '\r', '\n', 26, '\n'}
EXifChunkType = "eXIf"
IHDRChunkType = "IHDR"
)
var (
ErrNotPng = errors.New("not png data")
ErrCrcFailure = errors.New("crc failure")
)
// ChunkSlice encapsulates a slice of chunks.
type ChunkSlice struct {
chunks []*Chunk
}
func NewChunkSlice(chunks []*Chunk) (*ChunkSlice, error) {
if len(chunks) == 0 {
err := errors.New("ChunkSlice must be initialized with at least one chunk (IHDR)")
return nil, err
} else if chunks[0].Type != IHDRChunkType {
err := errors.New("first chunk in any ChunkSlice must be an IHDR")
return nil, err
}
return &ChunkSlice{chunks}, nil
}
func NewPngChunkSlice() (*ChunkSlice, error) {
ihdrChunk := &Chunk{
Type: IHDRChunkType,
}
ihdrChunk.UpdateCrc32()
return NewChunkSlice([]*Chunk{ihdrChunk})
}
func (cs *ChunkSlice) String() string {
return fmt.Sprintf("ChunkSlize<LEN=(%d)>", len(cs.chunks))
}
// Chunks exposes the actual slice.
func (cs *ChunkSlice) Chunks() []*Chunk {
return cs.chunks
}
// WriteTo encodes and writes all chunks.
func (cs *ChunkSlice) WriteTo(w io.Writer) error {
if _, err := w.Write(PngSignature[:]); err != nil {
return err
}
// TODO(dustin): !! This should respect
// the safe-to-copy characteristic.
for _, c := range cs.chunks {
if _, err := c.WriteTo(w); err != nil {
return err
}
}
return nil
}
// Index returns a map of chunk types to chunk slices, grouping all like chunks.
func (cs *ChunkSlice) Index() (index map[string][]*Chunk) {
index = make(map[string][]*Chunk)
for _, c := range cs.chunks {
if grouped, found := index[c.Type]; found {
index[c.Type] = append(grouped, c)
} else {
index[c.Type] = []*Chunk{c}
}
}
return index
}
// FindExif returns the segment that hosts the EXIF data.
func (cs *ChunkSlice) FindExif() (chunk *Chunk, err error) {
index := cs.Index()
if chunks, found := index[EXifChunkType]; found {
return chunks[0], nil
}
return nil, exif.ErrNoExif
}
// Exif returns an `exif.Ifd` instance with the existing tags.
func (cs *ChunkSlice) Exif() (*exif.Ifd, []byte, error) {
chunk, err := cs.FindExif()
if err != nil {
return nil, nil, err
}
im, err := exifcommon.NewIfdMappingWithStandard()
if err != nil {
return nil, nil, err
}
ti := exif.NewTagIndex()
_, index, err := exif.Collect(im, ti, chunk.Data)
if err != nil {
return nil, nil, err
}
return index.RootIfd, chunk.Data, nil
}
// ConstructExifBuilder returns an `exif.IfdBuilder` instance
// (needed for modifying) preloaded with all existing tags.
func (cs *ChunkSlice) ConstructExifBuilder() (*exif.IfdBuilder, error) {
rootIfd, _, err := cs.Exif()
if err != nil {
return nil, err
}
return exif.NewIfdBuilderFromExistingChain(rootIfd), nil
}
// SetExif encodes and sets EXIF data into this segment.
func (cs *ChunkSlice) SetExif(ib *exif.IfdBuilder) error {
// Encode.
ibe := exif.NewIfdByteEncoder()
exifData, err := ibe.EncodeToExif(ib)
if err != nil {
return err
}
// Set.
exifChunk, err := cs.FindExif()
switch {
case err == nil:
// EXIF chunk already exists.
exifChunk.Data = exifData
exifChunk.Length = uint32(len(exifData))
case errors.Is(err, exif.ErrNoExif):
// Add an EXIF chunk for the first time.
exifChunk = &Chunk{
Type: EXifChunkType,
Data: exifData,
Length: uint32(len(exifData)),
}
// Insert exif after the IHDR chunk; it's
// a reliably appropriate place to put it.
cs.chunks = append(
cs.chunks[:1],
append(
[]*Chunk{exifChunk},
cs.chunks[1:]...,
)...,
)
default:
return err
}
exifChunk.UpdateCrc32()
return nil
}
// PngSplitter hosts the principal `Split()`
// method used by `bufio.Scanner`.
type PngSplitter struct {
chunks []*Chunk
currentOffset int
doCheckCrc bool
crcErrors []string
}
func (ps *PngSplitter) Chunks() (*ChunkSlice, error) {
return NewChunkSlice(ps.chunks)
}
func (ps *PngSplitter) DoCheckCrc(doCheck bool) {
ps.doCheckCrc = doCheck
}
func (ps *PngSplitter) CrcErrors() []string {
return ps.crcErrors
}
func NewPngSplitter() *PngSplitter {
return &PngSplitter{
chunks: make([]*Chunk, 0),
doCheckCrc: true,
crcErrors: make([]string, 0),
}
}
// Chunk describes a single chunk.
type Chunk struct {
Offset int
Length uint32
Type string
Data []byte
Crc uint32
}
func (c *Chunk) String() string {
return fmt.Sprintf("Chunk<OFFSET=(%d) LENGTH=(%d) TYPE=[%s] CRC=(%d)>", c.Offset, c.Length, c.Type, c.Crc)
}
func calculateCrc32(chunk *Chunk) uint32 {
c := crc32.NewIEEE()
c.Write([]byte(chunk.Type))
c.Write(chunk.Data)
return c.Sum32()
}
func (c *Chunk) UpdateCrc32() {
c.Crc = calculateCrc32(c)
}
func (c *Chunk) CheckCrc32() bool {
expected := calculateCrc32(c)
return c.Crc == expected
}
// Bytes encodes and returns the bytes for this chunk.
func (c *Chunk) Bytes() ([]byte, error) {
if len(c.Data) != int(c.Length) {
return nil, errors.New("length of data not correct")
}
b := make([]byte, 0, 4+4+c.Length+4)
b = binary.BigEndian.AppendUint32(b, c.Length)
b = append(b, c.Type...)
b = append(b, c.Data...)
b = binary.BigEndian.AppendUint32(b, c.Crc)
return b, nil
}
// WriteTo encodes and writes the bytes for this chunk.
func (c *Chunk) WriteTo(w io.Writer) (int, error) {
if len(c.Data) != int(c.Length) {
return 0, errors.New("length of data not correct")
}
var n int
b := make([]byte, 4) // uint32 buf
binary.BigEndian.PutUint32(b, c.Length)
if nn, err := w.Write(b); err != nil {
return n + nn, err
}
n += len(b)
if nn, err := io.WriteString(w, c.Type); err != nil {
return n + nn, err
}
n += len(c.Type)
if nn, err := w.Write(c.Data); err != nil {
return n + nn, err
}
n += len(c.Data)
binary.BigEndian.PutUint32(b, c.Crc)
if nn, err := w.Write(b); err != nil {
return n + nn, err
}
n += len(b)
return n, nil
}
// readHeader verifies that the PNG header bytes appear next.
func (ps *PngSplitter) readHeader(r io.Reader) error {
var (
sigLen = len(PngSignature)
header = make([]byte, sigLen)
)
if _, err := r.Read(header); err != nil {
return err
}
ps.currentOffset += sigLen
if !bytes.Equal(header, PngSignature[:]) {
return ErrNotPng
}
return nil
}
// Split fulfills the `bufio.SplitFunc`
// function definition for `bufio.Scanner`.
func (ps *PngSplitter) Split(
data []byte,
atEOF bool,
) (
advance int,
token []byte,
err error,
) {
// We might have more than one chunk's worth, and,
// if `atEOF` is true, we won't be called again.
// We'll repeatedly try to read additional chunks,
// but, when we run out of the data we were given
// then we'll return the number of bytes for the
// chunks we've already completely read. Then, we'll
// be called again from the end of those bytes, at
// which point we'll indicate that we don't yet have
// enough for another chunk, and we should be then
// called with more.
for {
len_ := len(data)
if len_ < 8 {
return advance, nil, nil
}
length := binary.BigEndian.Uint32(data[:4])
type_ := string(data[4:8])
chunkSize := (8 + int(length) + 4)
if len_ < chunkSize {
return advance, nil, nil
}
crcIndex := 8 + length
crc := binary.BigEndian.Uint32(data[crcIndex : crcIndex+4])
content := make([]byte, length)
copy(content, data[8:8+length])
c := &Chunk{
Length: length,
Type: type_,
Data: content,
Crc: crc,
Offset: ps.currentOffset,
}
ps.chunks = append(ps.chunks, c)
if !c.CheckCrc32() {
ps.crcErrors = append(ps.crcErrors, type_)
if ps.doCheckCrc {
err = ErrCrcFailure
return
}
}
advance += chunkSize
ps.currentOffset += chunkSize
data = data[chunkSize:]
}
}
var (
// Enforce interface conformance.
_ riimage.MediaContext = new(ChunkSlice)
)
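To recap the wire format handled above: each chunk is serialised as a 4-byte big-endian length, the 4-byte type, the payload, then a 4-byte CRC computed over type plus payload. A small round-trip sketch (the payload is arbitrary):

package main

import (
	"encoding/binary"
	"fmt"

	pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2"
)

func main() {
	payload := []byte("Comment\x00hello") // arbitrary tEXt payload
	c := &pngstructure.Chunk{
		Type:   "tEXt",
		Data:   payload,
		Length: uint32(len(payload)),
	}
	c.UpdateCrc32()

	raw, err := c.Bytes()
	if err != nil {
		panic(err)
	}

	// 4-byte length + 4-byte type + payload + 4-byte CRC.
	fmt.Println(len(raw) == 8+len(payload)+4)                       // true
	fmt.Println(binary.BigEndian.Uint32(raw[:4]) == c.Length)       // true
	fmt.Println(string(raw[4:8]) == c.Type)                         // true
	fmt.Println(binary.BigEndian.Uint32(raw[len(raw)-4:]) == c.Crc) // true
}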


@@ -0,0 +1,77 @@
package pngstructure
import (
"fmt"
"os"
"path"
)
var (
assetsPath = "assets"
)
func getModuleRootPath() (string, error) {
moduleRootPath := os.Getenv("PNG_MODULE_ROOT_PATH")
if moduleRootPath != "" {
return moduleRootPath, nil
}
currentWd, err := os.Getwd()
if err != nil {
return "", err
}
currentPath := currentWd
visited := make([]string, 0)
for {
tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT")
_, err := os.Stat(tryStampFilepath)
if err != nil && !os.IsNotExist(err) {
return "", err
} else if err == nil {
break
}
visited = append(visited, tryStampFilepath)
currentPath = path.Dir(currentPath)
if currentPath == "/" {
return "", fmt.Errorf("could not find module-root: %v", visited)
}
}
return currentPath, nil
}
func getTestAssetsPath() (string, error) {
if assetsPath == "" {
moduleRootPath, err := getModuleRootPath()
if err != nil {
return "", err
}
assetsPath = path.Join(moduleRootPath, "assets")
}
return assetsPath, nil
}
func getTestBasicImageFilepath() (string, error) {
assetsPath, err := getTestAssetsPath()
if err != nil {
return "", err
}
return path.Join(assetsPath, "libpng.png"), nil
}
func getTestExifImageFilepath() (string, error) {
assetsPath, err := getTestAssetsPath()
if err != nil {
return "", err
}
return path.Join(assetsPath, "exif.png"), nil
}


@@ -0,0 +1,67 @@
package pngstructure
import (
"bytes"
"fmt"
)
func DumpBytes(data []byte) {
fmt.Printf("DUMP: ")
for _, x := range data {
fmt.Printf("%02x ", x)
}
fmt.Printf("\n")
}
func DumpBytesClause(data []byte) {
fmt.Printf("DUMP: ")
fmt.Printf("[]byte { ")
for i, x := range data {
fmt.Printf("0x%02x", x)
if i < len(data)-1 {
fmt.Printf(", ")
}
}
fmt.Printf(" }\n")
}
func DumpBytesToString(data []byte) (string, error) {
b := new(bytes.Buffer)
for i, x := range data {
if _, err := b.WriteString(fmt.Sprintf("%02x", x)); err != nil {
return "", err
}
if i < len(data)-1 {
if _, err := b.WriteRune(' '); err != nil {
return "", err
}
}
}
return b.String(), nil
}
func DumpBytesClauseToString(data []byte) (string, error) {
b := new(bytes.Buffer)
for i, x := range data {
if _, err := b.WriteString(fmt.Sprintf("0x%02x", x)); err != nil {
return "", err
}
if i < len(data)-1 {
if _, err := b.WriteString(", "); err != nil {
return "", err
}
}
}
return b.String(), nil
}