Mirror of https://github.com/superseriousbusiness/gotosocial.git
[bugfix] Update exif-terminator (fix png issue) (#2391)

* [bugfix] Update exif-terminator (fix png issue)
* bump exif terminator
* fix tests
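For readers who want to try the updated library directly, here is a minimal sketch of stripping EXIF from a PNG. The `Terminate` signature matches the vendored terminator.go further down in this diff; the file names are hypothetical, purely for illustration.

```go
package main

import (
	"io"
	"os"

	terminator "github.com/superseriousbusiness/exif-terminator"
)

func main() {
	// Hypothetical input path.
	in, err := os.Open("photo.png")
	if err != nil {
		panic(err)
	}
	defer in.Close()

	info, err := in.Stat()
	if err != nil {
		panic(err)
	}

	// Terminate returns a pipe reader that streams the PNG back out
	// with its eXIf chunk blanked; the caller must drain it.
	cleaned, err := terminator.Terminate(in, int(info.Size()), "image/png")
	if err != nil {
		panic(err)
	}

	// Hypothetical output path.
	out, err := os.Create("photo.noexif.png")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, cleaned); err != nil {
		panic(err)
	}
}
```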
This commit is contained in:
parent 6abe91ceb2
commit 0108463e7b

21 changed files with 752 additions and 830 deletions

go.mod | 4
							|  | @ -45,7 +45,7 @@ require ( | |||
| 	github.com/spf13/viper v1.16.0 | ||||
| 	github.com/stretchr/testify v1.8.4 | ||||
| 	github.com/superseriousbusiness/activity v1.4.0-gts | ||||
| 	github.com/superseriousbusiness/exif-terminator v0.5.0 | ||||
| 	github.com/superseriousbusiness/exif-terminator v0.6.0 | ||||
| 	github.com/superseriousbusiness/oauth2/v4 v4.3.2-SSB.0.20230227143000-f4900831d6c8 | ||||
| 	github.com/tdewolff/minify/v2 v2.20.7 | ||||
| 	github.com/technologize/otel-go-contrib v1.1.0 | ||||
|  | @ -101,7 +101,6 @@ require ( | |||
| 	github.com/dsoprea/go-iptc v0.0.0-20200610044640-bc9ca208b413 // indirect | ||||
| 	github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd // indirect | ||||
| 	github.com/dsoprea/go-photoshop-info-format v0.0.0-20200610045659-121dd752914d // indirect | ||||
| 	github.com/dsoprea/go-png-image-structure/v2 v2.0.0-20210512210324-29b889a6093d // indirect | ||||
| 	github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e // indirect | ||||
| 	github.com/dustin/go-humanize v1.0.1 // indirect | ||||
| 	github.com/fsnotify/fsnotify v1.7.0 // indirect | ||||
|  | @ -160,6 +159,7 @@ require ( | |||
| 	github.com/spf13/pflag v1.0.5 // indirect | ||||
| 	github.com/subosito/gotenv v1.4.2 // indirect | ||||
| 	github.com/superseriousbusiness/go-jpeg-image-structure/v2 v2.0.0-20220321154430-d89a106fdabe // indirect | ||||
| 	github.com/superseriousbusiness/go-png-image-structure/v2 v2.0.1-SSB // indirect | ||||
| 	github.com/tdewolff/parse/v2 v2.7.5 // indirect | ||||
| 	github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect | ||||
| 	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect | ||||
|  |  | |||
							
								
								
									
go.sum | 8
							|  | @ -152,8 +152,6 @@ github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd h1:l+vLbuxptsC6 | |||
| github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd/go.mod h1:7I+3Pe2o/YSU88W0hWlm9S22W7XI1JFNJ86U0zPKMf8= | ||||
| github.com/dsoprea/go-photoshop-info-format v0.0.0-20200610045659-121dd752914d h1:dg6UMHa50VI01WuPWXPbNJpO8QSyvIF5T5n2IZiqX3A= | ||||
| github.com/dsoprea/go-photoshop-info-format v0.0.0-20200610045659-121dd752914d/go.mod h1:pqKB+ijp27cEcrHxhXVgUUMlSDRuGJJp1E+20Lj5H0E= | ||||
| github.com/dsoprea/go-png-image-structure/v2 v2.0.0-20210512210324-29b889a6093d h1:2zNIgrJTspLxUKoJGl0Ln24+hufPKSjP3cu4++5MeSE= | ||||
| github.com/dsoprea/go-png-image-structure/v2 v2.0.0-20210512210324-29b889a6093d/go.mod h1:scnx0wQSM7UiCMK66dSdiPZvL2hl6iF5DvpZ7uT59MY= | ||||
| github.com/dsoprea/go-utility v0.0.0-20200711062821-fab8125e9bdf/go.mod h1:95+K3z2L0mqsVYd6yveIv1lmtT3tcQQ3dVakPySffW8= | ||||
| github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e h1:IxIbA7VbCNrwumIYjDoMOdf4KOSkMC6NJE4s8oRbE7E= | ||||
| github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e/go.mod h1:uAzdkPTub5Y9yQwXe8W4m2XuP0tK4a9Q/dantD0+uaU= | ||||
|  | @ -505,10 +503,12 @@ github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNG | |||
| github.com/sunfish-shogi/bufseekio v0.0.0-20210207115823-a4185644b365/go.mod h1:dEzdXgvImkQ3WLI+0KQpmEx8T/C/ma9KeS3AfmU899I= | ||||
| github.com/superseriousbusiness/activity v1.4.0-gts h1:9r95sYy80tuGWWpDDNlLwa/k6dKZdyP/k+rhVA+VjdQ= | ||||
| github.com/superseriousbusiness/activity v1.4.0-gts/go.mod h1:AZw0Xb4Oju8rmaJCZ21gc5CPg47MmNgyac+Hx5jo8VM= | ||||
| github.com/superseriousbusiness/exif-terminator v0.5.0 h1:57SO/geyaOl2v/lJSQLVcQbdghpyFuK8ZTtaHL81fUQ= | ||||
| github.com/superseriousbusiness/exif-terminator v0.5.0/go.mod h1:d5IkskXco/3XRXzOrI73uGYn+wahJEqPlQSSqn6jxSw= | ||||
| github.com/superseriousbusiness/exif-terminator v0.6.0 h1:f8FM4R/Au7iB0PGfSKjKAEYMGvM2HHbd1qLxrfmuNFk= | ||||
| github.com/superseriousbusiness/exif-terminator v0.6.0/go.mod h1:HjbpsIyuK6PguA/Rla+Hz0+A9pF6iWf9qGLhqpAN68k= | ||||
| github.com/superseriousbusiness/go-jpeg-image-structure/v2 v2.0.0-20220321154430-d89a106fdabe h1:ksl2oCx/Qo8sNDc3Grb8WGKBM9nkvhCm25uvlT86azE= | ||||
| github.com/superseriousbusiness/go-jpeg-image-structure/v2 v2.0.0-20220321154430-d89a106fdabe/go.mod h1:gH4P6gN1V+wmIw5o97KGaa1RgXB/tVpC2UNzijhg3E4= | ||||
| github.com/superseriousbusiness/go-png-image-structure/v2 v2.0.1-SSB h1:8psprYSK1KdOSH7yQ4PbJq0YYaGQY+gzdW/B0ExDb/8= | ||||
| github.com/superseriousbusiness/go-png-image-structure/v2 v2.0.1-SSB/go.mod h1:ymKGfy9kg4dIdraeZRAdobMS/flzLk3VcRPLpEWOAXg= | ||||
| github.com/superseriousbusiness/oauth2/v4 v4.3.2-SSB.0.20230227143000-f4900831d6c8 h1:nTIhuP157oOFcscuoK1kCme1xTeGIzztSw70lX9NrDQ= | ||||
| github.com/superseriousbusiness/oauth2/v4 v4.3.2-SSB.0.20230227143000-f4900831d6c8/go.mod h1:uYC/W92oVRJ49Vh1GcvTqpeFqHi+Ovrl2sMllQWRAEo= | ||||
| github.com/tdewolff/minify/v2 v2.20.7 h1:NUkuzJ9dvQUNJjSdmmrfELa/ZpnMdyMR/ZKU2bw7N/E= | ||||
|  |  | |||
|  | @ -53,8 +53,7 @@ type Manager struct { | |||
| 
 | ||||
| // NewManager returns a media manager with given state. | ||||
| func NewManager(state *state.State) *Manager { | ||||
| 	m := &Manager{state: state} | ||||
| 	return m | ||||
| 	return &Manager{state: state} | ||||
| } | ||||
| 
 | ||||
| // PreProcessMedia begins the process of decoding | ||||
|  |  | |||
|  | @ -404,7 +404,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessPartial() { | |||
| 
 | ||||
| 	// Since we're cutting off the byte stream | ||||
| 	// halfway through, we should get an error here. | ||||
| 	suite.EqualError(err, "finish: error decoding image: unexpected EOF") | ||||
| 	suite.EqualError(err, "store: error writing media to storage: scan-data is unbounded; EOI not encountered before EOF") | ||||
| 	suite.NotNil(attachment) | ||||
| 
 | ||||
| 	// make sure it's got the stuff set on it that we expect | ||||
|  |  | |||
|  | @ -29,7 +29,7 @@ import ( | |||
| 	"codeberg.org/gruf/go-runners" | ||||
| 	"github.com/disintegration/imaging" | ||||
| 	"github.com/h2non/filetype" | ||||
| 	terminator "github.com/superseriousbusiness/exif-terminator" | ||||
| 	"github.com/superseriousbusiness/exif-terminator" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtserror" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel" | ||||
| 	"github.com/superseriousbusiness/gotosocial/internal/log" | ||||
|  |  | |||
										
Binary file not shown. (Before: 18 KiB, After: 18 KiB)
							
								
								
									
vendor/github.com/dsoprea/go-png-image-structure/v2/chunk_decoder.go | 87 (generated, vendored, deleted)
							|  | @ -1,87 +0,0 @@ | |||
| package pngstructure | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 
 | ||||
| 	"encoding/binary" | ||||
| 
 | ||||
| 	"github.com/dsoprea/go-logging" | ||||
| ) | ||||
| 
 | ||||
| type ChunkDecoder struct { | ||||
| } | ||||
| 
 | ||||
| func NewChunkDecoder() *ChunkDecoder { | ||||
| 	return new(ChunkDecoder) | ||||
| } | ||||
| 
 | ||||
| func (cd *ChunkDecoder) Decode(c *Chunk) (decoded interface{}, err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err := log.Wrap(state.(error)) | ||||
| 			log.Panic(err) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	switch c.Type { | ||||
| 	case "IHDR": | ||||
| 		ihdr, err := cd.decodeIHDR(c) | ||||
| 		log.PanicIf(err) | ||||
| 
 | ||||
| 		return ihdr, nil | ||||
| 	} | ||||
| 
 | ||||
| 	// We don't decode this particular type. | ||||
| 	return nil, nil | ||||
| } | ||||
| 
 | ||||
| type ChunkIHDR struct { | ||||
| 	Width             uint32 | ||||
| 	Height            uint32 | ||||
| 	BitDepth          uint8 | ||||
| 	ColorType         uint8 | ||||
| 	CompressionMethod uint8 | ||||
| 	FilterMethod      uint8 | ||||
| 	InterlaceMethod   uint8 | ||||
| } | ||||
| 
 | ||||
| func (ihdr *ChunkIHDR) String() string { | ||||
| 	return fmt.Sprintf("IHDR<WIDTH=(%d) HEIGHT=(%d) DEPTH=(%d) COLOR-TYPE=(%d) COMP-METHOD=(%d) FILTER-METHOD=(%d) INTRLC-METHOD=(%d)>", ihdr.Width, ihdr.Height, ihdr.BitDepth, ihdr.ColorType, ihdr.CompressionMethod, ihdr.FilterMethod, ihdr.InterlaceMethod) | ||||
| } | ||||
| 
 | ||||
| func (cd *ChunkDecoder) decodeIHDR(c *Chunk) (ihdr *ChunkIHDR, err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err := log.Wrap(state.(error)) | ||||
| 			log.Panic(err) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	b := bytes.NewBuffer(c.Data) | ||||
| 
 | ||||
| 	ihdr = new(ChunkIHDR) | ||||
| 
 | ||||
| 	err = binary.Read(b, binary.BigEndian, &ihdr.Width) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	err = binary.Read(b, binary.BigEndian, &ihdr.Height) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	err = binary.Read(b, binary.BigEndian, &ihdr.BitDepth) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	err = binary.Read(b, binary.BigEndian, &ihdr.ColorType) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	err = binary.Read(b, binary.BigEndian, &ihdr.CompressionMethod) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	err = binary.Read(b, binary.BigEndian, &ihdr.FilterMethod) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	err = binary.Read(b, binary.BigEndian, &ihdr.InterlaceMethod) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	return ihdr, nil | ||||
| } | ||||
							
								
								
									
vendor/github.com/dsoprea/go-png-image-structure/v2/media_parser.go | 118 (generated, vendored, deleted)
							|  | @ -1,118 +0,0 @@ | |||
| package pngstructure | ||||
| 
 | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"image" | ||||
| 	"io" | ||||
| 	"os" | ||||
| 
 | ||||
| 	"image/png" | ||||
| 
 | ||||
| 	"github.com/dsoprea/go-logging" | ||||
| 	"github.com/dsoprea/go-utility/v2/image" | ||||
| ) | ||||
| 
 | ||||
| // PngMediaParser knows how to parse a PNG stream. | ||||
| type PngMediaParser struct { | ||||
| } | ||||
| 
 | ||||
| // NewPngMediaParser returns a new `PngMediaParser` struct. | ||||
| func NewPngMediaParser() *PngMediaParser { | ||||
| 
 | ||||
| 	// TODO(dustin): Add test | ||||
| 
 | ||||
| 	return new(PngMediaParser) | ||||
| } | ||||
| 
 | ||||
| // Parse parses a PNG stream given a `io.ReadSeeker`. | ||||
| func (pmp *PngMediaParser) Parse(rs io.ReadSeeker, size int) (mc riimage.MediaContext, err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err = log.Wrap(state.(error)) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	// TODO(dustin): Add test | ||||
| 
 | ||||
| 	ps := NewPngSplitter() | ||||
| 
 | ||||
| 	err = ps.readHeader(rs) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	s := bufio.NewScanner(rs) | ||||
| 
 | ||||
| 	// Since each segment can be any size, our buffer must be allowed to grow | ||||
| 	// as large as the file. | ||||
| 	buffer := []byte{} | ||||
| 	s.Buffer(buffer, size) | ||||
| 	s.Split(ps.Split) | ||||
| 
 | ||||
| 	for s.Scan() != false { | ||||
| 	} | ||||
| 
 | ||||
| 	log.PanicIf(s.Err()) | ||||
| 
 | ||||
| 	return ps.Chunks(), nil | ||||
| } | ||||
| 
 | ||||
| // ParseFile parses a PNG stream given a file-path. | ||||
| func (pmp *PngMediaParser) ParseFile(filepath string) (mc riimage.MediaContext, err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err = log.Wrap(state.(error)) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	f, err := os.Open(filepath) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	defer f.Close() | ||||
| 
 | ||||
| 	stat, err := f.Stat() | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	size := stat.Size() | ||||
| 
 | ||||
| 	chunks, err := pmp.Parse(f, int(size)) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	return chunks, nil | ||||
| } | ||||
| 
 | ||||
| // ParseBytes parses a PNG stream given a byte-slice. | ||||
| func (pmp *PngMediaParser) ParseBytes(data []byte) (mc riimage.MediaContext, err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err = log.Wrap(state.(error)) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	// TODO(dustin): Add test | ||||
| 
 | ||||
| 	br := bytes.NewReader(data) | ||||
| 
 | ||||
| 	chunks, err := pmp.Parse(br, len(data)) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	return chunks, nil | ||||
| } | ||||
| 
 | ||||
| // LooksLikeFormat returns a boolean indicating whether the stream looks like a | ||||
| // PNG image. | ||||
| func (pmp *PngMediaParser) LooksLikeFormat(data []byte) bool { | ||||
| 	return bytes.Compare(data[:len(PngSignature)], PngSignature[:]) == 0 | ||||
| } | ||||
| 
 | ||||
| // GetImage returns an image.Image-compatible struct. | ||||
| func (pmp *PngMediaParser) GetImage(r io.Reader) (img image.Image, err error) { | ||||
| 	img, err = png.Decode(r) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	return img, nil | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	// Enforce interface conformance. | ||||
| 	_ riimage.MediaParser = new(PngMediaParser) | ||||
| ) | ||||
							
								
								
									
vendor/github.com/dsoprea/go-png-image-structure/v2/png.go | 416 (generated, vendored, deleted)
							|  | @ -1,416 +0,0 @@ | |||
| package pngstructure | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 
 | ||||
| 	"encoding/binary" | ||||
| 	"hash/crc32" | ||||
| 
 | ||||
| 	"github.com/dsoprea/go-exif/v3" | ||||
| 	"github.com/dsoprea/go-exif/v3/common" | ||||
| 	"github.com/dsoprea/go-logging" | ||||
| 	"github.com/dsoprea/go-utility/v2/image" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	PngSignature  = [8]byte{137, 'P', 'N', 'G', '\r', '\n', 26, '\n'} | ||||
| 	EXifChunkType = "eXIf" | ||||
| 	IHDRChunkType = "IHDR" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	ErrNotPng     = errors.New("not png data") | ||||
| 	ErrCrcFailure = errors.New("crc failure") | ||||
| ) | ||||
| 
 | ||||
| // ChunkSlice encapsulates a slice of chunks. | ||||
| type ChunkSlice struct { | ||||
| 	chunks []*Chunk | ||||
| } | ||||
| 
 | ||||
| func NewChunkSlice(chunks []*Chunk) *ChunkSlice { | ||||
| 	if len(chunks) == 0 { | ||||
| 		log.Panicf("ChunkSlice must be initialized with at least one chunk (IHDR)") | ||||
| 	} else if chunks[0].Type != IHDRChunkType { | ||||
| 		log.Panicf("first chunk in any ChunkSlice must be an IHDR") | ||||
| 	} | ||||
| 
 | ||||
| 	return &ChunkSlice{ | ||||
| 		chunks: chunks, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func NewPngChunkSlice() *ChunkSlice { | ||||
| 
 | ||||
| 	ihdrChunk := &Chunk{ | ||||
| 		Type: IHDRChunkType, | ||||
| 	} | ||||
| 
 | ||||
| 	ihdrChunk.UpdateCrc32() | ||||
| 
 | ||||
| 	return NewChunkSlice([]*Chunk{ihdrChunk}) | ||||
| } | ||||
| 
 | ||||
| func (cs *ChunkSlice) String() string { | ||||
| 	return fmt.Sprintf("ChunkSlize<LEN=(%d)>", len(cs.chunks)) | ||||
| } | ||||
| 
 | ||||
| // Chunks exposes the actual slice. | ||||
| func (cs *ChunkSlice) Chunks() []*Chunk { | ||||
| 	return cs.chunks | ||||
| } | ||||
| 
 | ||||
| // Write encodes and writes all chunks. | ||||
| func (cs *ChunkSlice) WriteTo(w io.Writer) (err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err = log.Wrap(state.(error)) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	_, err = w.Write(PngSignature[:]) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	// TODO(dustin): !! This should respect the safe-to-copy characteristic. | ||||
| 	for _, c := range cs.chunks { | ||||
| 		_, err := c.WriteTo(w) | ||||
| 		log.PanicIf(err) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Index returns a map of chunk types to chunk slices, grouping all like chunks. | ||||
| func (cs *ChunkSlice) Index() (index map[string][]*Chunk) { | ||||
| 	index = make(map[string][]*Chunk) | ||||
| 	for _, c := range cs.chunks { | ||||
| 		if grouped, found := index[c.Type]; found == true { | ||||
| 			index[c.Type] = append(grouped, c) | ||||
| 		} else { | ||||
| 			index[c.Type] = []*Chunk{c} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return index | ||||
| } | ||||
| 
 | ||||
| // FindExif returns the the segment that hosts the EXIF data. | ||||
| func (cs *ChunkSlice) FindExif() (chunk *Chunk, err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err = log.Wrap(state.(error)) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	index := cs.Index() | ||||
| 
 | ||||
| 	if chunks, found := index[EXifChunkType]; found == true { | ||||
| 		return chunks[0], nil | ||||
| 	} | ||||
| 
 | ||||
| 	log.Panic(exif.ErrNoExif) | ||||
| 
 | ||||
| 	// Never called. | ||||
| 	return nil, nil | ||||
| } | ||||
| 
 | ||||
| // Exif returns an `exif.Ifd` instance with the existing tags. | ||||
| func (cs *ChunkSlice) Exif() (rootIfd *exif.Ifd, data []byte, err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err = log.Wrap(state.(error)) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	chunk, err := cs.FindExif() | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	im, err := exifcommon.NewIfdMappingWithStandard() | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	ti := exif.NewTagIndex() | ||||
| 
 | ||||
| 	// TODO(dustin): Refactor and support `exif.GetExifData()`. | ||||
| 
 | ||||
| 	_, index, err := exif.Collect(im, ti, chunk.Data) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	return index.RootIfd, chunk.Data, nil | ||||
| } | ||||
| 
 | ||||
| // ConstructExifBuilder returns an `exif.IfdBuilder` instance (needed for | ||||
| // modifying) preloaded with all existing tags. | ||||
| func (cs *ChunkSlice) ConstructExifBuilder() (rootIb *exif.IfdBuilder, err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err = log.Wrap(state.(error)) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	rootIfd, _, err := cs.Exif() | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	ib := exif.NewIfdBuilderFromExistingChain(rootIfd) | ||||
| 
 | ||||
| 	return ib, nil | ||||
| } | ||||
| 
 | ||||
| // SetExif encodes and sets EXIF data into this segment. | ||||
| func (cs *ChunkSlice) SetExif(ib *exif.IfdBuilder) (err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err = log.Wrap(state.(error)) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	// Encode. | ||||
| 
 | ||||
| 	ibe := exif.NewIfdByteEncoder() | ||||
| 
 | ||||
| 	exifData, err := ibe.EncodeToExif(ib) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	// Set. | ||||
| 
 | ||||
| 	exifChunk, err := cs.FindExif() | ||||
| 	if err == nil { | ||||
| 		// EXIF chunk already exists. | ||||
| 
 | ||||
| 		exifChunk.Data = exifData | ||||
| 		exifChunk.Length = uint32(len(exifData)) | ||||
| 	} else { | ||||
| 		if log.Is(err, exif.ErrNoExif) != true { | ||||
| 			log.Panic(err) | ||||
| 		} | ||||
| 
 | ||||
| 		// Add a EXIF chunk for the first time. | ||||
| 
 | ||||
| 		exifChunk = &Chunk{ | ||||
| 			Type:   EXifChunkType, | ||||
| 			Data:   exifData, | ||||
| 			Length: uint32(len(exifData)), | ||||
| 		} | ||||
| 
 | ||||
| 		// Insert it after the IHDR chunk (it's a reliably appropriate place to | ||||
| 		// put it). | ||||
| 		cs.chunks = append(cs.chunks[:1], append([]*Chunk{exifChunk}, cs.chunks[1:]...)...) | ||||
| 	} | ||||
| 
 | ||||
| 	exifChunk.UpdateCrc32() | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // PngSplitter hosts the princpal `Split()` method uses by `bufio.Scanner`. | ||||
| type PngSplitter struct { | ||||
| 	chunks        []*Chunk | ||||
| 	currentOffset int | ||||
| 
 | ||||
| 	doCheckCrc bool | ||||
| 	crcErrors  []string | ||||
| } | ||||
| 
 | ||||
| func (ps *PngSplitter) Chunks() *ChunkSlice { | ||||
| 	return NewChunkSlice(ps.chunks) | ||||
| } | ||||
| 
 | ||||
| func (ps *PngSplitter) DoCheckCrc(doCheck bool) { | ||||
| 	ps.doCheckCrc = doCheck | ||||
| } | ||||
| 
 | ||||
| func (ps *PngSplitter) CrcErrors() []string { | ||||
| 	return ps.crcErrors | ||||
| } | ||||
| 
 | ||||
| func NewPngSplitter() *PngSplitter { | ||||
| 	return &PngSplitter{ | ||||
| 		chunks:     make([]*Chunk, 0), | ||||
| 		doCheckCrc: true, | ||||
| 		crcErrors:  make([]string, 0), | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Chunk describes a single chunk. | ||||
| type Chunk struct { | ||||
| 	Offset int | ||||
| 	Length uint32 | ||||
| 	Type   string | ||||
| 	Data   []byte | ||||
| 	Crc    uint32 | ||||
| } | ||||
| 
 | ||||
| func (c *Chunk) String() string { | ||||
| 	return fmt.Sprintf("Chunk<OFFSET=(%d) LENGTH=(%d) TYPE=[%s] CRC=(%d)>", c.Offset, c.Length, c.Type, c.Crc) | ||||
| } | ||||
| 
 | ||||
| func calculateCrc32(chunk *Chunk) uint32 { | ||||
| 	c := crc32.NewIEEE() | ||||
| 
 | ||||
| 	c.Write([]byte(chunk.Type)) | ||||
| 	c.Write(chunk.Data) | ||||
| 
 | ||||
| 	return c.Sum32() | ||||
| } | ||||
| 
 | ||||
| func (c *Chunk) UpdateCrc32() { | ||||
| 	c.Crc = calculateCrc32(c) | ||||
| } | ||||
| 
 | ||||
| func (c *Chunk) CheckCrc32() bool { | ||||
| 	expected := calculateCrc32(c) | ||||
| 	return c.Crc == expected | ||||
| } | ||||
| 
 | ||||
| // Bytes encodes and returns the bytes for this chunk. | ||||
| func (c *Chunk) Bytes() []byte { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err := log.Wrap(state.(error)) | ||||
| 			log.Panic(err) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	if len(c.Data) != int(c.Length) { | ||||
| 		log.Panicf("length of data not correct") | ||||
| 	} | ||||
| 
 | ||||
| 	preallocated := make([]byte, 0, 4+4+c.Length+4) | ||||
| 	b := bytes.NewBuffer(preallocated) | ||||
| 
 | ||||
| 	err := binary.Write(b, binary.BigEndian, c.Length) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	_, err = b.Write([]byte(c.Type)) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	if c.Data != nil { | ||||
| 		_, err = b.Write(c.Data) | ||||
| 		log.PanicIf(err) | ||||
| 	} | ||||
| 
 | ||||
| 	err = binary.Write(b, binary.BigEndian, c.Crc) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	return b.Bytes() | ||||
| } | ||||
| 
 | ||||
| // Write encodes and writes the bytes for this chunk. | ||||
| func (c *Chunk) WriteTo(w io.Writer) (count int, err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err = log.Wrap(state.(error)) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	if len(c.Data) != int(c.Length) { | ||||
| 		log.Panicf("length of data not correct") | ||||
| 	} | ||||
| 
 | ||||
| 	err = binary.Write(w, binary.BigEndian, c.Length) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	_, err = w.Write([]byte(c.Type)) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	_, err = w.Write(c.Data) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	err = binary.Write(w, binary.BigEndian, c.Crc) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	return 4 + len(c.Type) + len(c.Data) + 4, nil | ||||
| } | ||||
| 
 | ||||
| // readHeader verifies that the PNG header bytes appear next. | ||||
| func (ps *PngSplitter) readHeader(r io.Reader) (err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err = log.Wrap(state.(error)) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	len_ := len(PngSignature) | ||||
| 	header := make([]byte, len_) | ||||
| 
 | ||||
| 	_, err = r.Read(header) | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	ps.currentOffset += len_ | ||||
| 
 | ||||
| 	if bytes.Compare(header, PngSignature[:]) != 0 { | ||||
| 		log.Panic(ErrNotPng) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Split fulfills the `bufio.SplitFunc` function definition for | ||||
| // `bufio.Scanner`. | ||||
| func (ps *PngSplitter) Split(data []byte, atEOF bool) (advance int, token []byte, err error) { | ||||
| 	defer func() { | ||||
| 		if state := recover(); state != nil { | ||||
| 			err = log.Wrap(state.(error)) | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	// We might have more than one chunk's worth, and, if `atEOF` is true, we | ||||
| 	// won't be called again. We'll repeatedly try to read additional chunks, | ||||
| 	// but, when we run out of the data we were given then we'll return the | ||||
| 	// number of bytes fo rthe chunks we've already completely read. Then, | ||||
| 	// we'll be called again from theend ofthose bytes, at which point we'll | ||||
| 	// indicate that we don't yet have enough for another chunk, and we should | ||||
| 	// be then called with more. | ||||
| 	for { | ||||
| 		len_ := len(data) | ||||
| 		if len_ < 8 { | ||||
| 			return advance, nil, nil | ||||
| 		} | ||||
| 
 | ||||
| 		length := binary.BigEndian.Uint32(data[:4]) | ||||
| 		type_ := string(data[4:8]) | ||||
| 		chunkSize := (8 + int(length) + 4) | ||||
| 
 | ||||
| 		if len_ < chunkSize { | ||||
| 			return advance, nil, nil | ||||
| 		} | ||||
| 
 | ||||
| 		crcIndex := 8 + length | ||||
| 		crc := binary.BigEndian.Uint32(data[crcIndex : crcIndex+4]) | ||||
| 
 | ||||
| 		content := make([]byte, length) | ||||
| 		copy(content, data[8:8+length]) | ||||
| 
 | ||||
| 		c := &Chunk{ | ||||
| 			Length: length, | ||||
| 			Type:   type_, | ||||
| 			Data:   content, | ||||
| 			Crc:    crc, | ||||
| 			Offset: ps.currentOffset, | ||||
| 		} | ||||
| 
 | ||||
| 		ps.chunks = append(ps.chunks, c) | ||||
| 
 | ||||
| 		if c.CheckCrc32() == false { | ||||
| 			ps.crcErrors = append(ps.crcErrors, type_) | ||||
| 
 | ||||
| 			if ps.doCheckCrc == true { | ||||
| 				log.Panic(ErrCrcFailure) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		advance += chunkSize | ||||
| 		ps.currentOffset += chunkSize | ||||
| 
 | ||||
| 		data = data[chunkSize:] | ||||
| 	} | ||||
| 
 | ||||
| 	return advance, nil, nil | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	// Enforce interface conformance. | ||||
| 	_ riimage.MediaContext = new(ChunkSlice) | ||||
| ) | ||||
							
								
								
									
vendor/github.com/dsoprea/go-png-image-structure/v2/testing_common.go | 64 (generated, vendored, deleted)
							|  | @ -1,64 +0,0 @@ | |||
| package pngstructure | ||||
| 
 | ||||
| import ( | ||||
| 	"os" | ||||
| 	"path" | ||||
| 
 | ||||
| 	"github.com/dsoprea/go-logging" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	assetsPath = "" | ||||
| ) | ||||
| 
 | ||||
| func getModuleRootPath() string { | ||||
| 	moduleRootPath := os.Getenv("PNG_MODULE_ROOT_PATH") | ||||
| 	if moduleRootPath != "" { | ||||
| 		return moduleRootPath | ||||
| 	} | ||||
| 
 | ||||
| 	currentWd, err := os.Getwd() | ||||
| 	log.PanicIf(err) | ||||
| 
 | ||||
| 	currentPath := currentWd | ||||
| 	visited := make([]string, 0) | ||||
| 
 | ||||
| 	for { | ||||
| 		tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") | ||||
| 
 | ||||
| 		_, err := os.Stat(tryStampFilepath) | ||||
| 		if err != nil && os.IsNotExist(err) != true { | ||||
| 			log.Panic(err) | ||||
| 		} else if err == nil { | ||||
| 			break | ||||
| 		} | ||||
| 
 | ||||
| 		visited = append(visited, tryStampFilepath) | ||||
| 
 | ||||
| 		currentPath = path.Dir(currentPath) | ||||
| 		if currentPath == "/" { | ||||
| 			log.Panicf("could not find module-root: %v", visited) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return currentPath | ||||
| } | ||||
| 
 | ||||
| func getTestAssetsPath() string { | ||||
| 	if assetsPath == "" { | ||||
| 		moduleRootPath := getModuleRootPath() | ||||
| 		assetsPath = path.Join(moduleRootPath, "assets") | ||||
| 	} | ||||
| 
 | ||||
| 	return assetsPath | ||||
| } | ||||
| 
 | ||||
| func getTestBasicImageFilepath() string { | ||||
| 	assetsPath := getTestAssetsPath() | ||||
| 	return path.Join(assetsPath, "libpng.png") | ||||
| } | ||||
| 
 | ||||
| func getTestExifImageFilepath() string { | ||||
| 	assetsPath := getTestAssetsPath() | ||||
| 	return path.Join(assetsPath, "exif.png") | ||||
| } | ||||
							
								
								
									
vendor/github.com/superseriousbusiness/exif-terminator/logger.go | 47 (generated, vendored, deleted)
							|  | @ -1,47 +0,0 @@ | |||
| /* | ||||
|    exif-terminator | ||||
|    Copyright (C) 2022 SuperSeriousBusiness admin@gotosocial.org | ||||
| 
 | ||||
|    This program is free software: you can redistribute it and/or modify | ||||
|    it under the terms of the GNU Affero General Public License as published by | ||||
|    the Free Software Foundation, either version 3 of the License, or | ||||
|    (at your option) any later version. | ||||
| 
 | ||||
|    This program is distributed in the hope that it will be useful, | ||||
|    but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|    GNU Affero General Public License for more details. | ||||
| 
 | ||||
|    You should have received a copy of the GNU Affero General Public License | ||||
|    along with this program.  If not, see <http://www.gnu.org/licenses/>. | ||||
| */ | ||||
| 
 | ||||
| package terminator | ||||
| 
 | ||||
| import "fmt" | ||||
| 
 | ||||
| var logger ErrorLogger | ||||
| 
 | ||||
| func init() { | ||||
| 	logger = &defaultErrorLogger{} | ||||
| } | ||||
| 
 | ||||
| // ErrorLogger denotes a generic error logging function. | ||||
| type ErrorLogger interface { | ||||
| 	Error(args ...interface{}) | ||||
| } | ||||
| 
 | ||||
| type defaultErrorLogger struct{} | ||||
| 
 | ||||
| func (d *defaultErrorLogger) Error(args ...interface{}) { | ||||
| 	fmt.Println(args...) | ||||
| } | ||||
| 
 | ||||
| // SetErrorLogger allows a user of the exif-terminator library | ||||
| // to set the logger that will be used for error logging. | ||||
| // | ||||
| // If it is not set, the default error logger will be used, which | ||||
| // just prints errors to stdout. | ||||
| func SetErrorLogger(errorLogger ErrorLogger) { | ||||
| 	logger = errorLogger | ||||
| } | ||||
							
								
								
									
vendor/github.com/superseriousbusiness/exif-terminator/png.go | 68 (generated, vendored)
							|  | @ -19,10 +19,9 @@ | |||
| package terminator | ||||
| 
 | ||||
| import ( | ||||
| 	"encoding/binary" | ||||
| 	"io" | ||||
| 
 | ||||
| 	pngstructure "github.com/dsoprea/go-png-image-structure/v2" | ||||
| 	pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2" | ||||
| ) | ||||
| 
 | ||||
| type pngVisitor struct { | ||||
|  | @ -45,49 +44,50 @@ func (v *pngVisitor) split(data []byte, atEOF bool) (int, []byte, error) { | |||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// check if the splitter has any new chunks in it that we haven't written yet | ||||
| 	chunkSlice := v.ps.Chunks() | ||||
| 	// Check if the splitter now has | ||||
| 	// any new chunks in it for us. | ||||
| 	chunkSlice, err := v.ps.Chunks() | ||||
| 	if err != nil { | ||||
| 		return advance, token, err | ||||
| 	} | ||||
| 
 | ||||
| 	// Write each chunk by passing it | ||||
| 	// through our custom write func, | ||||
| 	// which strips out exif and fixes | ||||
| 	// the CRC of each chunk. | ||||
| 	chunks := chunkSlice.Chunks() | ||||
| 	for i, chunk := range chunks { | ||||
| 		// look through all the chunks in the splitter | ||||
| 		if i > v.lastWrittenChunk { | ||||
| 			// we've got a chunk we haven't written yet! write it... | ||||
| 			if err := v.writeChunk(chunk); err != nil { | ||||
| 				return advance, token, err | ||||
| 			} | ||||
| 			// then remove the data | ||||
| 			chunk.Data = chunk.Data[:0] | ||||
| 			// and update | ||||
| 			v.lastWrittenChunk = i | ||||
| 		if i <= v.lastWrittenChunk { | ||||
| 			// Skip already | ||||
| 			// written chunks. | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		// Write this new chunk. | ||||
| 		if err := v.writeChunk(chunk); err != nil { | ||||
| 			return advance, token, err | ||||
| 		} | ||||
| 		v.lastWrittenChunk = i | ||||
| 
 | ||||
| 		// Zero data; here you | ||||
| 		// go garbage collector. | ||||
| 		chunk.Data = nil | ||||
| 	} | ||||
| 
 | ||||
| 	return advance, token, err | ||||
| } | ||||
| 
 | ||||
| func (v *pngVisitor) writeChunk(chunk *pngstructure.Chunk) error { | ||||
| 	if err := binary.Write(v.writer, binary.BigEndian, chunk.Length); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	if _, err := v.writer.Write([]byte(chunk.Type)); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	if chunk.Type == pngstructure.EXifChunkType { | ||||
| 		blank := make([]byte, len(chunk.Data)) | ||||
| 		if _, err := v.writer.Write(blank); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} else { | ||||
| 		if _, err := v.writer.Write(chunk.Data); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		// Replace exif data | ||||
| 		// with zero bytes. | ||||
| 		clear(chunk.Data) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := binary.Write(v.writer, binary.BigEndian, chunk.Crc); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	// Fix CRC of each chunk. | ||||
| 	chunk.UpdateCrc32() | ||||
| 
 | ||||
| 	return nil | ||||
| 	// finally, write chunk to writer. | ||||
| 	_, err := chunk.WriteTo(v.writer) | ||||
| 	return err | ||||
| } | ||||
|  |  | |||
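One detail worth spelling out from the rewritten writeChunk above: a PNG chunk's CRC is computed over the chunk type plus its data, so blanking the eXIf payload invalidates the stored checksum, which is why `UpdateCrc32` is called before writing. Below is a small standalone sketch of that calculation, mirroring `calculateCrc32` from the vendored go-png-image-structure package; the sample payload is made up, and it uses the Go 1.21 `clear` builtin just as the patch does.

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// pngChunkCrc mirrors calculateCrc32 in the vendored package:
// the checksum covers the chunk type bytes followed by the data.
func pngChunkCrc(chunkType string, data []byte) uint32 {
	c := crc32.NewIEEE()
	c.Write([]byte(chunkType))
	c.Write(data)
	return c.Sum32()
}

func main() {
	payload := []byte{0xde, 0xad, 0xbe, 0xef} // stand-in for exif bytes
	before := pngChunkCrc("eXIf", payload)

	// Blank the payload as pngVisitor.writeChunk now does,
	// then recompute so the written chunk stays consistent.
	clear(payload)
	after := pngChunkCrc("eXIf", payload)

	fmt.Printf("crc before=%08x, after=%08x\n", before, after)
}
```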
							
								
								
									
vendor/github.com/superseriousbusiness/exif-terminator/terminator.go | 94 (generated, vendored)
							|  | @ -25,29 +25,34 @@ import ( | |||
| 	"fmt" | ||||
| 	"io" | ||||
| 
 | ||||
| 	pngstructure "github.com/dsoprea/go-png-image-structure/v2" | ||||
| 	jpegstructure "github.com/superseriousbusiness/go-jpeg-image-structure/v2" | ||||
| 	pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2" | ||||
| ) | ||||
| 
 | ||||
| func Terminate(in io.Reader, fileSize int, mediaType string) (io.Reader, error) { | ||||
| 	// to avoid keeping too much stuff in memory we want to pipe data directly | ||||
| 	// To avoid keeping too much stuff | ||||
| 	// in memory we want to pipe data | ||||
| 	// directly to the reader. | ||||
| 	pipeReader, pipeWriter := io.Pipe() | ||||
| 
 | ||||
| 	// we don't know ahead of time how long segments might be: they could be as large as | ||||
| 	// the file itself, so unfortunately we need to allocate a buffer here that'scanner as large | ||||
| 	// as the file | ||||
| 	// We don't know ahead of time how long | ||||
| 	// segments might be: they could be as | ||||
| 	// large as the file itself, so we need | ||||
| 	// a buffer with generous overhead. | ||||
| 	scanner := bufio.NewScanner(in) | ||||
| 	scanner.Buffer([]byte{}, fileSize) | ||||
| 	var err error | ||||
| 
 | ||||
| 	var err error | ||||
| 	switch mediaType { | ||||
| 	case "image/jpeg", "jpeg", "jpg": | ||||
| 		err = terminateJpeg(scanner, pipeWriter, fileSize) | ||||
| 
 | ||||
| 	case "image/webp", "webp": | ||||
| 		err = terminateWebp(scanner, pipeWriter) | ||||
| 
 | ||||
| 	case "image/png", "png": | ||||
| 		// for pngs we need to skip the header bytes, so read them in | ||||
| 		// and check we're really dealing with a png here | ||||
| 		// For pngs we need to skip the header bytes, so read | ||||
| 		// them in and check we're really dealing with a png. | ||||
| 		header := make([]byte, len(pngstructure.PngSignature)) | ||||
| 		if _, headerError := in.Read(header); headerError != nil { | ||||
| 			err = headerError | ||||
|  | @ -67,68 +72,87 @@ func Terminate(in io.Reader, fileSize int, mediaType string) (io.Reader, error) | |||
| 	return pipeReader, err | ||||
| } | ||||
| 
 | ||||
| func terminateJpeg(scanner *bufio.Scanner, writer io.WriteCloser, expectedFileSize int) error { | ||||
| 	// jpeg visitor is where the spicy hack of streaming the de-exifed data is contained | ||||
| func terminateJpeg(scanner *bufio.Scanner, writer *io.PipeWriter, expectedFileSize int) error { | ||||
| 	v := &jpegVisitor{ | ||||
| 		writer:           writer, | ||||
| 		expectedFileSize: expectedFileSize, | ||||
| 	} | ||||
| 
 | ||||
| 	// provide the visitor to the splitter so that it triggers on every section scan | ||||
| 	// Provide the visitor to the splitter so | ||||
| 	// that it triggers on every section scan. | ||||
| 	js := jpegstructure.NewJpegSplitter(v) | ||||
| 
 | ||||
| 	// the visitor also needs to read back the list of segments: for this it needs | ||||
| 	// to know what jpeg splitter it's attached to, so give it a pointer to the splitter | ||||
| 	// The visitor also needs to read back the | ||||
| 	// list of segments: for this it needs to | ||||
| 	// know what jpeg splitter it's attached to, | ||||
| 	// so give it a pointer to the splitter. | ||||
| 	v.js = js | ||||
| 
 | ||||
| 	// use the jpeg splitters 'split' function, which satisfies the bufio.SplitFunc interface | ||||
| 	// Jpeg visitor's 'split' function | ||||
| 	// satisfies bufio.SplitFunc{}. | ||||
| 	scanner.Split(js.Split) | ||||
| 
 | ||||
| 	scanAndClose(scanner, writer) | ||||
| 	go scanAndClose(scanner, writer) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func terminateWebp(scanner *bufio.Scanner, writer io.WriteCloser) error { | ||||
| func terminateWebp(scanner *bufio.Scanner, writer *io.PipeWriter) error { | ||||
| 	v := &webpVisitor{ | ||||
| 		writer: writer, | ||||
| 	} | ||||
| 
 | ||||
| 	// use the webp visitor's 'split' function, which satisfies the bufio.SplitFunc interface | ||||
| 	// Webp visitor's 'split' function | ||||
| 	// satisfies bufio.SplitFunc{}. | ||||
| 	scanner.Split(v.split) | ||||
| 
 | ||||
| 	scanAndClose(scanner, writer) | ||||
| 	go scanAndClose(scanner, writer) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func terminatePng(scanner *bufio.Scanner, writer io.WriteCloser) error { | ||||
| func terminatePng(scanner *bufio.Scanner, writer *io.PipeWriter) error { | ||||
| 	ps := pngstructure.NewPngSplitter() | ||||
| 
 | ||||
| 	// Don't bother checking CRC; | ||||
| 	// we're overwriting it anyway. | ||||
| 	ps.DoCheckCrc(false) | ||||
| 
 | ||||
| 	v := &pngVisitor{ | ||||
| 		ps:               ps, | ||||
| 		writer:           writer, | ||||
| 		lastWrittenChunk: -1, | ||||
| 	} | ||||
| 
 | ||||
| 	// use the png visitor's 'split' function, which satisfies the bufio.SplitFunc interface | ||||
| 	// Png visitor's 'split' function | ||||
| 	// satisfies bufio.SplitFunc{}. | ||||
| 	scanner.Split(v.split) | ||||
| 
 | ||||
| 	scanAndClose(scanner, writer) | ||||
| 	go scanAndClose(scanner, writer) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func scanAndClose(scanner *bufio.Scanner, writer io.WriteCloser) { | ||||
| 	// scan asynchronously until there's nothing left to scan, and then close the writer | ||||
| 	// so that the reader on the other side knows that we're done | ||||
| 	// | ||||
| 	// due to the nature of io.Pipe, writing won't actually work | ||||
| 	// until the pipeReader starts being read by the caller, which | ||||
| 	// is why we do this asynchronously | ||||
| 	go func() { | ||||
| 		defer writer.Close() | ||||
| 		for scanner.Scan() { | ||||
| 		} | ||||
| 		if scanner.Err() != nil { | ||||
| 			logger.Error(scanner.Err()) | ||||
| 		} | ||||
| // scanAndClose scans through the given scanner until there's | ||||
| // nothing left to scan, and then closes the writer so that the | ||||
| // reader on the other side of the pipe knows that we're done. | ||||
| // | ||||
| // Any error encountered when scanning will be logged by terminator. | ||||
| // | ||||
| // Due to the nature of io.Pipe, writing won't actually work | ||||
| // until the pipeReader starts being read by the caller, which | ||||
| // is why this function should always be called asynchronously. | ||||
| func scanAndClose(scanner *bufio.Scanner, writer *io.PipeWriter) { | ||||
| 	var err error | ||||
| 
 | ||||
| 	defer func() { | ||||
| 		// Always close writer, using returned | ||||
| 		// scanner error (if any). If err is nil | ||||
| 		// then the standard io.EOF will be used. | ||||
| 		// (this will not overwrite existing). | ||||
| 		writer.CloseWithError(err) | ||||
| 	}() | ||||
| 
 | ||||
| 	for scanner.Scan() { | ||||
| 	} | ||||
| 
 | ||||
| 	// Set error on return. | ||||
| 	err = scanner.Err() | ||||
| } | ||||
|  |  | |||
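The scanAndClose changes above lean on the standard io.Pipe contract: nothing is actually written until the other end reads, and CloseWithError(nil) surfaces to the reader as a plain io.EOF. A self-contained sketch of that producer/consumer pattern, separate from the patch itself:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	pr, pw := io.Pipe()

	// Producer: runs asynchronously and closes the writer with whatever
	// error it hit (a nil error surfaces to the reader as io.EOF).
	go func() {
		var err error
		defer func() { pw.CloseWithError(err) }()
		_, err = io.Copy(pw, strings.NewReader("chunk 1\nchunk 2\n"))
	}()

	// Consumer: reading is what drives the producer forward,
	// because pipe writes block until someone reads them.
	data, err := io.ReadAll(pr)
	fmt.Printf("read %q (err=%v)\n", data, err)
}
```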
							
								
								
									
vendor/github.com/superseriousbusiness/go-png-image-structure/v2/chunk_decoder.go | 81 (generated, vendored, new file)
							|  | @ -0,0 +1,81 @@ | |||
| package pngstructure | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 
 | ||||
| 	"encoding/binary" | ||||
| ) | ||||
| 
 | ||||
| type ChunkDecoder struct { | ||||
| } | ||||
| 
 | ||||
| func NewChunkDecoder() *ChunkDecoder { | ||||
| 	return new(ChunkDecoder) | ||||
| } | ||||
| 
 | ||||
| func (cd *ChunkDecoder) Decode(c *Chunk) (decoded interface{}, err error) { | ||||
| 	switch c.Type { | ||||
| 	case "IHDR": | ||||
| 		return cd.decodeIHDR(c) | ||||
| 	} | ||||
| 
 | ||||
| 	// We don't decode this type. | ||||
| 	return nil, nil | ||||
| } | ||||
| 
 | ||||
| type ChunkIHDR struct { | ||||
| 	Width             uint32 | ||||
| 	Height            uint32 | ||||
| 	BitDepth          uint8 | ||||
| 	ColorType         uint8 | ||||
| 	CompressionMethod uint8 | ||||
| 	FilterMethod      uint8 | ||||
| 	InterlaceMethod   uint8 | ||||
| } | ||||
| 
 | ||||
| func (ihdr *ChunkIHDR) String() string { | ||||
| 	return fmt.Sprintf("IHDR<WIDTH=(%d) HEIGHT=(%d) DEPTH=(%d) COLOR-TYPE=(%d) COMP-METHOD=(%d) FILTER-METHOD=(%d) INTRLC-METHOD=(%d)>", | ||||
| 		ihdr.Width, ihdr.Height, ihdr.BitDepth, ihdr.ColorType, ihdr.CompressionMethod, ihdr.FilterMethod, ihdr.InterlaceMethod, | ||||
| 	) | ||||
| } | ||||
| 
 | ||||
| func (cd *ChunkDecoder) decodeIHDR(c *Chunk) (*ChunkIHDR, error) { | ||||
| 	var ( | ||||
| 		b     = bytes.NewBuffer(c.Data) | ||||
| 		ihdr  = new(ChunkIHDR) | ||||
| 		readf = func(data interface{}) error { | ||||
| 			return binary.Read(b, binary.BigEndian, data) | ||||
| 		} | ||||
| 	) | ||||
| 
 | ||||
| 	if err := readf(&ihdr.Width); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := readf(&ihdr.Height); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := readf(&ihdr.BitDepth); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := readf(&ihdr.ColorType); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := readf(&ihdr.CompressionMethod); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := readf(&ihdr.FilterMethod); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := readf(&ihdr.InterlaceMethod); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return ihdr, nil | ||||
| } | ||||
							
								
								
									
vendor/github.com/superseriousbusiness/go-png-image-structure/v2/media_parser.go | 85 (generated, vendored, new file)
							|  | @ -0,0 +1,85 @@ | |||
| package pngstructure | ||||
| 
 | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"image" | ||||
| 	"io" | ||||
| 	"os" | ||||
| 
 | ||||
| 	"image/png" | ||||
| 
 | ||||
| 	riimage "github.com/dsoprea/go-utility/v2/image" | ||||
| ) | ||||
| 
 | ||||
| // PngMediaParser knows how to parse a PNG stream. | ||||
| type PngMediaParser struct { | ||||
| } | ||||
| 
 | ||||
| // NewPngMediaParser returns a new `PngMediaParser`. | ||||
| func NewPngMediaParser() riimage.MediaParser { | ||||
| 	return new(PngMediaParser) | ||||
| } | ||||
| 
 | ||||
| // Parse parses a PNG stream given a `io.ReadSeeker`. | ||||
| func (pmp *PngMediaParser) Parse( | ||||
| 	rs io.ReadSeeker, | ||||
| 	size int, | ||||
| ) (riimage.MediaContext, error) { | ||||
| 	ps := NewPngSplitter() | ||||
| 	if err := ps.readHeader(rs); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	s := bufio.NewScanner(rs) | ||||
| 
 | ||||
| 	// Since each segment can be any | ||||
| 	// size, our buffer must be allowed | ||||
| 	// to grow as large as the file. | ||||
| 	buffer := []byte{} | ||||
| 	s.Buffer(buffer, size) | ||||
| 	s.Split(ps.Split) | ||||
| 
 | ||||
| 	for s.Scan() { | ||||
| 	} | ||||
| 
 | ||||
| 	if err := s.Err(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return ps.Chunks() | ||||
| } | ||||
| 
 | ||||
| // ParseFile parses a PNG stream given a file-path. | ||||
| func (pmp *PngMediaParser) ParseFile(filepath string) (riimage.MediaContext, error) { | ||||
| 	f, err := os.Open(filepath) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer f.Close() | ||||
| 
 | ||||
| 	stat, err := f.Stat() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	size := stat.Size() | ||||
| 	return pmp.Parse(f, int(size)) | ||||
| } | ||||
| 
 | ||||
| // ParseBytes parses a PNG stream given a byte-slice. | ||||
| func (pmp *PngMediaParser) ParseBytes(data []byte) (riimage.MediaContext, error) { | ||||
| 	br := bytes.NewReader(data) | ||||
| 	return pmp.Parse(br, len(data)) | ||||
| } | ||||
| 
 | ||||
| // LooksLikeFormat returns a boolean indicating | ||||
| // whether the stream looks like a PNG image. | ||||
| func (pmp *PngMediaParser) LooksLikeFormat(data []byte) bool { | ||||
| 	return bytes.Equal(data[:len(PngSignature)], PngSignature[:]) | ||||
| } | ||||
| 
 | ||||
| // GetImage returns an image.Image-compatible struct. | ||||
| func (pmp *PngMediaParser) GetImage(r io.Reader) (img image.Image, err error) { | ||||
| 	return png.Decode(r) | ||||
| } | ||||
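If you ever need to inspect PNG chunks directly, the newly vendored parser shown above can be driven on its own. A brief sketch follows; the file path is just an example, and the concrete MediaContext type is this package's *ChunkSlice (defined in png.go below).

```go
package main

import (
	"fmt"

	pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2"
)

func main() {
	pmp := new(pngstructure.PngMediaParser)

	// Example path; ParseFile returns a riimage.MediaContext whose
	// concrete type in this package is *pngstructure.ChunkSlice.
	mc, err := pmp.ParseFile("some-image.png")
	if err != nil {
		panic(err)
	}

	cs := mc.(*pngstructure.ChunkSlice)
	for _, chunk := range cs.Chunks() {
		// Chunk.String prints offset, length, type and CRC.
		fmt.Println(chunk)
	}
}
```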
							
								
								
									
vendor/github.com/superseriousbusiness/go-png-image-structure/v2/png.go | 386 (generated, vendored, new file)
							|  | @ -0,0 +1,386 @@ | |||
| package pngstructure | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 
 | ||||
| 	"encoding/binary" | ||||
| 	"hash/crc32" | ||||
| 
 | ||||
| 	"github.com/dsoprea/go-exif/v3" | ||||
| 	exifcommon "github.com/dsoprea/go-exif/v3/common" | ||||
| 	riimage "github.com/dsoprea/go-utility/v2/image" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	PngSignature  = [8]byte{137, 'P', 'N', 'G', '\r', '\n', 26, '\n'} | ||||
| 	EXifChunkType = "eXIf" | ||||
| 	IHDRChunkType = "IHDR" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	ErrNotPng     = errors.New("not png data") | ||||
| 	ErrCrcFailure = errors.New("crc failure") | ||||
| ) | ||||
| 
 | ||||
| // ChunkSlice encapsulates a slice of chunks. | ||||
| type ChunkSlice struct { | ||||
| 	chunks []*Chunk | ||||
| } | ||||
| 
 | ||||
| func NewChunkSlice(chunks []*Chunk) (*ChunkSlice, error) { | ||||
| 	if len(chunks) == 0 { | ||||
| 		err := errors.New("ChunkSlice must be initialized with at least one chunk (IHDR)") | ||||
| 		return nil, err | ||||
| 	} else if chunks[0].Type != IHDRChunkType { | ||||
| 		err := errors.New("first chunk in any ChunkSlice must be an IHDR") | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return &ChunkSlice{chunks}, nil | ||||
| } | ||||
| 
 | ||||
| func NewPngChunkSlice() (*ChunkSlice, error) { | ||||
| 	ihdrChunk := &Chunk{ | ||||
| 		Type: IHDRChunkType, | ||||
| 	} | ||||
| 
 | ||||
| 	ihdrChunk.UpdateCrc32() | ||||
| 
 | ||||
| 	return NewChunkSlice([]*Chunk{ihdrChunk}) | ||||
| } | ||||
| 
 | ||||
| func (cs *ChunkSlice) String() string { | ||||
| 	return fmt.Sprintf("ChunkSlize<LEN=(%d)>", len(cs.chunks)) | ||||
| } | ||||
| 
 | ||||
| // Chunks exposes the actual slice. | ||||
| func (cs *ChunkSlice) Chunks() []*Chunk { | ||||
| 	return cs.chunks | ||||
| } | ||||
| 
 | ||||
| // Write encodes and writes all chunks. | ||||
| func (cs *ChunkSlice) WriteTo(w io.Writer) error { | ||||
| 	if _, err := w.Write(PngSignature[:]); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	// TODO(dustin): !! This should respect | ||||
| 	// the safe-to-copy characteristic. | ||||
| 	for _, c := range cs.chunks { | ||||
| 		if _, err := c.WriteTo(w); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Index returns a map of chunk types to chunk slices, grouping all like chunks. | ||||
| func (cs *ChunkSlice) Index() (index map[string][]*Chunk) { | ||||
| 	index = make(map[string][]*Chunk) | ||||
| 	for _, c := range cs.chunks { | ||||
| 		if grouped, found := index[c.Type]; found { | ||||
| 			index[c.Type] = append(grouped, c) | ||||
| 		} else { | ||||
| 			index[c.Type] = []*Chunk{c} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return index | ||||
| } | ||||
| 
 | ||||
| // FindExif returns the the segment that hosts the EXIF data. | ||||
| func (cs *ChunkSlice) FindExif() (chunk *Chunk, err error) { | ||||
| 	index := cs.Index() | ||||
| 	if chunks, found := index[EXifChunkType]; found { | ||||
| 		return chunks[0], nil | ||||
| 	} | ||||
| 
 | ||||
| 	return nil, exif.ErrNoExif | ||||
| } | ||||
| 
 | ||||
| // Exif returns an `exif.Ifd` instance with the existing tags. | ||||
| func (cs *ChunkSlice) Exif() (*exif.Ifd, []byte, error) { | ||||
| 	chunk, err := cs.FindExif() | ||||
| 	if err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	im, err := exifcommon.NewIfdMappingWithStandard() | ||||
| 	if err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	ti := exif.NewTagIndex() | ||||
| 
 | ||||
| 	_, index, err := exif.Collect(im, ti, chunk.Data) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return index.RootIfd, chunk.Data, nil | ||||
| } | ||||
| 
 | ||||
| // ConstructExifBuilder returns an `exif.IfdBuilder` instance | ||||
| // (needed for modifying) preloaded with all existing tags. | ||||
| func (cs *ChunkSlice) ConstructExifBuilder() (*exif.IfdBuilder, error) { | ||||
| 	rootIfd, _, err := cs.Exif() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return exif.NewIfdBuilderFromExistingChain(rootIfd), nil | ||||
| } | ||||
| 
 | ||||
| // SetExif encodes and sets EXIF data into this segment. | ||||
| func (cs *ChunkSlice) SetExif(ib *exif.IfdBuilder) error { | ||||
| 	// Encode. | ||||
| 
 | ||||
| 	ibe := exif.NewIfdByteEncoder() | ||||
| 
 | ||||
| 	exifData, err := ibe.EncodeToExif(ib) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	// Set. | ||||
| 
 | ||||
| 	exifChunk, err := cs.FindExif() | ||||
| 
 | ||||
| 	switch { | ||||
| 	case err == nil: | ||||
| 		// EXIF chunk already exists. | ||||
| 		exifChunk.Data = exifData | ||||
| 		exifChunk.Length = uint32(len(exifData)) | ||||
| 
 | ||||
| 	case errors.Is(err, exif.ErrNoExif): | ||||
| 		// Add a EXIF chunk for the first time. | ||||
| 		exifChunk = &Chunk{ | ||||
| 			Type:   EXifChunkType, | ||||
| 			Data:   exifData, | ||||
| 			Length: uint32(len(exifData)), | ||||
| 		} | ||||
| 
 | ||||
| 		// Insert exif after the IHDR chunk; it's | ||||
| 		// a reliably appropriate place to put it. | ||||
| 		cs.chunks = append( | ||||
| 			cs.chunks[:1], | ||||
| 			append( | ||||
| 				[]*Chunk{exifChunk}, | ||||
| 				cs.chunks[1:]..., | ||||
| 			)..., | ||||
| 		) | ||||
| 
 | ||||
| 	default: | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	exifChunk.UpdateCrc32() | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // PngSplitter hosts the princpal `Split()` | ||||
| // method uses by `bufio.Scanner`. | ||||
| type PngSplitter struct { | ||||
| 	chunks        []*Chunk | ||||
| 	currentOffset int | ||||
| 
 | ||||
| 	doCheckCrc bool | ||||
| 	crcErrors  []string | ||||
| } | ||||
| 
 | ||||
| func (ps *PngSplitter) Chunks() (*ChunkSlice, error) { | ||||
| 	return NewChunkSlice(ps.chunks) | ||||
| } | ||||
| 
 | ||||
| func (ps *PngSplitter) DoCheckCrc(doCheck bool) { | ||||
| 	ps.doCheckCrc = doCheck | ||||
| } | ||||
| 
 | ||||
| func (ps *PngSplitter) CrcErrors() []string { | ||||
| 	return ps.crcErrors | ||||
| } | ||||
| 
 | ||||
| func NewPngSplitter() *PngSplitter { | ||||
| 	return &PngSplitter{ | ||||
| 		chunks:     make([]*Chunk, 0), | ||||
| 		doCheckCrc: true, | ||||
| 		crcErrors:  make([]string, 0), | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Chunk describes a single chunk. | ||||
| type Chunk struct { | ||||
| 	Offset int | ||||
| 	Length uint32 | ||||
| 	Type   string | ||||
| 	Data   []byte | ||||
| 	Crc    uint32 | ||||
| } | ||||
| 
 | ||||
| func (c *Chunk) String() string { | ||||
| 	return fmt.Sprintf("Chunk<OFFSET=(%d) LENGTH=(%d) TYPE=[%s] CRC=(%d)>", c.Offset, c.Length, c.Type, c.Crc) | ||||
| } | ||||
| 
 | ||||
| func calculateCrc32(chunk *Chunk) uint32 { | ||||
| 	c := crc32.NewIEEE() | ||||
| 
 | ||||
| 	c.Write([]byte(chunk.Type)) | ||||
| 	c.Write(chunk.Data) | ||||
| 
 | ||||
| 	return c.Sum32() | ||||
| } | ||||
| 
 | ||||
| func (c *Chunk) UpdateCrc32() { | ||||
| 	c.Crc = calculateCrc32(c) | ||||
| } | ||||
| 
 | ||||
| func (c *Chunk) CheckCrc32() bool { | ||||
| 	expected := calculateCrc32(c) | ||||
| 	return c.Crc == expected | ||||
| } | ||||
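As the helpers above show, the CRC is computed over the chunk type and data only, never over the length field. A small sketch (not part of the vendored file) using the empty IEND chunk, whose CRC should come out as the well-known value 0xAE426082:

	// The empty IEND chunk has no data, so its CRC covers only the type bytes.
	iend := &Chunk{Type: "IEND", Length: 0}
	iend.UpdateCrc32()
	fmt.Printf("%08X\n", iend.Crc) // expected: AE426082
	fmt.Println(iend.CheckCrc32()) // expected: true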
| 
 | ||||
| // Bytes encodes and returns the bytes for this chunk. | ||||
| func (c *Chunk) Bytes() ([]byte, error) { | ||||
| 	if len(c.Data) != int(c.Length) { | ||||
| 		return nil, errors.New("length of data not correct") | ||||
| 	} | ||||
| 	b := make([]byte, 0, 4+4+c.Length+4) | ||||
| 	b = binary.BigEndian.AppendUint32(b, c.Length) | ||||
| 	b = append(b, c.Type...) | ||||
| 	b = append(b, c.Data...) | ||||
| 	b = binary.BigEndian.AppendUint32(b, c.Crc) | ||||
| 	return b, nil | ||||
| } | ||||
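A serialized chunk is therefore 12 bytes of framing (4-byte length, 4-byte type, 4-byte CRC) plus the data itself. A small sketch (not part of the vendored file) with hypothetical tEXt content:

	// Keyword "k", NUL separator, text "v": three data bytes in total.
	c := &Chunk{Type: "tEXt", Data: []byte("k\x00v"), Length: 3}
	c.UpdateCrc32()
	raw, err := c.Bytes()
	// err is nil and len(raw) == 4+4+3+4 == 15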
| 
 | ||||
| // WriteTo encodes and writes the bytes for this chunk. | ||||
| func (c *Chunk) WriteTo(w io.Writer) (int, error) { | ||||
| 	if len(c.Data) != int(c.Length) { | ||||
| 		return 0, errors.New("length of data not correct") | ||||
| 	} | ||||
| 
 | ||||
| 	var n int | ||||
| 
 | ||||
| 	b := make([]byte, 4) // uint32 buf | ||||
| 
 | ||||
| 	binary.BigEndian.PutUint32(b, c.Length) | ||||
| 	if nn, err := w.Write(b); err != nil { | ||||
| 		return n + nn, err | ||||
| 	} | ||||
| 
 | ||||
| 	n += len(b) | ||||
| 
 | ||||
| 	if nn, err := io.WriteString(w, c.Type); err != nil { | ||||
| 		return n + nn, err | ||||
| 	} | ||||
| 
 | ||||
| 	n += len(c.Type) | ||||
| 
 | ||||
| 	if nn, err := w.Write(c.Data); err != nil { | ||||
| 		return n + nn, err | ||||
| 	} | ||||
| 
 | ||||
| 	n += len(c.Data) | ||||
| 
 | ||||
| 	binary.BigEndian.PutUint32(b, c.Crc) | ||||
| 	if nn, err := w.Write(b); err != nil { | ||||
| 		return n + nn, err | ||||
| 	} | ||||
| 
 | ||||
| 	n += len(b) | ||||
| 
 | ||||
| 	return n, nil | ||||
| } | ||||
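For completeness, a sketch (not part of the vendored file) showing that WriteTo streams exactly the same bytes that Bytes() returns, using the chunk `c` and slice `raw` from the sketch above:

	var buf bytes.Buffer
	n, err := c.WriteTo(&buf)
	// err is nil, n == 15, and bytes.Equal(buf.Bytes(), raw) holds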
| 
 | ||||
| // readHeader verifies that the PNG header bytes appear next. | ||||
| func (ps *PngSplitter) readHeader(r io.Reader) error { | ||||
| 	var ( | ||||
| 		sigLen = len(PngSignature) | ||||
| 		header = make([]byte, sigLen) | ||||
| 	) | ||||
| 
 | ||||
| 	if _, err := io.ReadFull(r, header); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	ps.currentOffset += sigLen | ||||
| 	if !bytes.Equal(header, PngSignature[:]) { | ||||
| 		return ErrNotPng | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Split fulfills the `bufio.SplitFunc` | ||||
| // function definition for `bufio.Scanner`. | ||||
| func (ps *PngSplitter) Split( | ||||
| 	data []byte, | ||||
| 	atEOF bool, | ||||
| ) ( | ||||
| 	advance int, | ||||
| 	token []byte, | ||||
| 	err error, | ||||
| ) { | ||||
| 	// We might have more than one chunk's worth, and, | ||||
| 	// if `atEOF` is true, we won't be called again. | ||||
| 	// We'll repeatedly try to read additional chunks, and | ||||
| 	// when we run out of the data we were given we'll | ||||
| 	// return the number of bytes for the chunks we've | ||||
| 	// already completely read. We'll then be called again | ||||
| 	// from the end of those bytes, at which point we'll | ||||
| 	// indicate that we don't yet have enough for another | ||||
| 	// chunk and should be called with more. | ||||
| 	for { | ||||
| 		len_ := len(data) | ||||
| 		if len_ < 8 { | ||||
| 			return advance, nil, nil | ||||
| 		} | ||||
| 
 | ||||
| 		length := binary.BigEndian.Uint32(data[:4]) | ||||
| 		type_ := string(data[4:8]) | ||||
| 		chunkSize := (8 + int(length) + 4) | ||||
| 
 | ||||
| 		if len_ < chunkSize { | ||||
| 			return advance, nil, nil | ||||
| 		} | ||||
| 
 | ||||
| 		crcIndex := 8 + length | ||||
| 		crc := binary.BigEndian.Uint32(data[crcIndex : crcIndex+4]) | ||||
| 
 | ||||
| 		content := make([]byte, length) | ||||
| 		copy(content, data[8:8+length]) | ||||
| 
 | ||||
| 		c := &Chunk{ | ||||
| 			Length: length, | ||||
| 			Type:   type_, | ||||
| 			Data:   content, | ||||
| 			Crc:    crc, | ||||
| 			Offset: ps.currentOffset, | ||||
| 		} | ||||
| 
 | ||||
| 		ps.chunks = append(ps.chunks, c) | ||||
| 
 | ||||
| 		if !c.CheckCrc32() { | ||||
| 			ps.crcErrors = append(ps.crcErrors, type_) | ||||
| 
 | ||||
| 			if ps.doCheckCrc { | ||||
| 				err = ErrCrcFailure | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		advance += chunkSize | ||||
| 		ps.currentOffset += chunkSize | ||||
| 
 | ||||
| 		data = data[chunkSize:] | ||||
| 	} | ||||
| } | ||||
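A usage sketch (not part of the vendored file) of how the splitter is intended to be driven from `bufio.Scanner`; the package's own parser entry points do the equivalent. The 64 MiB buffer cap is an assumed value, chosen only because IDAT chunks can easily exceed the Scanner's default token size; imports of `bufio` and `io` are assumed.

	func splitPng(r io.Reader) (*ChunkSlice, error) {
		ps := NewPngSplitter()

		// Consume and validate the 8-byte PNG signature first.
		if err := ps.readHeader(r); err != nil {
			return nil, err
		}

		// Split never emits tokens; it accumulates chunks inside ps as a
		// side effect, so the Scan loop body is never entered.
		s := bufio.NewScanner(r)
		s.Buffer(make([]byte, 0, 64*1024), 64*1024*1024)
		s.Split(ps.Split)
		for s.Scan() {
		}
		if err := s.Err(); err != nil {
			return nil, err
		}

		return ps.Chunks()
	}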
| 
 | ||||
| var ( | ||||
| 	// Enforce interface conformance. | ||||
| 	_ riimage.MediaContext = new(ChunkSlice) | ||||
| ) | ||||
77	vendor/github.com/superseriousbusiness/go-png-image-structure/v2/testing_common.go (generated, vendored, Normal file)
							|  | @ -0,0 +1,77 @@ | |||
| package pngstructure | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"path" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	assetsPath = "assets" | ||||
| ) | ||||
| 
 | ||||
| func getModuleRootPath() (string, error) { | ||||
| 	moduleRootPath := os.Getenv("PNG_MODULE_ROOT_PATH") | ||||
| 	if moduleRootPath != "" { | ||||
| 		return moduleRootPath, nil | ||||
| 	} | ||||
| 
 | ||||
| 	currentWd, err := os.Getwd() | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 
 | ||||
| 	currentPath := currentWd | ||||
| 	visited := make([]string, 0) | ||||
| 
 | ||||
| 	for { | ||||
| 		tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") | ||||
| 
 | ||||
| 		_, err := os.Stat(tryStampFilepath) | ||||
| 		if err != nil && !os.IsNotExist(err) { | ||||
| 			return "", err | ||||
| 		} else if err == nil { | ||||
| 			break | ||||
| 		} | ||||
| 
 | ||||
| 		visited = append(visited, tryStampFilepath) | ||||
| 
 | ||||
| 		currentPath = path.Dir(currentPath) | ||||
| 		if currentPath == "/" { | ||||
| 			return "", fmt.Errorf("could not find module-root: %v", visited) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return currentPath, nil | ||||
| } | ||||
| 
 | ||||
| func getTestAssetsPath() (string, error) { | ||||
| 	if assetsPath == "" { | ||||
| 		moduleRootPath, err := getModuleRootPath() | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
| 
 | ||||
| 		assetsPath = path.Join(moduleRootPath, "assets") | ||||
| 	} | ||||
| 
 | ||||
| 	return assetsPath, nil | ||||
| } | ||||
| 
 | ||||
| func getTestBasicImageFilepath() (string, error) { | ||||
| 	assetsPath, err := getTestAssetsPath() | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 
 | ||||
| 	return path.Join(assetsPath, "libpng.png"), nil | ||||
| } | ||||
| 
 | ||||
| func getTestExifImageFilepath() (string, error) { | ||||
| 	assetsPath, err := getTestAssetsPath() | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 
 | ||||
| 	return path.Join(assetsPath, "exif.png"), nil | ||||
| } | ||||
|  | @ -3,8 +3,6 @@ package pngstructure | |||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 
 | ||||
| 	"github.com/dsoprea/go-logging" | ||||
| ) | ||||
| 
 | ||||
| func DumpBytes(data []byte) { | ||||
|  | @ -32,34 +30,38 @@ func DumpBytesClause(data []byte) { | |||
| 	fmt.Printf(" }\n") | ||||
| } | ||||
| 
 | ||||
| func DumpBytesToString(data []byte) string { | ||||
| func DumpBytesToString(data []byte) (string, error) { | ||||
| 	b := new(bytes.Buffer) | ||||
| 
 | ||||
| 	for i, x := range data { | ||||
| 		_, err := b.WriteString(fmt.Sprintf("%02x", x)) | ||||
| 		log.PanicIf(err) | ||||
| 		if _, err := b.WriteString(fmt.Sprintf("%02x", x)); err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
| 
 | ||||
| 		if i < len(data)-1 { | ||||
| 			_, err := b.WriteRune(' ') | ||||
| 			log.PanicIf(err) | ||||
| 			if _, err := b.WriteRune(' '); err != nil { | ||||
| 				return "", err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return b.String() | ||||
| 	return b.String(), nil | ||||
| } | ||||
| 
 | ||||
| func DumpBytesClauseToString(data []byte) string { | ||||
| func DumpBytesClauseToString(data []byte) (string, error) { | ||||
| 	b := new(bytes.Buffer) | ||||
| 
 | ||||
| 	for i, x := range data { | ||||
| 		_, err := b.WriteString(fmt.Sprintf("0x%02x", x)) | ||||
| 		log.PanicIf(err) | ||||
| 		if _, err := b.WriteString(fmt.Sprintf("0x%02x", x)); err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
| 
 | ||||
| 		if i < len(data)-1 { | ||||
| 			_, err := b.WriteString(", ") | ||||
| 			log.PanicIf(err) | ||||
| 			if _, err := b.WriteString(", "); err != nil { | ||||
| 				return "", err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return b.String() | ||||
| 	return b.String(), nil | ||||
| } | ||||
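A quick sketch (not part of the vendored diff) of the new error-returning signatures; with an in-memory bytes.Buffer the write error is always nil, so only the formatting differs between the two helpers:

	s, _ := DumpBytesToString([]byte{0x0a, 0x0b})       // "0a 0b"
	c, _ := DumpBytesClauseToString([]byte{0x0a, 0x0b}) // "0x0a, 0x0b"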
10	vendor/modules.txt (vendored)
							|  | @ -167,9 +167,6 @@ github.com/dsoprea/go-logging | |||
| # github.com/dsoprea/go-photoshop-info-format v0.0.0-20200610045659-121dd752914d | ||||
| ## explicit; go 1.13 | ||||
| github.com/dsoprea/go-photoshop-info-format | ||||
| # github.com/dsoprea/go-png-image-structure/v2 v2.0.0-20210512210324-29b889a6093d | ||||
| ## explicit; go 1.12 | ||||
| github.com/dsoprea/go-png-image-structure/v2 | ||||
| # github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e | ||||
| ## explicit; go 1.12 | ||||
| github.com/dsoprea/go-utility/v2/filesystem | ||||
|  | @ -658,12 +655,15 @@ github.com/superseriousbusiness/activity/streams/values/rfc2045 | |||
| github.com/superseriousbusiness/activity/streams/values/rfc5988 | ||||
| github.com/superseriousbusiness/activity/streams/values/string | ||||
| github.com/superseriousbusiness/activity/streams/vocab | ||||
| # github.com/superseriousbusiness/exif-terminator v0.5.0 | ||||
| ## explicit; go 1.17 | ||||
| # github.com/superseriousbusiness/exif-terminator v0.6.0 | ||||
| ## explicit; go 1.21 | ||||
| github.com/superseriousbusiness/exif-terminator | ||||
| # github.com/superseriousbusiness/go-jpeg-image-structure/v2 v2.0.0-20220321154430-d89a106fdabe | ||||
| ## explicit; go 1.17 | ||||
| github.com/superseriousbusiness/go-jpeg-image-structure/v2 | ||||
| # github.com/superseriousbusiness/go-png-image-structure/v2 v2.0.1-SSB | ||||
| ## explicit; go 1.12 | ||||
| github.com/superseriousbusiness/go-png-image-structure/v2 | ||||
| # github.com/superseriousbusiness/oauth2/v4 v4.3.2-SSB.0.20230227143000-f4900831d6c8 | ||||
| ## explicit; go 1.13 | ||||
| github.com/superseriousbusiness/oauth2/v4 | ||||
|  |  | |||