[chore] update dependencies (#4423)

- codeberg.org/gruf/go-ffmpreg: v0.6.10 -> v0.6.11
- github.com/spf13/cast: v1.9.2 -> v1.10.0
- github.com/spf13/viper: v1.20.1 -> v1.21.0
- golang.org/x/crypto: v0.41.0 -> v0.42.0
- golang.org/x/image: v0.30.0 -> v0.31.0

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4423
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
Committed by kim, 2025-09-09 16:12:29 +02:00
commit c949b9f2d1
97 changed files with 14611 additions and 3494 deletions

vendor/github.com/fsnotify/fsnotify/.cirrus.yml

@ -1,7 +1,7 @@
freebsd_task:
name: 'FreeBSD'
freebsd_instance:
image_family: freebsd-14-1
image_family: freebsd-14-2
install_script:
- pkg update -f
- pkg install -y go

vendor/github.com/fsnotify/fsnotify/CHANGELOG.md

@ -1,6 +1,39 @@
# Changelog
1.8.0 2024-10-31
1.9.0 2025-04-04
----------------
### Changes and fixes
- all: make BufferedWatcher buffered again ([#657])
- inotify: fix race when adding/removing watches while a watched path is being
deleted ([#678], [#686])
- inotify: don't send empty event if a watched path is unmounted ([#655])
- inotify: don't register duplicate watches when watching both a symlink and its
target; previously that would get "half-added" and removing the second would
panic ([#679])
- kqueue: fix watching relative symlinks ([#681])
- kqueue: correctly mark pre-existing entries when watching a link to a dir on
kqueue ([#682])
- illumos: don't send error if changed file is deleted while processing the
event ([#678])
[#657]: https://github.com/fsnotify/fsnotify/pull/657
[#678]: https://github.com/fsnotify/fsnotify/pull/678
[#686]: https://github.com/fsnotify/fsnotify/pull/686
[#655]: https://github.com/fsnotify/fsnotify/pull/655
[#681]: https://github.com/fsnotify/fsnotify/pull/681
[#679]: https://github.com/fsnotify/fsnotify/pull/679
[#682]: https://github.com/fsnotify/fsnotify/pull/682
1.8.0 2024-10-31
----------------
### Additions

vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md

@ -77,6 +77,7 @@ End-of-line escapes with `\` are not supported.
debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in
parallel by default, so -parallel=1 is probably a good
idea).
print [any strings] # Print text to stdout; for debugging.
touch path
mkdir [-p] dir

vendor/github.com/fsnotify/fsnotify/README.md

@ -15,7 +15,6 @@ Platform support:
| ReadDirectoryChangesW | Windows | Supported |
| FEN | illumos | Supported |
| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment |
| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
@ -25,7 +24,6 @@ untested.
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129
Usage
-----

vendor/github.com/fsnotify/fsnotify/backend_fen.go

@ -9,6 +9,7 @@ package fsnotify
import (
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"sync"
@ -19,27 +20,25 @@ import (
)
type fen struct {
*shared
Events chan Event
Errors chan error
mu sync.Mutex
port *unix.EventPort
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
dirs map[string]Op // Explicitly watched directories
watches map[string]Op // Explicitly watched non-directories
}
func newBackend(ev chan Event, errs chan error) (backend, error) {
return newBufferedBackend(0, ev, errs)
}
var defaultBufferSize = 0
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
func newBackend(ev chan Event, errs chan error) (backend, error) {
w := &fen{
shared: newShared(ev, errs),
Events: ev,
Errors: errs,
dirs: make(map[string]Op),
watches: make(map[string]Op),
done: make(chan struct{}),
}
var err error
@ -52,49 +51,10 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
return w, nil
}
// sendEvent attempts to send an event to the user, returning true if the event
// was put in the channel successfully and false if the watcher has been closed.
func (w *fen) sendEvent(name string, op Op) (sent bool) {
select {
case <-w.done:
return false
case w.Events <- Event{Name: name, Op: op}:
return true
}
}
// sendError attempts to send an error to the user, returning true if the error
// was put in the channel successfully and false if the watcher has been closed.
func (w *fen) sendError(err error) (sent bool) {
if err == nil {
return true
}
select {
case <-w.done:
return false
case w.Errors <- err:
return true
}
}
func (w *fen) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
func (w *fen) Close() error {
// Take the lock used by associateFile to prevent lingering events from
// being processed after the close
w.mu.Lock()
defer w.mu.Unlock()
if w.isClosed() {
if w.shared.close() {
return nil
}
close(w.done)
return w.port.Close()
}
@ -209,7 +169,7 @@ func (w *fen) readEvents() {
return
}
// There was an error not caused by calling w.Close()
if !w.sendError(err) {
if !w.sendError(fmt.Errorf("port.Get: %w", err)) {
return
}
}
@ -277,13 +237,13 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
isWatched := watchedDir || watchedPath
if events&unix.FILE_DELETE != 0 {
if !w.sendEvent(path, Remove) {
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
reRegister = false
}
if events&unix.FILE_RENAME_FROM != 0 {
if !w.sendEvent(path, Rename) {
if !w.sendEvent(Event{Name: path, Op: Rename}) {
return nil
}
// Don't keep watching the new file name
@ -297,7 +257,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
// inotify reports a Remove event in this case, so we simulate this
// here.
if !w.sendEvent(path, Remove) {
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Don't keep watching the file that was removed
@ -331,7 +291,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
get here, the subdirectory is already gone. Clearly we were watching
// this path but now it is gone. Let's tell the user that it was
// removed.
if !w.sendEvent(path, Remove) {
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Suppress extra write events on removed directories; they are not
@ -346,7 +306,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
if err != nil {
// The symlink still exists, but the target is gone. Report the
// Remove similar to above.
if !w.sendEvent(path, Remove) {
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Don't return the error
@ -359,7 +319,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
return err
}
} else {
if !w.sendEvent(path, Write) {
if !w.sendEvent(Event{Name: path, Op: Write}) {
return nil
}
}
@ -367,7 +327,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
if events&unix.FILE_ATTRIB != 0 && stat != nil {
// Only send Chmod if perms changed
if stat.Mode().Perm() != fmode.Perm() {
if !w.sendEvent(path, Chmod) {
if !w.sendEvent(Event{Name: path, Op: Chmod}) {
return nil
}
}
@ -376,17 +336,27 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
if stat != nil {
// If we get here, it means we've hit an event above that requires us to
// continue watching the file or directory
return w.associateFile(path, stat, isWatched)
err := w.associateFile(path, stat, isWatched)
if errors.Is(err, fs.ErrNotExist) {
// Path may have been removed since the stat.
err = nil
}
return err
}
return nil
}
// The directory was modified, so we must find unwatched entities and watch
// them. If something was removed from the directory, nothing will happen, as
// everything else should still be watched.
func (w *fen) updateDirectory(path string) error {
// The directory was modified, so we must find unwatched entities and watch
// them. If something was removed from the directory, nothing will happen,
// as everything else should still be watched.
files, err := os.ReadDir(path)
if err != nil {
// Directory no longer exists: probably just deleted since we got the
// event.
if errors.Is(err, fs.ErrNotExist) {
return nil
}
return err
}
@ -401,10 +371,15 @@ func (w *fen) updateDirectory(path string) error {
return err
}
err = w.associateFile(path, finfo, false)
if errors.Is(err, fs.ErrNotExist) {
// File may have disappeared between getting the dir listing and
// adding the port: that's okay to ignore.
continue
}
if !w.sendError(err) {
return nil
}
if !w.sendEvent(path, Create) {
if !w.sendEvent(Event{Name: path, Op: Create}) {
return nil
}
}
@ -430,7 +405,7 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
// has fired but we haven't processed it yet.
err := w.port.DissociatePath(path)
if err != nil && !errors.Is(err, unix.ENOENT) {
return err
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
}
}
@ -446,14 +421,22 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
if true {
events |= unix.FILE_ATTRIB
}
return w.port.AssociatePath(path, stat, events, stat.Mode())
err := w.port.AssociatePath(path, stat, events, stat.Mode())
if err != nil {
return fmt.Errorf("port.AssociatePath(%q): %w", path, err)
}
return nil
}
func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
if !w.port.PathIsWatched(path) {
return nil
}
return w.port.DissociatePath(path)
err := w.port.DissociatePath(path)
if err != nil {
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
}
return nil
}
func (w *fen) WatchList() []string {

vendor/github.com/fsnotify/fsnotify/backend_inotify.go

@ -19,6 +19,7 @@ import (
)
type inotify struct {
*shared
Events chan Event
Errors chan error
@ -27,8 +28,6 @@ type inotify struct {
fd int
inotifyFile *os.File
watches *watches
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
doneMu sync.Mutex
doneResp chan struct{} // Channel to respond to Close
// Store rename cookies in an array, with the index wrapping to 0. Almost
@ -52,7 +51,6 @@ type inotify struct {
type (
watches struct {
mu sync.RWMutex
wd map[uint32]*watch // wd → watch
path map[string]uint32 // pathname → wd
}
@ -75,34 +73,13 @@ func newWatches() *watches {
}
}
func (w *watches) len() int {
w.mu.RLock()
defer w.mu.RUnlock()
return len(w.wd)
}
func (w *watches) add(ww *watch) {
w.mu.Lock()
defer w.mu.Unlock()
w.wd[ww.wd] = ww
w.path[ww.path] = ww.wd
}
func (w *watches) remove(wd uint32) {
w.mu.Lock()
defer w.mu.Unlock()
watch := w.wd[wd] // Could have had Remove() called. See #616.
if watch == nil {
return
}
delete(w.path, watch.path)
delete(w.wd, wd)
}
func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] }
func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] }
func (w *watches) len() int { return len(w.wd) }
func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd }
func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) }
func (w *watches) removePath(path string) ([]uint32, error) {
w.mu.Lock()
defer w.mu.Unlock()
path, recurse := recursivePath(path)
wd, ok := w.path[path]
if !ok {
@ -123,7 +100,7 @@ func (w *watches) removePath(path string) ([]uint32, error) {
wds := make([]uint32, 0, 8)
wds = append(wds, wd)
for p, rwd := range w.path {
if filepath.HasPrefix(p, path) {
if strings.HasPrefix(p, path) {
delete(w.path, p)
delete(w.wd, rwd)
wds = append(wds, rwd)
@ -132,22 +109,7 @@ func (w *watches) removePath(path string) ([]uint32, error) {
return wds, nil
}
func (w *watches) byPath(path string) *watch {
w.mu.RLock()
defer w.mu.RUnlock()
return w.wd[w.path[path]]
}
func (w *watches) byWd(wd uint32) *watch {
w.mu.RLock()
defer w.mu.RUnlock()
return w.wd[wd]
}
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
w.mu.Lock()
defer w.mu.Unlock()
var existing *watch
wd, ok := w.path[path]
if ok {
@ -170,11 +132,9 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error
return nil
}
func newBackend(ev chan Event, errs chan error) (backend, error) {
return newBufferedBackend(0, ev, errs)
}
var defaultBufferSize = 0
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
func newBackend(ev chan Event, errs chan error) (backend, error) {
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
// I/O operations won't terminate on close.
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
@ -183,12 +143,12 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
}
w := &inotify{
shared: newShared(ev, errs),
Events: ev,
Errors: errs,
fd: fd,
inotifyFile: os.NewFile(uintptr(fd), ""),
watches: newWatches(),
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
@ -196,46 +156,10 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
return w, nil
}
// Returns true if the event was sent, or false if watcher is closed.
func (w *inotify) sendEvent(e Event) bool {
select {
case <-w.done:
return false
case w.Events <- e:
return true
}
}
// Returns true if the error was sent, or false if watcher is closed.
func (w *inotify) sendError(err error) bool {
if err == nil {
return true
}
select {
case <-w.done:
return false
case w.Errors <- err:
return true
}
}
func (w *inotify) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
func (w *inotify) Close() error {
w.doneMu.Lock()
if w.isClosed() {
w.doneMu.Unlock()
if w.shared.close() {
return nil
}
close(w.done)
w.doneMu.Unlock()
// Causes any blocking reads to return with an error, provided the file
// still supports deadline operations.
@ -244,9 +168,7 @@ func (w *inotify) Close() error {
return err
}
// Wait for goroutine to close
<-w.doneResp
<-w.doneResp // Wait for readEvents() to finish.
return nil
}
@ -266,6 +188,43 @@ func (w *inotify) AddWith(path string, opts ...addOpt) error {
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
}
add := func(path string, with withOpts, recurse bool) error {
var flags uint32
if with.noFollow {
flags |= unix.IN_DONT_FOLLOW
}
if with.op.Has(Create) {
flags |= unix.IN_CREATE
}
if with.op.Has(Write) {
flags |= unix.IN_MODIFY
}
if with.op.Has(Remove) {
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
}
if with.op.Has(Rename) {
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
}
if with.op.Has(Chmod) {
flags |= unix.IN_ATTRIB
}
if with.op.Has(xUnportableOpen) {
flags |= unix.IN_OPEN
}
if with.op.Has(xUnportableRead) {
flags |= unix.IN_ACCESS
}
if with.op.Has(xUnportableCloseWrite) {
flags |= unix.IN_CLOSE_WRITE
}
if with.op.Has(xUnportableCloseRead) {
flags |= unix.IN_CLOSE_NOWRITE
}
return w.register(path, flags, recurse)
}
w.mu.Lock()
defer w.mu.Unlock()
path, recurse := recursivePath(path)
if recurse {
return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
@ -289,46 +248,11 @@ func (w *inotify) AddWith(path string, opts ...addOpt) error {
w.sendEvent(Event{Name: root, Op: Create})
}
return w.add(root, with, true)
return add(root, with, true)
})
}
return w.add(path, with, false)
}
func (w *inotify) add(path string, with withOpts, recurse bool) error {
var flags uint32
if with.noFollow {
flags |= unix.IN_DONT_FOLLOW
}
if with.op.Has(Create) {
flags |= unix.IN_CREATE
}
if with.op.Has(Write) {
flags |= unix.IN_MODIFY
}
if with.op.Has(Remove) {
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
}
if with.op.Has(Rename) {
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
}
if with.op.Has(Chmod) {
flags |= unix.IN_ATTRIB
}
if with.op.Has(xUnportableOpen) {
flags |= unix.IN_OPEN
}
if with.op.Has(xUnportableRead) {
flags |= unix.IN_ACCESS
}
if with.op.Has(xUnportableCloseWrite) {
flags |= unix.IN_CLOSE_WRITE
}
if with.op.Has(xUnportableCloseRead) {
flags |= unix.IN_CLOSE_NOWRITE
}
return w.register(path, flags, recurse)
return add(path, with, false)
}
func (w *inotify) register(path string, flags uint32, recurse bool) error {
@ -342,6 +266,10 @@ func (w *inotify) register(path string, flags uint32, recurse bool) error {
return nil, err
}
if e, ok := w.watches.wd[uint32(wd)]; ok {
return e, nil
}
if existing == nil {
return &watch{
wd: uint32(wd),
@ -365,6 +293,9 @@ func (w *inotify) Remove(name string) error {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
time.Now().Format("15:04:05.000000000"), name)
}
w.mu.Lock()
defer w.mu.Unlock()
return w.remove(filepath.Clean(name))
}
@ -399,13 +330,12 @@ func (w *inotify) WatchList() []string {
return nil
}
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, w.watches.len())
w.watches.mu.RLock()
for pathname := range w.watches.path {
entries = append(entries, pathname)
}
w.watches.mu.RUnlock()
return entries
}
@ -418,21 +348,17 @@ func (w *inotify) readEvents() {
close(w.Events)
}()
var (
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
errno error // Syscall errno
)
var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
for {
// See if we have been closed.
if w.isClosed() {
return
}
n, err := w.inotifyFile.Read(buf[:])
switch {
case errors.Unwrap(err) == os.ErrClosed:
return
case err != nil:
if err != nil {
if errors.Is(err, os.ErrClosed) {
return
}
if !w.sendError(err) {
return
}
@ -440,13 +366,9 @@ func (w *inotify) readEvents() {
}
if n < unix.SizeofInotifyEvent {
var err error
err := errors.New("notify: short read in readEvents()") // Read was too short.
if n == 0 {
err = io.EOF // If EOF is received. This should really never happen.
} else if n < 0 {
err = errno // If an error occurred while reading.
} else {
err = errors.New("notify: short read in readEvents()") // Read was too short.
}
if !w.sendError(err) {
return
@ -454,134 +376,137 @@ func (w *inotify) readEvents() {
continue
}
// We don't know how many events we just read into the buffer
// While the offset points to at least one whole event...
// We don't know how many events we just read into the buffer. While the
// offset points to at least one whole event:
var offset uint32
for offset <= uint32(n-unix.SizeofInotifyEvent) {
var (
// Point "raw" to the event in the buffer
raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
mask = uint32(raw.Mask)
nameLen = uint32(raw.Len)
// Move to the next event in the buffer
next = func() { offset += unix.SizeofInotifyEvent + nameLen }
)
// Point to the event in the buffer.
inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
if mask&unix.IN_Q_OVERFLOW != 0 {
if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 {
if !w.sendError(ErrEventOverflow) {
return
}
}
/// If the event happened to the watched directory or the watched
/// file, the kernel doesn't append the filename to the event, but
/// we would like to always fill the "Name" field with a valid
/// filename. We retrieve the path of the watch from the "paths"
/// map.
watch := w.watches.byWd(uint32(raw.Wd))
/// Can be nil if Remove() was called in another goroutine for this
/// path in between reading the events from the kernel and reading
/// the internal state. Not much we can do about it, so just skip.
/// See #616.
if watch == nil {
next()
continue
ev, ok := w.handleEvent(inEvent, &buf, offset)
if !ok {
return
}
name := watch.path
if nameLen > 0 {
/// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}
if debug {
internal.Debug(name, raw.Mask, raw.Cookie)
}
if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0
next()
continue
}
// inotify will automatically remove the watch on deletes; just need
// to clean our state here.
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
w.watches.remove(watch.wd)
}
// We can't really update the state when a watched path is moved;
// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
// the watch.
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
if watch.recurse {
next() // Do nothing
continue
}
err := w.remove(watch.path)
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
if !w.sendError(err) {
return
}
}
}
/// Skip if we're watching both this path and the parent; the parent
/// will already send a delete so no need to do it twice.
if mask&unix.IN_DELETE_SELF != 0 {
if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok {
next()
continue
}
}
ev := w.newEvent(name, mask, raw.Cookie)
// Need to update watch path for recurse.
if watch.recurse {
isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR
/// New directory created: set up watch on it.
if isDir && ev.Has(Create) {
err := w.register(ev.Name, watch.flags, true)
if !w.sendError(err) {
return
}
// This was a directory rename, so we need to update all
// the children.
//
// TODO: this is of course pretty slow; we should use a
// better data structure for storing all of this, e.g. store
// children in the watch. I have some code for this in my
// kqueue refactor we can use in the future. For now I'm
// okay with this as it's not publicly available.
// Correctness first, performance second.
if ev.renamedFrom != "" {
w.watches.mu.Lock()
for k, ww := range w.watches.wd {
if k == watch.wd || ww.path == ev.Name {
continue
}
if strings.HasPrefix(ww.path, ev.renamedFrom) {
ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
w.watches.wd[k] = ww
}
}
w.watches.mu.Unlock()
}
}
}
/// Send the events that are not ignored on the events channel
if !w.sendEvent(ev) {
return
}
next()
// Move to the next event in the buffer
offset += unix.SizeofInotifyEvent + inEvent.Len
}
}
}
func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) {
w.mu.Lock()
defer w.mu.Unlock()
/// If the event happened to the watched directory or the watched file, the
/// kernel doesn't append the filename to the event, but we would like to
/// always fill the "Name" field with a valid filename. We retrieve the
/// path of the watch from the "paths" map.
///
/// Can be nil if Remove() was called in another goroutine for this path
/// in between reading the events from the kernel and reading the internal
/// state. Not much we can do about it, so just skip. See #616.
watch := w.watches.byWd(uint32(inEvent.Wd))
if watch == nil {
return Event{}, true
}
var (
name = watch.path
nameLen = uint32(inEvent.Len)
)
if nameLen > 0 {
/// Point "bytes" at the first byte of the filename
bb := *buf
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00")
}
if debug {
internal.Debug(name, inEvent.Mask, inEvent.Cookie)
}
if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 {
w.watches.remove(watch)
return Event{}, true
}
// inotify will automatically remove the watch on deletes; just need
// to clean our state here.
if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
w.watches.remove(watch)
}
// We can't really update the state when a watched path is moved; only
// IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch.
if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
if watch.recurse { // Do nothing
return Event{}, true
}
err := w.remove(watch.path)
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
if !w.sendError(err) {
return Event{}, false
}
}
}
/// Skip if we're watching both this path and the parent; the parent will
/// already send a delete so no need to do it twice.
if inEvent.Mask&unix.IN_DELETE_SELF != 0 {
_, ok := w.watches.path[filepath.Dir(watch.path)]
if ok {
return Event{}, true
}
}
ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie)
// Need to update watch path for recurse.
if watch.recurse {
isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR
/// New directory created: set up watch on it.
if isDir && ev.Has(Create) {
err := w.register(ev.Name, watch.flags, true)
if !w.sendError(err) {
return Event{}, false
}
// This was a directory rename, so we need to update all the
// children.
//
// TODO: this is of course pretty slow; we should use a better data
// structure for storing all of this, e.g. store children in the
// watch. I have some code for this in my kqueue refactor we can use
// in the future. For now I'm okay with this as it's not publicly
// available. Correctness first, performance second.
if ev.renamedFrom != "" {
for k, ww := range w.watches.wd {
if k == watch.wd || ww.path == ev.Name {
continue
}
if strings.HasPrefix(ww.path, ev.renamedFrom) {
ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
w.watches.wd[k] = ww
}
}
}
}
}
return ev, true
}
func (w *inotify) isRecursive(path string) bool {
ww := w.watches.byPath(path)
if ww == nil { // path could be a file, so also check the Dir.
@ -650,8 +575,8 @@ func (w *inotify) xSupports(op Op) bool {
}
func (w *inotify) state() {
w.watches.mu.Lock()
defer w.watches.mu.Unlock()
w.mu.Lock()
defer w.mu.Unlock()
for wd, ww := range w.watches.wd {
fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
}
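
For orientation, here is a minimal consumer of the Events/Errors channels this backend feeds. This is a hedged sketch (the watched path is arbitrary); ev.Has mirrors the Op checks used throughout the diff:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// Both channels are closed when the watcher shuts down, matching the
	// deferred close() calls in readEvents above.
	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			if ev.Has(fsnotify.Create) || ev.Has(fsnotify.Write) {
				log.Println("changed:", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```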

vendor/github.com/fsnotify/fsnotify/backend_kqueue.go

@ -16,14 +16,13 @@ import (
)
type kqueue struct {
*shared
Events chan Event
Errors chan error
kq int // File descriptor (as returned by the kqueue() syscall).
closepipe [2]int // Pipe used for closing kq.
watches *watches
done chan struct{}
doneMu sync.Mutex
}
type (
@ -132,14 +131,18 @@ func (w *watches) byPath(path string) (watch, bool) {
return info, ok
}
func (w *watches) updateDirFlags(path string, flags uint32) {
func (w *watches) updateDirFlags(path string, flags uint32) bool {
w.mu.Lock()
defer w.mu.Unlock()
fd := w.path[path]
fd, ok := w.path[path]
if !ok { // Already deleted: don't re-set it here.
return false
}
info := w.wd[fd]
info.dirFlags = flags
w.wd[fd] = info
return true
}
func (w *watches) remove(fd int, path string) bool {
@ -179,22 +182,20 @@ func (w *watches) seenBefore(path string) bool {
return ok
}
func newBackend(ev chan Event, errs chan error) (backend, error) {
return newBufferedBackend(0, ev, errs)
}
var defaultBufferSize = 0
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
func newBackend(ev chan Event, errs chan error) (backend, error) {
kq, closepipe, err := newKqueue()
if err != nil {
return nil, err
}
w := &kqueue{
shared: newShared(ev, errs),
Events: ev,
Errors: errs,
kq: kq,
closepipe: closepipe,
done: make(chan struct{}),
watches: newWatches(),
}
@ -210,7 +211,7 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
// all.
func newKqueue() (kq int, closepipe [2]int, err error) {
kq, err = unix.Kqueue()
if kq == -1 {
if err != nil {
return kq, closepipe, err
}
@ -239,54 +240,17 @@ func newKqueue() (kq int, closepipe [2]int, err error) {
return kq, closepipe, nil
}
// Returns true if the event was sent, or false if watcher is closed.
func (w *kqueue) sendEvent(e Event) bool {
select {
case <-w.done:
return false
case w.Events <- e:
return true
}
}
// Returns true if the error was sent, or false if watcher is closed.
func (w *kqueue) sendError(err error) bool {
if err == nil {
return true
}
select {
case <-w.done:
return false
case w.Errors <- err:
return true
}
}
func (w *kqueue) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
func (w *kqueue) Close() error {
w.doneMu.Lock()
if w.isClosed() {
w.doneMu.Unlock()
if w.shared.close() {
return nil
}
close(w.done)
w.doneMu.Unlock()
pathsToRemove := w.watches.listPaths(false)
for _, name := range pathsToRemove {
w.Remove(name)
}
// Send "quit" message to the reader goroutine.
unix.Close(w.closepipe[1])
unix.Close(w.closepipe[1]) // Send "quit" message to readEvents
return nil
}
@ -303,7 +267,7 @@ func (w *kqueue) AddWith(name string, opts ...addOpt) error {
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
}
_, err := w.addWatch(name, noteAllEvents)
_, err := w.addWatch(name, noteAllEvents, false)
if err != nil {
return err
}
@ -366,7 +330,7 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un
// described in kevent(2).
//
// Returns the real path to the file which was added, with symlinks resolved.
func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) {
if w.isClosed() {
return "", ErrClosed
}
@ -385,15 +349,15 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
return "", nil
}
// Follow symlinks.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
// Follow symlinks, but only for paths added with Add(), and not paths
// we're adding from internalWatch from a listdir.
if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink {
link, err := os.Readlink(name)
if err != nil {
// Return nil because Linux can add unresolvable symlinks to the
// watch list without problems, so maintain consistency with
// that. There will be no file events for broken symlinks.
// TODO: more specific check; returns os.PathError; ENOENT?
return "", nil
return "", err
}
if !filepath.IsAbs(link) {
link = filepath.Join(filepath.Dir(name), link)
}
_, alreadyWatching = w.watches.byPath(link)
@ -408,7 +372,7 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
name = link
fi, err = os.Lstat(name)
if err != nil {
return "", nil
return "", err
}
}
@ -422,7 +386,6 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
if errors.Is(err, unix.EINTR) {
continue
}
return "", err
}
@ -444,10 +407,16 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
if info.isDir {
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
(!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE)
w.watches.updateDirFlags(name, flags)
if !w.watches.updateDirFlags(name, flags) {
return "", nil
}
if watchDir {
if err := w.watchDirectoryFiles(name); err != nil {
d := name
if info.linkName != "" {
d = info.linkName
}
if err := w.watchDirectoryFiles(d); err != nil {
return "", err
}
}
@ -644,19 +613,22 @@ func (w *kqueue) dirChange(dir string) error {
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.dirChange: %w", err)
return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err)
}
for _, f := range files {
fi, err := f.Info()
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.dirChange: %w", err)
}
err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi)
if err != nil {
// Don't need to send an error if this file isn't readable.
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) {
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.dirChange: %w", err)
@ -688,11 +660,11 @@ func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) {
// mimic Linux providing delete events for subdirectories, but preserve
// the flags used if currently watching subdirectory
info, _ := w.watches.byPath(name)
return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME)
return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true)
}
// watch file to mimic Linux inotify
return w.addWatch(name, noteAllEvents)
// Watch file to mimic Linux inotify.
return w.addWatch(name, noteAllEvents, true)
}
// Register events with the queue.
@ -722,9 +694,9 @@ func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
}
func (w *kqueue) xSupports(op Op) bool {
if runtime.GOOS == "freebsd" {
//return true // Supports everything.
}
//if runtime.GOOS == "freebsd" {
// return true // Supports everything.
//}
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
return false

vendor/github.com/fsnotify/fsnotify/backend_other.go

@ -9,12 +9,11 @@ type other struct {
Errors chan error
}
var defaultBufferSize = 0
func newBackend(ev chan Event, errs chan error) (backend, error) {
return nil, errors.New("fsnotify not supported on the current platform")
}
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
return newBackend(ev, errs)
}
func (w *other) Close() error { return nil }
func (w *other) WatchList() []string { return nil }
func (w *other) Add(name string) error { return nil }

vendor/github.com/fsnotify/fsnotify/backend_windows.go

@ -28,18 +28,16 @@ type readDirChangesW struct {
port windows.Handle // Handle to completion port
input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error
done chan chan<- error
mu sync.Mutex // Protects access to watches, closed
watches watchMap // Map of watches (key: i-number)
closed bool // Set to true when Close() is first called
}
func newBackend(ev chan Event, errs chan error) (backend, error) {
return newBufferedBackend(50, ev, errs)
}
var defaultBufferSize = 50
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
func newBackend(ev chan Event, errs chan error) (backend, error) {
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
if err != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
@ -50,7 +48,7 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
quit: make(chan chan<- error, 1),
done: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
@ -70,8 +68,8 @@ func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool
event := w.newEvent(name, uint32(mask))
event.renamedFrom = renamedFrom
select {
case ch := <-w.quit:
w.quit <- ch
case ch := <-w.done:
w.done <- ch
case w.Events <- event:
}
return true
@ -83,10 +81,10 @@ func (w *readDirChangesW) sendError(err error) bool {
return true
}
select {
case <-w.done:
return false
case w.Errors <- err:
return true
case <-w.quit:
return false
}
}
@ -99,9 +97,9 @@ func (w *readDirChangesW) Close() error {
w.closed = true
w.mu.Unlock()
// Send "quit" message to the reader goroutine
// Send "done" message to the reader goroutine
ch := make(chan error)
w.quit <- ch
w.done <- ch
if err := w.wakeupReader(); err != nil {
return err
}
@ -495,7 +493,7 @@ func (w *readDirChangesW) readEvents() {
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
case ch := <-w.quit:
case ch := <-w.done:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {

vendor/github.com/fsnotify/fsnotify/fsnotify.go

@ -244,12 +244,13 @@ var (
// ErrUnsupported is returned by AddWith() when WithOps() specified an
// Unportable event that's not supported on this platform.
//lint:ignore ST1012 not relevant
xErrUnsupported = errors.New("fsnotify: not supported with this backend")
)
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
ev, errs := make(chan Event), make(chan error)
ev, errs := make(chan Event, defaultBufferSize), make(chan error)
b, err := newBackend(ev, errs)
if err != nil {
return nil, err
@ -266,8 +267,8 @@ func NewWatcher() (*Watcher, error) {
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
ev, errs := make(chan Event), make(chan error)
b, err := newBufferedBackend(sz, ev, errs)
ev, errs := make(chan Event, sz), make(chan error)
b, err := newBackend(ev, errs)
if err != nil {
return nil, err
}
@ -337,7 +338,8 @@ func (w *Watcher) Close() error { return w.b.Close() }
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
// The order is undefined, and may differ per call. Returns nil if
// [Watcher.Close] was called.
func (w *Watcher) WatchList() []string { return w.b.WatchList() }
// Supports reports if all the listed operations are supported by this platform.

vendor/github.com/fsnotify/fsnotify/internal/darwin.go

@ -9,14 +9,14 @@ import (
)
var (
SyscallEACCES = syscall.EACCES
UnixEACCES = unix.EACCES
ErrSyscallEACCES = syscall.EACCES
ErrUnixEACCES = unix.EACCES
)
var maxfiles uint64
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
func SetRlimit() {
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
var l syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
if err == nil && l.Cur != l.Max {

vendor/github.com/fsnotify/fsnotify/internal/freebsd.go

@ -9,8 +9,8 @@ import (
)
var (
SyscallEACCES = syscall.EACCES
UnixEACCES = unix.EACCES
ErrSyscallEACCES = syscall.EACCES
ErrUnixEACCES = unix.EACCES
)
var maxfiles uint64

vendor/github.com/fsnotify/fsnotify/internal/unix.go

@ -1,4 +1,4 @@
//go:build !windows && !darwin && !freebsd
//go:build !windows && !darwin && !freebsd && !plan9
package internal
@ -9,8 +9,8 @@ import (
)
var (
SyscallEACCES = syscall.EACCES
UnixEACCES = unix.EACCES
ErrSyscallEACCES = syscall.EACCES
ErrUnixEACCES = unix.EACCES
)
var maxfiles uint64

vendor/github.com/fsnotify/fsnotify/internal/windows.go

@ -10,8 +10,8 @@ import (
// Just a dummy.
var (
SyscallEACCES = errors.New("dummy")
UnixEACCES = errors.New("dummy")
ErrSyscallEACCES = errors.New("dummy")
ErrUnixEACCES = errors.New("dummy")
)
func SetRlimit() {}

vendor/github.com/fsnotify/fsnotify/shared.go (new file, 64 lines)

@ -0,0 +1,64 @@
package fsnotify
import "sync"
type shared struct {
Events chan Event
Errors chan error
done chan struct{}
mu sync.Mutex
}
func newShared(ev chan Event, errs chan error) *shared {
return &shared{
Events: ev,
Errors: errs,
done: make(chan struct{}),
}
}
// Returns true if the event was sent, or false if watcher is closed.
func (w *shared) sendEvent(e Event) bool {
if e.Op == 0 {
return true
}
select {
case <-w.done:
return false
case w.Events <- e:
return true
}
}
// Returns true if the error was sent, or false if watcher is closed.
func (w *shared) sendError(err error) bool {
if err == nil {
return true
}
select {
case <-w.done:
return false
case w.Errors <- err:
return true
}
}
func (w *shared) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
// Mark as closed; returns true if it was already closed.
func (w *shared) close() bool {
w.mu.Lock()
defer w.mu.Unlock()
if w.isClosed() {
return true
}
close(w.done)
return false
}
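
shared consolidates the Events/Errors/done plumbing that the fen, inotify, and kqueue backends previously duplicated. A minimal same-package sketch of the embedding pattern those backends now use (demoBackend is hypothetical):

```go
package fsnotify

// demoBackend illustrates the pattern from this commit: embed *shared and
// reuse its sendEvent/sendError/isClosed/close helpers instead of keeping
// per-backend copies.
type demoBackend struct {
	*shared
}

func newDemoBackend(ev chan Event, errs chan error) *demoBackend {
	return &demoBackend{shared: newShared(ev, errs)}
}

func (w *demoBackend) Close() error {
	if w.shared.close() { // already closed: Close stays idempotent
		return nil
	}
	// Release platform-specific resources here (ports, fds, pipes, ...).
	return nil
}
```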

vendor/github.com/fsnotify/fsnotify/staticcheck.conf (new file, 3 lines)

@ -0,0 +1,3 @@
checks = ['all',
'-U1000', # Don't complain about unused functions.
]

vendor/github.com/go-viper/mapstructure/v2/.editorconfig

@ -16,3 +16,6 @@ indent_style = tab
[*.nix]
indent_size = 2
[.golangci.yaml]
indent_size = 2

vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml

@ -1,23 +1,48 @@
run:
timeout: 5m
version: "2"
linters-settings:
gci:
sections:
- standard
- default
- prefix(github.com/go-viper/mapstructure)
golint:
min-confidence: 0
goimports:
local-prefixes: github.com/go-viper/maptstructure
run:
timeout: 10m
linters:
disable-all: true
enable:
- govet
- ineffassign
# - misspell
- nolintlint
# - revive
disable:
- errcheck
- staticcheck
- unused
settings:
misspell:
locale: US
nolintlint:
allow-unused: false # report any unused nolint directives
require-specific: false # don't require nolint directives to be specific about which linter is being skipped
formatters:
enable:
- gci
- gofmt
- gofumpt
- goimports
- staticcheck
# - stylecheck
# - golines
settings:
gci:
sections:
- standard
- default
- localmodule
gofmt:
simplify: true
rewrite-rules:
- pattern: interface{}
replacement: any
exclusions:
paths:
- internal/

vendor/github.com/go-viper/mapstructure/v2/README.md

@ -1,8 +1,9 @@
# mapstructure
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?branch=main&style=flat-square)](https://github.com/go-viper/mapstructure/actions?query=workflow%3ACI)
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?style=flat-square)](https://github.com/go-viper/mapstructure/actions/workflows/ci.yaml)
[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2)
![Go Version](https://img.shields.io/badge/go%20version-%3E=1.18-61CFDD.svg?style=flat-square)
![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/go-viper/mapstructure?style=flat-square&color=61CFDD)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-viper/mapstructure/badge?style=flat-square)](https://deps.dev/go/github.com%252Fgo-viper%252Fmapstructure%252Fv2)
mapstructure is a Go library for decoding generic map values to structures
and vice versa, while providing helpful error handling.
@ -29,7 +30,7 @@ The API is the same, so you don't need to change anything else.
Here is a script that can help you with the migration:
```shell
sed -i 's/github.com\/mitchellh\/mapstructure/github.com\/go-viper\/mapstructure\/v2/g' $(find . -type f -name '*.go')
sed -i 's|github.com/mitchellh/mapstructure|github.com/go-viper/mapstructure/v2|g' $(find . -type f -name '*.go')
```
If you need more time to migrate your code, that is absolutely fine.

vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go

@ -13,7 +13,7 @@ import (
"time"
)
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
// typedDecodeHook takes a raw DecodeHookFunc (an any) and turns
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
// Create variables here so we can reference them with the reflect pkg
@ -23,7 +23,7 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
// Fill in the variables into this interface and the rest is done
// automatically using the reflect package.
potential := []interface{}{f1, f2, f3}
potential := []any{f1, f2, f3}
v := reflect.ValueOf(h)
vt := v.Type()
@ -37,25 +37,25 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
return nil
}
// cachedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
// cachedDecodeHook takes a raw DecodeHookFunc (an any) and turns
// it into a closure to be used directly.
// If the type fails to convert, we return a closure that always errors, to keep the previous behaviour.
func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (interface{}, error) {
func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (any, error) {
switch f := typedDecodeHook(raw).(type) {
case DecodeHookFuncType:
return func(from reflect.Value, to reflect.Value) (interface{}, error) {
return func(from reflect.Value, to reflect.Value) (any, error) {
return f(from.Type(), to.Type(), from.Interface())
}
case DecodeHookFuncKind:
return func(from reflect.Value, to reflect.Value) (interface{}, error) {
return func(from reflect.Value, to reflect.Value) (any, error) {
return f(from.Kind(), to.Kind(), from.Interface())
}
case DecodeHookFuncValue:
return func(from reflect.Value, to reflect.Value) (interface{}, error) {
return func(from reflect.Value, to reflect.Value) (any, error) {
return f(from, to)
}
default:
return func(from reflect.Value, to reflect.Value) (interface{}, error) {
return func(from reflect.Value, to reflect.Value) (any, error) {
return nil, errors.New("invalid decode hook signature")
}
}
@ -67,7 +67,7 @@ func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Va
func DecodeHookExec(
raw DecodeHookFunc,
from reflect.Value, to reflect.Value,
) (interface{}, error) {
) (any, error) {
switch f := typedDecodeHook(raw).(type) {
case DecodeHookFuncType:
return f(from.Type(), to.Type(), from.Interface())
@ -86,11 +86,11 @@ func DecodeHookExec(
// The composed funcs are called in order, with the result of the
// previous transformation.
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(fs))
cached := make([]func(from reflect.Value, to reflect.Value) (any, error), 0, len(fs))
for _, f := range fs {
cached = append(cached, cachedDecodeHook(f))
}
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
return func(f reflect.Value, t reflect.Value) (any, error) {
var err error
data := f.Interface()
@ -100,7 +100,11 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
if err != nil {
return nil, err
}
newFrom = reflect.ValueOf(data)
if v, ok := data.(reflect.Value); ok {
newFrom = v
} else {
newFrom = reflect.ValueOf(data)
}
}
return data, nil
@ -110,13 +114,13 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(ff))
cached := make([]func(from reflect.Value, to reflect.Value) (any, error), 0, len(ff))
for _, f := range ff {
cached = append(cached, cachedDecodeHook(f))
}
return func(a, b reflect.Value) (interface{}, error) {
return func(a, b reflect.Value) (any, error) {
var allErrs string
var out interface{}
var out any
var err error
for _, c := range cached {
@ -139,8 +143,8 @@ func StringToSliceHookFunc(sep string) DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
data any,
) (any, error) {
if f.Kind() != reflect.String {
return data, nil
}
@ -157,14 +161,37 @@ func StringToSliceHookFunc(sep string) DecodeHookFunc {
}
}
// StringToWeakSliceHookFunc brings back the old (pre-v2) behavior of [StringToSliceHookFunc].
//
// As of mapstructure v2.0.0 [StringToSliceHookFunc] checks if the return type is a string slice.
// This function removes that check.
func StringToWeakSliceHookFunc(sep string) DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data any,
) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Slice {
return data, nil
}
raw := data.(string)
if raw == "" {
return []string{}, nil
}
return strings.Split(raw, sep), nil
}
}
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
// strings to time.Duration.
func StringToTimeDurationHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
data any,
) (any, error) {
if f.Kind() != reflect.String {
return data, nil
}
@ -173,7 +200,29 @@ func StringToTimeDurationHookFunc() DecodeHookFunc {
}
// Convert it by parsing
return time.ParseDuration(data.(string))
d, err := time.ParseDuration(data.(string))
return d, wrapTimeParseDurationError(err)
}
}
// StringToTimeLocationHookFunc returns a DecodeHookFunc that converts
// strings to *time.Location.
func StringToTimeLocationHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data any,
) (any, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(time.Local) {
return data, nil
}
d, err := time.LoadLocation(data.(string))
return d, wrapTimeParseLocationError(err)
}
}
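
StringToTimeLocationHookFunc is new here. A hedged usage sketch against the long-standing DecoderConfig/NewDecoder API (the Config struct and input are illustrative):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/go-viper/mapstructure/v2"
)

type Config struct {
	Location *time.Location `mapstructure:"location"`
}

func main() {
	var cfg Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: mapstructure.StringToTimeLocationHookFunc(),
		Result:     &cfg,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(map[string]any{"location": "Europe/Amsterdam"}); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Location) // Europe/Amsterdam
}
```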
@ -183,8 +232,8 @@ func StringToURLHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
data any,
) (any, error) {
if f.Kind() != reflect.String {
return data, nil
}
@ -193,7 +242,9 @@ func StringToURLHookFunc() DecodeHookFunc {
}
// Convert it by parsing
return url.Parse(data.(string))
u, err := url.Parse(data.(string))
return u, wrapUrlError(err)
}
}
@ -203,8 +254,8 @@ func StringToIPHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
data any,
) (any, error) {
if f.Kind() != reflect.String {
return data, nil
}
@ -215,7 +266,7 @@ func StringToIPHookFunc() DecodeHookFunc {
// Convert it by parsing
ip := net.ParseIP(data.(string))
if ip == nil {
return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
return net.IP{}, fmt.Errorf("failed parsing ip")
}
return ip, nil
@ -228,8 +279,8 @@ func StringToIPNetHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
data any,
) (any, error) {
if f.Kind() != reflect.String {
return data, nil
}
@ -239,7 +290,7 @@ func StringToIPNetHookFunc() DecodeHookFunc {
// Convert it by parsing
_, net, err := net.ParseCIDR(data.(string))
return net, err
return net, wrapNetParseError(err)
}
}
@ -249,8 +300,8 @@ func StringToTimeHookFunc(layout string) DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
data any,
) (any, error) {
if f.Kind() != reflect.String {
return data, nil
}
@ -259,7 +310,9 @@ func StringToTimeHookFunc(layout string) DecodeHookFunc {
}
// Convert it by parsing
return time.Parse(layout, data.(string))
ti, err := time.Parse(layout, data.(string))
return ti, wrapTimeParseError(err)
}
}
@ -271,8 +324,8 @@ func StringToTimeHookFunc(layout string) DecodeHookFunc {
func WeaklyTypedHook(
f reflect.Kind,
t reflect.Kind,
data interface{},
) (interface{}, error) {
data any,
) (any, error) {
dataVal := reflect.ValueOf(data)
switch t {
case reflect.String:
@ -301,17 +354,17 @@ func WeaklyTypedHook(
}
func RecursiveStructToMapHookFunc() DecodeHookFunc {
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
return func(f reflect.Value, t reflect.Value) (any, error) {
if f.Kind() != reflect.Struct {
return f.Interface(), nil
}
var i interface{} = struct{}{}
var i any = struct{}{}
if t.Type() != reflect.TypeOf(&i).Elem() {
return f.Interface(), nil
}
m := make(map[string]interface{})
m := make(map[string]any)
t.Set(reflect.ValueOf(m))
return f.Interface(), nil
@ -325,8 +378,8 @@ func TextUnmarshallerHookFunc() DecodeHookFuncType {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
data any,
) (any, error) {
if f.Kind() != reflect.String {
return data, nil
}
@ -352,8 +405,8 @@ func StringToNetIPAddrHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
data any,
) (any, error) {
if f.Kind() != reflect.String {
return data, nil
}
@ -362,7 +415,9 @@ func StringToNetIPAddrHookFunc() DecodeHookFunc {
}
// Convert it by parsing
return netip.ParseAddr(data.(string))
addr, err := netip.ParseAddr(data.(string))
return addr, wrapNetIPParseAddrError(err)
}
}
@ -372,8 +427,8 @@ func StringToNetIPAddrPortHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
data any,
) (any, error) {
if f.Kind() != reflect.String {
return data, nil
}
@ -382,7 +437,31 @@ func StringToNetIPAddrPortHookFunc() DecodeHookFunc {
}
// Convert it by parsing
return netip.ParseAddrPort(data.(string))
addrPort, err := netip.ParseAddrPort(data.(string))
return addrPort, wrapNetIPParseAddrPortError(err)
}
}
// StringToNetIPPrefixHookFunc returns a DecodeHookFunc that converts
// strings to netip.Prefix.
func StringToNetIPPrefixHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data any,
) (any, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(netip.Prefix{}) {
return data, nil
}
// Convert it by parsing
prefix, err := netip.ParsePrefix(data.(string))
return prefix, wrapNetIPParsePrefixError(err)
}
}
@ -415,178 +494,182 @@ func StringToBasicTypeHookFunc() DecodeHookFunc {
// StringToInt8HookFunc returns a DecodeHookFunc that converts
// strings to int8.
func StringToInt8HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int8 {
return data, nil
}
// Convert it by parsing
i64, err := strconv.ParseInt(data.(string), 0, 8)
return int8(i64), err
return int8(i64), wrapStrconvNumError(err)
}
}
// StringToUint8HookFunc returns a DecodeHookFunc that converts
// strings to uint8.
func StringToUint8HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 {
return data, nil
}
// Convert it by parsing
u64, err := strconv.ParseUint(data.(string), 0, 8)
return uint8(u64), err
return uint8(u64), wrapStrconvNumError(err)
}
}
// StringToInt16HookFunc returns a DecodeHookFunc that converts
// strings to int16.
func StringToInt16HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int16 {
return data, nil
}
// Convert it by parsing
i64, err := strconv.ParseInt(data.(string), 0, 16)
return int16(i64), err
return int16(i64), wrapStrconvNumError(err)
}
}
// StringToUint16HookFunc returns a DecodeHookFunc that converts
// strings to uint16.
func StringToUint16HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 {
return data, nil
}
// Convert it by parsing
u64, err := strconv.ParseUint(data.(string), 0, 16)
return uint16(u64), err
return uint16(u64), wrapStrconvNumError(err)
}
}
// StringToInt32HookFunc returns a DecodeHookFunc that converts
// strings to int32.
func StringToInt32HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int32 {
return data, nil
}
// Convert it by parsing
i64, err := strconv.ParseInt(data.(string), 0, 32)
return int32(i64), err
return int32(i64), wrapStrconvNumError(err)
}
}
// StringToUint32HookFunc returns a DecodeHookFunc that converts
// strings to uint32.
func StringToUint32HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 {
return data, nil
}
// Convert it by parsing
u64, err := strconv.ParseUint(data.(string), 0, 32)
return uint32(u64), err
return uint32(u64), wrapStrconvNumError(err)
}
}
// StringToInt64HookFunc returns a DecodeHookFunc that converts
// strings to int64.
func StringToInt64HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int64 {
return data, nil
}
// Convert it by parsing
return strconv.ParseInt(data.(string), 0, 64)
i64, err := strconv.ParseInt(data.(string), 0, 64)
return int64(i64), wrapStrconvNumError(err)
}
}
// StringToUint64HookFunc returns a DecodeHookFunc that converts
// strings to uint64.
func StringToUint64HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 {
return data, nil
}
// Convert it by parsing
return strconv.ParseUint(data.(string), 0, 64)
u64, err := strconv.ParseUint(data.(string), 0, 64)
return uint64(u64), wrapStrconvNumError(err)
}
}
// StringToIntHookFunc returns a DecodeHookFunc that converts
// strings to int.
func StringToIntHookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int {
return data, nil
}
// Convert it by parsing
i64, err := strconv.ParseInt(data.(string), 0, 0)
return int(i64), err
return int(i64), wrapStrconvNumError(err)
}
}
// StringToUintHookFunc returns a DecodeHookFunc that converts
// strings to uint.
func StringToUintHookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint {
return data, nil
}
// Convert it by parsing
u64, err := strconv.ParseUint(data.(string), 0, 0)
return uint(u64), err
return uint(u64), wrapStrconvNumError(err)
}
}
// StringToFloat32HookFunc returns a DecodeHookFunc that converts
// strings to float32.
func StringToFloat32HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Float32 {
return data, nil
}
// Convert it by parsing
f64, err := strconv.ParseFloat(data.(string), 32)
return float32(f64), err
return float32(f64), wrapStrconvNumError(err)
}
}
// StringToFloat64HookFunc returns a DecodeHookFunc that converts
// strings to float64.
func StringToFloat64HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Float64 {
return data, nil
}
// Convert it by parsing
return strconv.ParseFloat(data.(string), 64)
f64, err := strconv.ParseFloat(data.(string), 64)
return f64, wrapStrconvNumError(err)
}
}
// StringToBoolHookFunc returns a DecodeHookFunc that converts
// strings to bool.
func StringToBoolHookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Bool {
return data, nil
}
// Convert it by parsing
return strconv.ParseBool(data.(string))
b, err := strconv.ParseBool(data.(string))
return b, wrapStrconvNumError(err)
}
}
@ -605,26 +688,27 @@ func StringToRuneHookFunc() DecodeHookFunc {
// StringToComplex64HookFunc returns a DecodeHookFunc that converts
// strings to complex64.
func StringToComplex64HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 {
return data, nil
}
// Convert it by parsing
c128, err := strconv.ParseComplex(data.(string), 64)
return complex64(c128), err
return complex64(c128), wrapStrconvNumError(err)
}
}
// StringToComplex128HookFunc returns a DecodeHookFunc that converts
// strings to complex128.
func StringToComplex128HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 {
return data, nil
}
// Convert it by parsing
return strconv.ParseComplex(data.(string), 128)
c128, err := strconv.ParseComplex(data.(string), 128)
return c128, wrapStrconvNumError(err)
}
}
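These hook functions are designed to be stacked: ComposeDecodeHookFunc tries each one in order, and each hook passes the value through untouched unless the source/target kinds match its check. A minimal sketch wiring two of the hooks above into a decoder (the Limits struct and its field names are hypothetical, not from this diff):

package main

import (
    "fmt"

    "github.com/go-viper/mapstructure/v2"
)

type Limits struct {
    MaxItems int32   `mapstructure:"max_items"`
    Ratio    float64 `mapstructure:"ratio"`
}

func main() {
    var out Limits
    dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
        Result: &out,
        // Each hook is tried in order; non-matching kinds pass through.
        DecodeHook: mapstructure.ComposeDecodeHookFunc(
            mapstructure.StringToInt32HookFunc(),
            mapstructure.StringToFloat64HookFunc(),
        ),
    })
    if err != nil {
        panic(err)
    }
    if err := dec.Decode(map[string]any{"max_items": "42", "ratio": "0.5"}); err != nil {
        panic(err)
    }
    fmt.Println(out.MaxItems, out.Ratio) // 42 0.5
}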

vendor/github.com/go-viper/mapstructure/v2/errors.go (new file; generated, vendored; 244 lines added)

@ -0,0 +1,244 @@
package mapstructure
import (
"errors"
"fmt"
"net"
"net/url"
"reflect"
"strconv"
"strings"
"time"
)
// Error interface is implemented by all errors emitted by mapstructure.
//
// Use [errors.As] to check if an error implements this interface.
type Error interface {
error
mapstructure()
}
// DecodeError is a generic error type that holds information about
// a decoding error together with the name of the field that caused the error.
type DecodeError struct {
name string
err error
}
func newDecodeError(name string, err error) *DecodeError {
return &DecodeError{
name: name,
err: err,
}
}
func (e *DecodeError) Name() string {
return e.name
}
func (e *DecodeError) Unwrap() error {
return e.err
}
func (e *DecodeError) Error() string {
return fmt.Sprintf("'%s' %s", e.name, e.err)
}
func (*DecodeError) mapstructure() {}
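Because DecodeError exposes Unwrap, callers can fish it out of the (possibly errors.Join-ed) error returned by Decode. A sketch, assuming the exported API; the config struct is hypothetical (imports errors, fmt, and the mapstructure package assumed):

var out struct {
    Port int `mapstructure:"port"`
}
err := mapstructure.Decode(map[string]any{"port": "eighty"}, &out)

var decErr *mapstructure.DecodeError
if errors.As(err, &decErr) {
    // Name() reports the offending field (e.g. "port"); the wrapped error
    // can be inspected further, e.g. for *UnconvertibleTypeError.
    fmt.Println(decErr.Name(), decErr.Unwrap())
}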
// ParseError is an error type that indicates a value could not be parsed
// into the expected type.
type ParseError struct {
Expected reflect.Value
Value any
Err error
}
func (e *ParseError) Error() string {
return fmt.Sprintf("cannot parse value as '%s': %s", e.Expected.Type(), e.Err)
}
func (*ParseError) mapstructure() {}
// UnconvertibleTypeError is an error type that indicates a value could not be
// converted to the expected type.
type UnconvertibleTypeError struct {
Expected reflect.Value
Value any
}
func (e *UnconvertibleTypeError) Error() string {
return fmt.Sprintf(
"expected type '%s', got unconvertible type '%s'",
e.Expected.Type(),
reflect.TypeOf(e.Value),
)
}
func (*UnconvertibleTypeError) mapstructure() {}
func wrapStrconvNumError(err error) error {
if err == nil {
return nil
}
if err, ok := err.(*strconv.NumError); ok {
return &strconvNumError{Err: err}
}
return err
}
type strconvNumError struct {
Err *strconv.NumError
}
func (e *strconvNumError) Error() string {
return "strconv." + e.Err.Func + ": " + e.Err.Err.Error()
}
func (e *strconvNumError) Unwrap() error { return e.Err }
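The point of this wrapper is to drop the user-supplied value from the message while keeping the original error reachable. A sketch of the observable difference; the helper is unexported, so this only runs inside the package (imports errors, fmt, strconv assumed):

_, err := strconv.ParseInt("eighty", 0, 32)
fmt.Println(err)                      // strconv.ParseInt: parsing "eighty": invalid syntax
fmt.Println(wrapStrconvNumError(err)) // strconv.ParseInt: invalid syntax

var numErr *strconv.NumError
fmt.Println(errors.As(wrapStrconvNumError(err), &numErr)) // true: the original is still unwrappable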
func wrapUrlError(err error) error {
if err == nil {
return nil
}
if err, ok := err.(*url.Error); ok {
return &urlError{Err: err}
}
return err
}
type urlError struct {
Err *url.Error
}
func (e *urlError) Error() string {
return fmt.Sprintf("%s", e.Err.Err)
}
func (e *urlError) Unwrap() error { return e.Err }
func wrapNetParseError(err error) error {
if err == nil {
return nil
}
if err, ok := err.(*net.ParseError); ok {
return &netParseError{Err: err}
}
return err
}
type netParseError struct {
Err *net.ParseError
}
func (e *netParseError) Error() string {
return "invalid " + e.Err.Type
}
func (e *netParseError) Unwrap() error { return e.Err }
func wrapTimeParseError(err error) error {
if err == nil {
return nil
}
if err, ok := err.(*time.ParseError); ok {
return &timeParseError{Err: err}
}
return err
}
type timeParseError struct {
Err *time.ParseError
}
func (e *timeParseError) Error() string {
if e.Err.Message == "" {
return fmt.Sprintf("parsing time as %q: cannot parse as %q", e.Err.Layout, e.Err.LayoutElem)
}
return "parsing time " + e.Err.Message
}
func (e *timeParseError) Unwrap() error { return e.Err }
func wrapNetIPParseAddrError(err error) error {
if err == nil {
return nil
}
if errMsg := err.Error(); strings.HasPrefix(errMsg, "ParseAddr") {
errPieces := strings.Split(errMsg, ": ")
return fmt.Errorf("ParseAddr: %s", errPieces[len(errPieces)-1])
}
return err
}
func wrapNetIPParseAddrPortError(err error) error {
if err == nil {
return nil
}
errMsg := err.Error()
if strings.HasPrefix(errMsg, "invalid port ") {
return errors.New("invalid port")
} else if strings.HasPrefix(errMsg, "invalid ip:port ") {
return errors.New("invalid ip:port")
}
return err
}
func wrapNetIPParsePrefixError(err error) error {
if err == nil {
return nil
}
if errMsg := err.Error(); strings.HasPrefix(errMsg, "netip.ParsePrefix") {
errPieces := strings.Split(errMsg, ": ")
return fmt.Errorf("netip.ParsePrefix: %s", errPieces[len(errPieces)-1])
}
return err
}
func wrapTimeParseDurationError(err error) error {
if err == nil {
return nil
}
errMsg := err.Error()
if strings.HasPrefix(errMsg, "time: unknown unit ") {
return errors.New("time: unknown unit")
} else if strings.HasPrefix(errMsg, "time: ") {
idx := strings.LastIndex(errMsg, " ")
return errors.New(errMsg[:idx])
}
return err
}
func wrapTimeParseLocationError(err error) error {
if err == nil {
return nil
}
errMsg := err.Error()
if strings.Contains(errMsg, "unknown time zone") || strings.HasPrefix(errMsg, "time: unknown format") {
return fmt.Errorf("invalid time zone format: %w", err)
}
return err
}
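Same idea for the time wrappers above: the prefix checks strip the echoed input from the message. A sketch, again package-internal (imports fmt, time assumed):

_, err := time.ParseDuration("10x")
fmt.Println(err)                             // time: unknown unit "x" in duration "10x"
fmt.Println(wrapTimeParseDurationError(err)) // time: unknown unit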


@ -2,30 +2,28 @@
"nodes": {
"cachix": {
"inputs": {
"devenv": "devenv_2",
"devenv": [
"devenv"
],
"flake-compat": [
"devenv",
"flake-compat"
"devenv"
],
"nixpkgs": [
"devenv",
"nixpkgs"
"git-hooks": [
"devenv"
],
"pre-commit-hooks": [
"devenv",
"pre-commit-hooks"
]
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1712055811,
"narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=",
"lastModified": 1742042642,
"narHash": "sha256-D0gP8srrX0qj+wNYNPdtVJsQuFzIng3q43thnHXQ/es=",
"owner": "cachix",
"repo": "cachix",
"rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30",
"rev": "a624d3eaf4b1d225f918de8543ed739f2f574203",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "latest",
"repo": "cachix",
"type": "github"
}
@ -33,52 +31,21 @@
"devenv": {
"inputs": {
"cachix": "cachix",
"flake-compat": "flake-compat_2",
"nix": "nix_2",
"nixpkgs": "nixpkgs_2",
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1717245169,
"narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=",
"owner": "cachix",
"repo": "devenv",
"rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"devenv_2": {
"inputs": {
"flake-compat": [
"devenv",
"cachix",
"flake-compat"
],
"flake-compat": "flake-compat",
"git-hooks": "git-hooks",
"nix": "nix",
"nixpkgs": "nixpkgs",
"poetry2nix": "poetry2nix",
"pre-commit-hooks": [
"devenv",
"cachix",
"pre-commit-hooks"
]
"nixpkgs": "nixpkgs_3"
},
"locked": {
"lastModified": 1708704632,
"narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=",
"lastModified": 1744876578,
"narHash": "sha256-8MTBj2REB8t29sIBLpxbR0+AEGJ7f+RkzZPAGsFd40c=",
"owner": "cachix",
"repo": "devenv",
"rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196",
"rev": "7ff7c351bba20d0615be25ecdcbcf79b57b85fe1",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "python-rewrite",
"repo": "devenv",
"type": "github"
}
@ -86,27 +53,11 @@
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"type": "github"
},
"original": {
@ -116,15 +67,37 @@
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"devenv",
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1712014858,
"narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_2": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1717285511,
"narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=",
"lastModified": 1743550720,
"narHash": "sha256-hIshGgKZCgWh6AYJpJmRgFdR3WUbkY04o82X05xqQiY=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8",
"rev": "c621e8422220273271f52058f618c94e405bb0f5",
"type": "github"
},
"original": {
@ -133,39 +106,28 @@
"type": "github"
}
},
"flake-utils": {
"git-hooks": {
"inputs": {
"systems": "systems"
"flake-compat": [
"devenv"
],
"gitignore": "gitignore",
"nixpkgs": [
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1689068808,
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
"lastModified": 1742649964,
"narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
@ -173,7 +135,7 @@
"inputs": {
"nixpkgs": [
"devenv",
"pre-commit-hooks",
"git-hooks",
"nixpkgs"
]
},
@ -191,166 +153,109 @@
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": "flake-compat",
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression"
},
"libgit2": {
"flake": false,
"locked": {
"lastModified": 1712911606,
"narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
"lastModified": 1697646580,
"narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=",
"owner": "libgit2",
"repo": "libgit2",
"rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.21",
"repo": "nix",
"owner": "libgit2",
"repo": "libgit2",
"type": "github"
}
},
"nix-github-actions": {
"nix": {
"inputs": {
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"poetry2nix",
"nixpkgs"
"flake-compat": [
"devenv"
],
"flake-parts": "flake-parts",
"libgit2": "libgit2",
"nixpkgs": "nixpkgs_2",
"nixpkgs-23-11": [
"devenv"
],
"nixpkgs-regression": [
"devenv"
],
"pre-commit-hooks": [
"devenv"
]
},
"locked": {
"lastModified": 1688870561,
"narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "165b1650b753316aa7f1787f3005a8d2da0f5301",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
}
},
"nix_2": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression_2"
},
"locked": {
"lastModified": 1712911606,
"narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
"lastModified": 1741798497,
"narHash": "sha256-E3j+3MoY8Y96mG1dUIiLFm2tZmNbRvSiyN7CrSKuAVg=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
"rev": "f3f44b2baaf6c4c6e179de8cbb1cc6db031083cd",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.21",
"ref": "devenv-2.24",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1692808169,
"narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=",
"lastModified": 1733212471,
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9201b5ff357e781bf014d0330d18555695df7ba8",
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1717284937,
"narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"lastModified": 1743296961,
"narHash": "sha256-b1EdN3cULCqtorQ4QeWgLMrd5ZGOjLSLemfa00heasc=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "e4822aea2a6d1cdd36653c134cacfd64c97ff4fa",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-regression_2": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1710695816,
"narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "614b4613980a522ba49f0d194531beddbb7220d3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1713361204,
"narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=",
"lastModified": 1717432640,
"narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "88269ab3044128b7c2f4c7d68448b2fb50456870",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "release-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1733477122,
"narHash": "sha256-qamMCz5mNpQmgBwc8SB5tVMlD5sbwVIToVZtSxMph9s=",
"owner": "cachix",
"repo": "devenv-nixpkgs",
"rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6",
"rev": "7bd9e84d0452f6d2e63b6e6da29fe73fac951857",
"type": "github"
},
"original": {
@ -360,13 +265,13 @@
"type": "github"
}
},
"nixpkgs_3": {
"nixpkgs_4": {
"locked": {
"lastModified": 1717112898,
"narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=",
"lastModified": 1744536153,
"narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0",
"rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11",
"type": "github"
},
"original": {
@ -376,94 +281,11 @@
"type": "github"
}
},
"poetry2nix": {
"inputs": {
"flake-utils": "flake-utils",
"nix-github-actions": "nix-github-actions",
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1692876271,
"narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=",
"owner": "nix-community",
"repo": "poetry2nix",
"rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "poetry2nix",
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"flake-utils": "flake-utils_2",
"gitignore": "gitignore",
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1713775815,
"narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"root": {
"inputs": {
"devenv": "devenv",
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs_3"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
"flake-parts": "flake-parts_2",
"nixpkgs": "nixpkgs_4"
}
}
},


@ -5,35 +5,42 @@
devenv.url = "github:cachix/devenv";
};
outputs = inputs@{ flake-parts, ... }:
outputs =
inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [
inputs.devenv.flakeModule
];
systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ];
systems = [
"x86_64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
perSystem = { config, self', inputs', pkgs, system, ... }: rec {
devenv.shells = {
default = {
languages = {
go.enable = true;
perSystem =
{ pkgs, ... }:
rec {
devenv.shells = {
default = {
languages = {
go.enable = true;
};
pre-commit.hooks = {
nixpkgs-fmt.enable = true;
};
packages = with pkgs; [
golangci-lint
];
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
containers = pkgs.lib.mkForce { };
};
pre-commit.hooks = {
nixpkgs-fmt.enable = true;
};
packages = with pkgs; [
golangci-lint
];
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
containers = pkgs.lib.mkForce { };
ci = devenv.shells.default;
};
ci = devenv.shells.default;
};
};
};
}


@ -1,5 +1,5 @@
// Package mapstructure exposes functionality to convert one arbitrary
// Go type into another, typically to convert a map[string]interface{}
// Go type into another, typically to convert a map[string]any
// into a native Go structure.
//
// The Go structure can be arbitrarily complex, containing slices,
@ -54,8 +54,8 @@
//
// This would require an input that looks like below:
//
// map[string]interface{}{
// "person": map[string]interface{}{"name": "alice"},
// map[string]any{
// "person": map[string]any{"name": "alice"},
// }
//
// If your "person" value is NOT nested, then you can append ",squash" to
@ -68,7 +68,7 @@
//
// Now the following input would be accepted:
//
// map[string]interface{}{
// map[string]any{
// "name": "alice",
// }
//
@ -79,7 +79,7 @@
//
// Will be decoded into a map:
//
// map[string]interface{}{
// map[string]any{
// "name": "alice",
// }
//
@ -95,18 +95,18 @@
//
// You can also use the ",remain" suffix on your tag to collect all unused
// values in a map. The field with this tag MUST be a map type and should
// probably be a "map[string]interface{}" or "map[interface{}]interface{}".
// probably be a "map[string]any" or "map[any]any".
// See example below:
//
// type Friend struct {
// Name string
// Other map[string]interface{} `mapstructure:",remain"`
// Other map[string]any `mapstructure:",remain"`
// }
//
// Given the input below, Other would be populated with the other
// values that weren't used (everything but "name"):
//
// map[string]interface{}{
// map[string]any{
// "name": "bob",
// "address": "123 Maple St.",
// }
@ -115,15 +115,36 @@
//
// When decoding from a struct to any other value, you may use the
// ",omitempty" suffix on your tag to omit that value if it equates to
// the zero value. The zero value of all types is specified in the Go
// specification.
// the zero value, or a zero-length element. The zero value of all types is
// specified in the Go specification.
//
// For example, the zero value of a numeric type is zero ("0"). If the struct
// field value is zero and a numeric type, the field is empty, and it won't
// be encoded into the destination type.
// be encoded into the destination type. And likewise for the URLs field, if the
// slice is nil or empty, it won't be encoded into the destination type.
//
// type Source struct {
// Age int `mapstructure:",omitempty"`
// Age int `mapstructure:",omitempty"`
// URLs []string `mapstructure:",omitempty"`
// }
//
// # Omit Zero Values
//
// When decoding from a struct to any other value, you may use the
// ",omitzero" suffix on your tag to omit that value if it equates to the zero
// value. The zero value of all types is specified in the Go specification.
//
// For example, the zero value of a numeric type is zero ("0"). If the struct
// field value is zero and a numeric type, the field is empty, and it won't
// be encoded into the destination type. And likewise for the URLs field, if the
// slice is nil, it won't be encoded into the destination type.
//
// Note that if the field is a slice, and it is empty but not nil, it will
// still be encoded into the destination type.
//
// type Source struct {
// Age int `mapstructure:",omitzero"`
// URLs []string `mapstructure:",omitzero"`
// }
//
// # Unexported fields
@ -140,7 +161,7 @@
//
// Using this map as input:
//
// map[string]interface{}{
// map[string]any{
// "private": "I will be ignored",
// "Public": "I made it through!",
// }
@ -183,19 +204,19 @@ import (
// we started with Kinds and then realized Types were the better solution,
// but have a promise to not break backwards compat so we now support
// both.
type DecodeHookFunc interface{}
type DecodeHookFunc any
// DecodeHookFuncType is a DecodeHookFunc which has complete information about
// the source and target types.
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
type DecodeHookFuncType func(reflect.Type, reflect.Type, any) (any, error)
// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
// source and target types.
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, any) (any, error)
// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target
// values.
type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error)
type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (any, error)
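Any of the three shapes can be assigned to DecoderConfig.DecodeHook; the package detects which signature it was given. A sketch of a hypothetical DecodeHookFuncType that splits comma-separated strings into []string before normal decoding (only the signature comes from the package; imports reflect, strings assumed):

// splitCSVHook is hypothetical; it leaves non-matching values untouched.
func splitCSVHook(f reflect.Type, t reflect.Type, data any) (any, error) {
    if f.Kind() != reflect.String || t != reflect.TypeOf([]string(nil)) {
        return data, nil
    }
    return strings.Split(data.(string), ","), nil
}

// Wired up as: &mapstructure.DecoderConfig{DecodeHook: splitCSVHook, Result: &out}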
// DecoderConfig is the configuration that is used to create a new decoder
// and allows customization of various aspects of decoding.
@ -222,6 +243,12 @@ type DecoderConfig struct {
// will affect all nested structs as well.
ErrorUnset bool
// AllowUnsetPointer, if set to true, will prevent fields with pointer types
// from being reported as unset, even if ErrorUnset is true and the field was
// not present in the input data. This allows pointer fields to be optional
// without triggering an error when they are missing.
AllowUnsetPointer bool
// ZeroFields, if set to true, will zero fields before writing them.
// For example, a map will be emptied before decoded values are put in
// it. If this is false, a map will be merged.
@ -260,7 +287,7 @@ type DecoderConfig struct {
// Result is a pointer to the struct that will contain the decoded
// value.
Result interface{}
Result any
// The tag name that mapstructure reads for field names. This
// defaults to "mapstructure"
@ -292,7 +319,7 @@ type DecoderConfig struct {
// up the most basic Decoder.
type Decoder struct {
config *DecoderConfig
cachedDecodeHook func(from reflect.Value, to reflect.Value) (interface{}, error)
cachedDecodeHook func(from reflect.Value, to reflect.Value) (any, error)
}
// Metadata contains information about decoding a structure that
@ -313,7 +340,7 @@ type Metadata struct {
// Decode takes an input structure and uses reflection to translate it to
// the output structure. output must be a pointer to a map or struct.
func Decode(input interface{}, output interface{}) error {
func Decode(input any, output any) error {
config := &DecoderConfig{
Metadata: nil,
Result: output,
@ -329,7 +356,7 @@ func Decode(input interface{}, output interface{}) error {
// WeakDecode is the same as Decode but is shorthand to enable
// WeaklyTypedInput. See DecoderConfig for more info.
func WeakDecode(input, output interface{}) error {
func WeakDecode(input, output any) error {
config := &DecoderConfig{
Metadata: nil,
Result: output,
@ -346,7 +373,7 @@ func WeakDecode(input, output interface{}) error {
// DecodeMetadata is the same as Decode, but is shorthand to
// enable metadata collection. See DecoderConfig for more info.
func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
func DecodeMetadata(input any, output any, metadata *Metadata) error {
config := &DecoderConfig{
Metadata: metadata,
Result: output,
@ -363,7 +390,7 @@ func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) e
// WeakDecodeMetadata is the same as Decode, but is shorthand to
// enable both WeaklyTypedInput and metadata collection. See
// DecoderConfig for more info.
func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
func WeakDecodeMetadata(input any, output any, metadata *Metadata) error {
config := &DecoderConfig{
Metadata: metadata,
Result: output,
@ -430,7 +457,7 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
// Decode decodes the given raw interface to the target pointer specified
// by the configuration.
func (d *Decoder) Decode(input interface{}) error {
func (d *Decoder) Decode(input any) error {
err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
// Retain some of the original behavior when multiple errors occur
@ -443,7 +470,7 @@ func (d *Decoder) Decode(input interface{}) error {
}
// isNil returns true if the input is nil or a typed nil pointer.
func isNil(input interface{}) bool {
func isNil(input any) bool {
if input == nil {
return true
}
@ -452,7 +479,7 @@ func isNil(input interface{}) bool {
}
// Decodes an unknown data type into a specific reflection value.
func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
func (d *Decoder) decode(name string, input any, outVal reflect.Value) error {
var (
inputVal = reflect.ValueOf(input)
outputKind = getKind(outVal)
@ -489,10 +516,10 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
// Hooks need a valid inputVal, so reset it to zero value of outVal type.
switch outputKind {
case reflect.Struct, reflect.Map:
var mapVal map[string]interface{}
var mapVal map[string]any
inputVal = reflect.ValueOf(mapVal) // create nil map pointer
case reflect.Slice, reflect.Array:
var sliceVal []interface{}
var sliceVal []any
inputVal = reflect.ValueOf(sliceVal) // create nil slice pointer
default:
inputVal = reflect.Zero(outVal.Type())
@ -504,7 +531,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
var err error
input, err = d.cachedDecodeHook(inputVal, outVal)
if err != nil {
return fmt.Errorf("error decoding '%s': %w", name, err)
return newDecodeError(name, err)
}
}
if isNil(input) {
@ -542,7 +569,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
err = d.decodeFunc(name, input, outVal)
default:
// If we reached this point then we weren't able to decode it
return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
return newDecodeError(name, fmt.Errorf("unsupported type: %s", outputKind))
}
// If we reached here, then we successfully decoded SOMETHING, so
@ -556,7 +583,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
// This decodes a basic type (bool, int, string, etc.) and sets the
// value to "data" of that type.
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
func (d *Decoder) decodeBasic(name string, data any, val reflect.Value) error {
if val.IsValid() && val.Elem().IsValid() {
elem := val.Elem()
@ -603,16 +630,17 @@ func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value)
dataValType := dataVal.Type()
if !dataValType.AssignableTo(val.Type()) {
return fmt.Errorf(
"'%s' expected type '%s', got '%s'",
name, val.Type(), dataValType)
return newDecodeError(name, &UnconvertibleTypeError{
Expected: val,
Value: data,
})
}
val.Set(dataVal)
return nil
}
func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
func (d *Decoder) decodeString(name string, data any, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal)
@ -656,15 +684,16 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value)
}
if !converted {
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
name, val.Type(), dataVal.Type(), data)
return newDecodeError(name, &UnconvertibleTypeError{
Expected: val,
Value: data,
})
}
return nil
}
func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
func (d *Decoder) decodeInt(name string, data any, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal)
dataType := dataVal.Type()
@ -692,26 +721,34 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er
if err == nil {
val.SetInt(i)
} else {
return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
return newDecodeError(name, &ParseError{
Expected: val,
Value: data,
Err: wrapStrconvNumError(err),
})
}
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
jn := data.(json.Number)
i, err := jn.Int64()
if err != nil {
return fmt.Errorf(
"error decoding json.Number into %s: %s", name, err)
return newDecodeError(name, &ParseError{
Expected: val,
Value: data,
Err: err,
})
}
val.SetInt(i)
default:
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
name, val.Type(), dataVal.Type(), data)
return newDecodeError(name, &UnconvertibleTypeError{
Expected: val,
Value: data,
})
}
return nil
}
func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
func (d *Decoder) decodeUint(name string, data any, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal)
dataType := dataVal.Type()
@ -720,8 +757,11 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e
case dataKind == reflect.Int:
i := dataVal.Int()
if i < 0 && !d.config.WeaklyTypedInput {
return fmt.Errorf("cannot parse '%s', %d overflows uint",
name, i)
return newDecodeError(name, &ParseError{
Expected: val,
Value: data,
Err: fmt.Errorf("%d overflows uint", i),
})
}
val.SetUint(uint64(i))
case dataKind == reflect.Uint:
@ -729,8 +769,11 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e
case dataKind == reflect.Float32:
f := dataVal.Float()
if f < 0 && !d.config.WeaklyTypedInput {
return fmt.Errorf("cannot parse '%s', %f overflows uint",
name, f)
return newDecodeError(name, &ParseError{
Expected: val,
Value: data,
Err: fmt.Errorf("%f overflows uint", f),
})
}
val.SetUint(uint64(f))
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
@ -749,26 +792,34 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e
if err == nil {
val.SetUint(i)
} else {
return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
return newDecodeError(name, &ParseError{
Expected: val,
Value: data,
Err: wrapStrconvNumError(err),
})
}
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
jn := data.(json.Number)
i, err := strconv.ParseUint(string(jn), 0, 64)
if err != nil {
return fmt.Errorf(
"error decoding json.Number into %s: %s", name, err)
return newDecodeError(name, &ParseError{
Expected: val,
Value: data,
Err: wrapStrconvNumError(err),
})
}
val.SetUint(i)
default:
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
name, val.Type(), dataVal.Type(), data)
return newDecodeError(name, &UnconvertibleTypeError{
Expected: val,
Value: data,
})
}
return nil
}
func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
func (d *Decoder) decodeBool(name string, data any, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal)
@ -788,18 +839,23 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e
} else if dataVal.String() == "" {
val.SetBool(false)
} else {
return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
return newDecodeError(name, &ParseError{
Expected: val,
Value: data,
Err: wrapStrconvNumError(err),
})
}
default:
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%#v', value: '%#v'",
name, val, dataVal, data)
return newDecodeError(name, &UnconvertibleTypeError{
Expected: val,
Value: data,
})
}
return nil
}
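The string branch above only runs with WeaklyTypedInput; otherwise a string lands in the default UnconvertibleTypeError branch. WeakDecode flips that switch, so loosely typed input converts instead of erroring. A sketch with hypothetical field names:

var out struct {
    Enabled bool `mapstructure:"enabled"`
    Count   int  `mapstructure:"count"`
}
// Non-zero int -> true, numeric string -> 42; both would fail a strict Decode.
err := mapstructure.WeakDecode(map[string]any{"enabled": 1, "count": "42"}, &out)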
func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
func (d *Decoder) decodeFloat(name string, data any, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal)
dataType := dataVal.Type()
@ -827,26 +883,34 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
if err == nil {
val.SetFloat(f)
} else {
return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
return newDecodeError(name, &ParseError{
Expected: val,
Value: data,
Err: wrapStrconvNumError(err),
})
}
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
jn := data.(json.Number)
i, err := jn.Float64()
if err != nil {
return fmt.Errorf(
"error decoding json.Number into %s: %s", name, err)
return newDecodeError(name, &ParseError{
Expected: val,
Value: data,
Err: err,
})
}
val.SetFloat(i)
default:
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
name, val.Type(), dataVal.Type(), data)
return newDecodeError(name, &UnconvertibleTypeError{
Expected: val,
Value: data,
})
}
return nil
}
func (d *Decoder) decodeComplex(name string, data interface{}, val reflect.Value) error {
func (d *Decoder) decodeComplex(name string, data any, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal)
@ -854,15 +918,16 @@ func (d *Decoder) decodeComplex(name string, data interface{}, val reflect.Value
case dataKind == reflect.Complex64:
val.SetComplex(dataVal.Complex())
default:
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
name, val.Type(), dataVal.Type(), data)
return newDecodeError(name, &UnconvertibleTypeError{
Expected: val,
Value: data,
})
}
return nil
}
func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
func (d *Decoder) decodeMap(name string, data any, val reflect.Value) error {
valType := val.Type()
valKeyType := valType.Key()
valElemType := valType.Elem()
@ -900,7 +965,10 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er
fallthrough
default:
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
return newDecodeError(name, &UnconvertibleTypeError{
Expected: val,
Value: data,
})
}
}
@ -986,7 +1054,10 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
// to the map value.
v := dataVal.Field(i)
if !v.Type().AssignableTo(valMap.Type().Elem()) {
return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
return newDecodeError(
name+"."+f.Name,
fmt.Errorf("cannot assign type %q to map value field of type %q", v.Type(), valMap.Type().Elem()),
)
}
tagValue := f.Tag.Get(d.config.TagName)
@ -1011,6 +1082,11 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
continue
}
// If "omitzero" is specified in the tag, it ignores zero values.
if strings.Index(tagValue[index+1:], "omitzero") != -1 && v.IsZero() {
continue
}
// If "squash" is specified in the tag, we squash the field down.
squash = squash || strings.Contains(tagValue[index+1:], d.config.SquashTagOption)
if squash {
@ -1021,12 +1097,18 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
// The final type must be a struct
if v.Kind() != reflect.Struct {
return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
return newDecodeError(
name+"."+f.Name,
fmt.Errorf("cannot squash non-struct type %q", v.Type()),
)
}
} else {
if strings.Index(tagValue[index+1:], "remain") != -1 {
if v.Kind() != reflect.Map {
return fmt.Errorf("error remain-tag field with invalid type: '%s'", v.Type())
return newDecodeError(
name+"."+f.Name,
fmt.Errorf("error remain-tag field with invalid type: %q", v.Type()),
)
}
ptr := v.MapRange()
@ -1094,7 +1176,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
return nil
}
func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) {
func (d *Decoder) decodePtr(name string, data any, val reflect.Value) (bool, error) {
// If the input data is nil, then we want to just set the output
// pointer to be nil as well.
isNil := data == nil
@ -1141,20 +1223,21 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (b
return false, nil
}
func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
func (d *Decoder) decodeFunc(name string, data any, val reflect.Value) error {
// Create an element of the concrete (non pointer) type and decode
// into that. Then set the value of the pointer to this type.
dataVal := reflect.Indirect(reflect.ValueOf(data))
if val.Type() != dataVal.Type() {
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
name, val.Type(), dataVal.Type(), data)
return newDecodeError(name, &UnconvertibleTypeError{
Expected: val,
Value: data,
})
}
val.Set(dataVal)
return nil
}
func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
func (d *Decoder) decodeSlice(name string, data any, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataValKind := dataVal.Kind()
valType := val.Type()
@ -1176,7 +1259,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
return nil
}
// Create slice of maps of other sizes
return d.decodeSlice(name, []interface{}{data}, val)
return d.decodeSlice(name, []any{data}, val)
case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
return d.decodeSlice(name, []byte(dataVal.String()), val)
@ -1185,12 +1268,12 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
// and "lift" it into it. i.e. a string becomes a string slice.
default:
// Just re-try this function with data as a slice.
return d.decodeSlice(name, []interface{}{data}, val)
return d.decodeSlice(name, []any{data}, val)
}
}
return fmt.Errorf(
"'%s': source data must be an array or slice, got %s", name, dataValKind)
return newDecodeError(name,
fmt.Errorf("source data must be an array or slice, got %s", dataValKind))
}
// If the input value is nil, then don't allocate since empty != nil
@ -1228,7 +1311,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
return errors.Join(errs...)
}
func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
func (d *Decoder) decodeArray(name string, data any, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataValKind := dataVal.Kind()
valType := val.Type()
@ -1253,17 +1336,17 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value)
// and "lift" it into it. i.e. a string becomes a string array.
default:
// Just re-try this function with data as a slice.
return d.decodeArray(name, []interface{}{data}, val)
return d.decodeArray(name, []any{data}, val)
}
}
return fmt.Errorf(
"'%s': source data must be an array or slice, got %s", name, dataValKind)
return newDecodeError(name,
fmt.Errorf("source data must be an array or slice, got %s", dataValKind))
}
if dataVal.Len() > arrayType.Len() {
return fmt.Errorf(
"'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
return newDecodeError(name,
fmt.Errorf("expected source data to have length less or equal to %d, got %d", arrayType.Len(), dataVal.Len()))
}
// Make a new array to hold our result, same size as the original data.
@ -1289,7 +1372,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value)
return errors.Join(errs...)
}
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
func (d *Decoder) decodeStruct(name string, data any, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
// If the type of the value to write to and the data match directly,
@ -1310,7 +1393,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
// as an intermediary.
// Make a new map to hold our result
mapType := reflect.TypeOf((map[string]interface{})(nil))
mapType := reflect.TypeOf((map[string]any)(nil))
mval := reflect.MakeMap(mapType)
// Creating a pointer to a map so that other methods can completely
@ -1328,26 +1411,26 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
return result
default:
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
return newDecodeError(name,
fmt.Errorf("expected a map or struct, got %q", dataValKind))
}
}
func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
dataValType := dataVal.Type()
if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
return fmt.Errorf(
"'%s' needs a map with string keys, has '%s' keys",
name, dataValType.Key().Kind())
return newDecodeError(name,
fmt.Errorf("needs a map with string keys, has %q keys", kind))
}
dataValKeys := make(map[reflect.Value]struct{})
dataValKeysUnused := make(map[interface{}]struct{})
dataValKeysUnused := make(map[any]struct{})
for _, dataValKey := range dataVal.MapKeys() {
dataValKeys[dataValKey] = struct{}{}
dataValKeysUnused[dataValKey.Interface()] = struct{}{}
}
targetValKeysUnused := make(map[interface{}]struct{})
targetValKeysUnused := make(map[any]struct{})
var errs []error
@ -1410,7 +1493,10 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
structs = append(structs, fieldVal.Elem().Elem())
}
default:
errs = append(errs, fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
errs = append(errs, newDecodeError(
name+"."+fieldType.Name,
fmt.Errorf("unsupported type for squash: %s", fieldVal.Kind()),
))
}
continue
}
@ -1461,7 +1547,9 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
if !rawMapVal.IsValid() {
// There was no matching key in the map for the value in
// the struct. Remember it for potential errors and metadata.
targetValKeysUnused[fieldName] = struct{}{}
if !(d.config.AllowUnsetPointer && fieldValue.Kind() == reflect.Ptr) {
targetValKeysUnused[fieldName] = struct{}{}
}
continue
}
}
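This is where the new AllowUnsetPointer option lands: missing input for a pointer field no longer counts as unset, which makes pointers a natural way to mark optional fields under ErrorUnset. A sketch, assuming the exported API and a hypothetical config struct:

type Cfg struct {
    Host string `mapstructure:"host"`
    Port *int   `mapstructure:"port"` // optional: pointer fields may stay unset
}

var out Cfg
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
    Result:            &out,
    ErrorUnset:        true,
    AllowUnsetPointer: true,
})
if err != nil {
    panic(err)
}
// "port" is absent from the input, but only non-pointer fields are
// checked, so no "has unset fields" error is returned.
err = dec.Decode(map[string]any{"host": "localhost"})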
@ -1495,7 +1583,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
// we put the unused keys directly into the remain field.
if remainField != nil && len(dataValKeysUnused) > 0 {
// Build a map of only the unused values
remain := map[interface{}]interface{}{}
remain := map[any]any{}
for key := range dataValKeysUnused {
remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface()
}
@ -1517,8 +1605,10 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
}
sort.Strings(keys)
err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
errs = append(errs, err)
errs = append(errs, newDecodeError(
name,
fmt.Errorf("has invalid keys: %s", strings.Join(keys, ", ")),
))
}
if d.config.ErrorUnset && len(targetValKeysUnused) > 0 {
@ -1528,8 +1618,10 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
}
sort.Strings(keys)
err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", "))
errs = append(errs, err)
errs = append(errs, newDecodeError(
name,
fmt.Errorf("has unset fields: %s", strings.Join(keys, ", ")),
))
}
if err := errors.Join(errs...); err != nil {


@ -1,27 +1,37 @@
version: "2"
run:
timeout: 10m
linters-settings:
gci:
sections:
- standard
- default
- prefix(github.com/sagikazarmark/locafero)
goimports:
local-prefixes: github.com/sagikazarmark/locafero
misspell:
locale: US
nolintlint:
allow-leading-space: false # require machine-readable nolint directives (with no leading space)
allow-unused: false # report any unused nolint directives
require-specific: false # don't require nolint directives to be specific about which linter is being skipped
revive:
confidence: 0
linters:
enable:
- gci
- goimports
- errcheck
- govet
- ineffassign
- misspell
- nolintlint
- revive
- staticcheck
- unused
settings:
misspell:
locale: US
nolintlint:
allow-unused: false # report any unused nolint directives
require-specific: false # don't require nolint directives to be specific about which linter is being skipped
formatters:
enable:
- gci
- gofmt
- gofumpt
- goimports
- golines
settings:
gci:
sections:
- standard
- default
- localmodule


@ -2,8 +2,8 @@
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/locafero/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/locafero/actions/workflows/ci.yaml)
[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/locafero)
![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square)
[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org)
![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/sagikazarmark/locafero?style=flat-square&color=61CFDD)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/sagikazarmark/locafero/badge?style=flat-square)](https://deps.dev/go/github.com%252Fsagikazarmark%252Flocafero)
**Finder library for [Afero](https://github.com/spf13/afero) ported from [go-finder](https://github.com/sagikazarmark/go-finder).**


@ -5,19 +5,23 @@ import "io/fs"
// FileType represents the kind of entries [Finder] can return.
type FileType int
// FileType represents the kind of entries [Finder] can return.
const (
FileTypeAll FileType = iota
FileTypeAny FileType = iota
FileTypeFile
FileTypeDir
// Deprecated: Use [FileTypeAny] instead.
FileTypeAll = FileTypeAny
)
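Two behavior notes fall out of this diff: FileTypeAll survives only as a deprecated alias for FileTypeAny, and FileTypeFile now demands a regular file rather than merely "not a directory". A package-internal sketch (match is unexported; imports fmt, os assumed) showing why a symlink stops matching:

// /etc/localtime is a symlink on many Linux systems; Lstat preserves that.
info, err := os.Lstat("/etc/localtime")
if err == nil {
    fmt.Println(FileTypeAny.match(info))  // true: anything matches
    fmt.Println(FileTypeFile.match(info)) // false: a symlink is not a regular file
}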
func (ft FileType) matchFileInfo(info fs.FileInfo) bool {
func (ft FileType) match(info fs.FileInfo) bool {
switch ft {
case FileTypeAll:
case FileTypeAny:
return true
case FileTypeFile:
return !info.IsDir()
return info.Mode().IsRegular()
case FileTypeDir:
return info.IsDir()


@ -1,4 +1,4 @@
// Package finder looks for files and directories in an [fs.Fs] filesystem.
// Package locafero looks for files and directories in an [fs.Fs] filesystem.
package locafero
import (
@ -7,7 +7,7 @@ import (
"path/filepath"
"strings"
"github.com/sourcegraph/conc/iter"
"github.com/sourcegraph/conc/pool"
"github.com/spf13/afero"
)
@ -44,65 +44,66 @@ type Finder struct {
// Find looks for files and directories in an [afero.Fs] filesystem.
func (f Finder) Find(fsys afero.Fs) ([]string, error) {
// Arbitrary go routine limit (TODO: make this a parameter)
// pool := pool.NewWithResults[[]string]().WithMaxGoroutines(5).WithErrors().WithFirstError()
type searchItem struct {
path string
name string
}
var searchItems []searchItem
p := pool.NewWithResults[[]searchResult]().WithMaxGoroutines(5).WithErrors().WithFirstError()
for _, searchPath := range f.Paths {
searchPath := searchPath
for _, searchName := range f.Names {
searchName := searchName
p.Go(func() ([]searchResult, error) {
// If the name contains any glob character, perform a glob match
if strings.ContainsAny(searchName, globMatch) {
return globWalkSearch(fsys, searchPath, searchName, f.Type)
}
searchItems = append(searchItems, searchItem{searchPath, searchName})
// pool.Go(func() ([]string, error) {
// // If the name contains any glob character, perform a glob match
// if strings.ContainsAny(searchName, globMatch) {
// return globWalkSearch(fsys, searchPath, searchName, f.Type)
// }
//
// return statSearch(fsys, searchPath, searchName, f.Type)
// })
return statSearch(fsys, searchPath, searchName, f.Type)
})
}
}
// allResults, err := pool.Wait()
// if err != nil {
// return nil, err
// }
allResults, err := iter.MapErr(searchItems, func(item *searchItem) ([]string, error) {
// If the name contains any glob character, perform a glob match
if strings.ContainsAny(item.name, globMatch) {
return globWalkSearch(fsys, item.path, item.name, f.Type)
}
return statSearch(fsys, item.path, item.name, f.Type)
})
searchResults, err := flatten(p.Wait())
if err != nil {
return nil, err
}
var results []string
for _, r := range allResults {
results = append(results, r...)
// Return early if no results were found
if len(searchResults) == 0 {
return nil, nil
}
// Sort results in alphabetical order for now
// sort.Strings(results)
results := make([]string, 0, len(searchResults))
for _, searchResult := range searchResults {
results = append(results, searchResult.path)
}
return results, nil
}
func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) {
var results []string
type searchResult struct {
path string
info fs.FileInfo
}
func flatten[T any](results [][]T, err error) ([]T, error) {
if err != nil {
return nil, err
}
var flattened []T
for _, r := range results {
flattened = append(flattened, r...)
}
return flattened, nil
}
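flatten exists so Find can pass p.Wait()'s ([][]searchResult, error) pair straight through and merge the per-goroutine result slices in one step. Being generic, it works for any element type; a tiny package-internal sketch:

flat, err := flatten([][]int{{1, 2}, {3}}, nil)
// flat == []int{1, 2, 3}, err == nil; a non-nil err short-circuits instead.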
func globWalkSearch(
fsys afero.Fs,
searchPath string,
searchName string,
searchType FileType,
) ([]searchResult, error) {
var results []searchResult
err := afero.Walk(fsys, searchPath, func(p string, fileInfo fs.FileInfo, err error) error {
if err != nil {
@ -123,7 +124,7 @@ func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchT
}
// Skip entries whose type doesn't match
if !searchType.matchFileInfo(fileInfo) {
if !searchType.match(fileInfo) {
return result
}
@ -133,7 +134,7 @@ func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchT
}
if match {
results = append(results, p)
results = append(results, searchResult{p, fileInfo})
}
return result
@ -145,7 +146,12 @@ func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchT
return results, nil
}
func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) {
func statSearch(
fsys afero.Fs,
searchPath string,
searchName string,
searchType FileType,
) ([]searchResult, error) {
filePath := filepath.Join(searchPath, searchName)
fileInfo, err := fsys.Stat(filePath)
@ -157,9 +163,9 @@ func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType
}
// Skip entries whose type doesn't match
if !searchType.matchFileInfo(fileInfo) {
if !searchType.match(fileInfo) {
return nil, nil
}
return []string{filePath}, nil
return []searchResult{{filePath, fileInfo}}, nil
}
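End to end, the two search strategies above (glob walk vs. direct stat) stay hidden behind Finder.Find. A usage sketch; the paths and pattern are hypothetical:

package main

import (
    "fmt"

    "github.com/sagikazarmark/locafero"
    "github.com/spf13/afero"
)

func main() {
    finder := locafero.Finder{
        Paths: []string{"/etc/myapp", "."},
        Names: []string{"config.*"}, // glob characters route this through globWalkSearch
        Type:  locafero.FileTypeFile,
    }

    results, err := finder.Find(afero.NewOsFs())
    if err != nil {
        panic(err)
    }
    fmt.Println(results) // paths of matching regular files
}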


@ -2,30 +2,32 @@
"nodes": {
"cachix": {
"inputs": {
"devenv": "devenv_2",
"devenv": [
"devenv"
],
"flake-compat": [
"devenv"
],
"git-hooks": [
"devenv",
"flake-compat"
"git-hooks"
],
"nixpkgs": [
"devenv",
"nixpkgs"
],
"pre-commit-hooks": [
"devenv",
"pre-commit-hooks"
]
},
"locked": {
"lastModified": 1712055811,
"narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=",
"lastModified": 1748883665,
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
"owner": "cachix",
"repo": "cachix",
"rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30",
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "latest",
"repo": "cachix",
"type": "github"
}
@ -33,52 +35,21 @@
"devenv": {
"inputs": {
"cachix": "cachix",
"flake-compat": "flake-compat_2",
"nix": "nix_2",
"nixpkgs": "nixpkgs_2",
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1725907707,
"narHash": "sha256-s3pbtzZmVPHzc86WQjK7MGZMNvvw6hWnFMljEkllAfM=",
"owner": "cachix",
"repo": "devenv",
"rev": "2bbbbc468fc02257265a79652a8350651cca495a",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"devenv_2": {
"inputs": {
"flake-compat": [
"devenv",
"cachix",
"flake-compat"
],
"flake-compat": "flake-compat",
"git-hooks": "git-hooks",
"nix": "nix",
"nixpkgs": "nixpkgs",
"poetry2nix": "poetry2nix",
"pre-commit-hooks": [
"devenv",
"cachix",
"pre-commit-hooks"
]
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1708704632,
"narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=",
"lastModified": 1753981111,
"narHash": "sha256-uBJOyMxOkGRmxhD2M5rbN2aV6oP1T2AKq5oBaHHC4mw=",
"owner": "cachix",
"repo": "devenv",
"rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196",
"rev": "d4d70df706b153b601a87ab8e81c88a0b1a373b6",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "python-rewrite",
"repo": "devenv",
"type": "github"
}
@ -86,27 +57,11 @@
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"type": "github"
},
"original": {
@ -116,15 +71,37 @@
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"devenv",
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1733312601,
"narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_2": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1725234343,
"narHash": "sha256-+ebgonl3NbiKD2UD0x4BszCZQ6sTfL4xioaM49o5B3Y=",
"lastModified": 1753121425,
"narHash": "sha256-TVcTNvOeWWk1DXljFxVRp+E0tzG1LhrVjOGGoMHuXio=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "567b938d64d4b4112ee253b9274472dc3a346eb6",
"rev": "644e0fc48951a860279da645ba77fe4a6e814c5e",
"type": "github"
},
"original": {
@ -133,39 +110,29 @@
"type": "github"
}
},
"flake-utils": {
"git-hooks": {
"inputs": {
"systems": "systems"
"flake-compat": [
"devenv",
"flake-compat"
],
"gitignore": "gitignore",
"nixpkgs": [
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1689068808,
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
"lastModified": 1750779888,
"narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
@ -173,7 +140,7 @@
"inputs": {
"nixpkgs": [
"devenv",
"pre-commit-hooks",
"git-hooks",
"nixpkgs"
]
},
@ -192,165 +159,49 @@
}
},
"nix": {
"inputs": {
"flake-compat": "flake-compat",
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression"
},
"locked": {
"lastModified": 1712911606,
"narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.21",
"repo": "nix",
"type": "github"
}
},
"nix-github-actions": {
"inputs": {
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"poetry2nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1688870561,
"narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "165b1650b753316aa7f1787f3005a8d2da0f5301",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
}
},
"nix_2": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"flake-parts": "flake-parts",
"git-hooks-nix": [
"devenv",
"git-hooks"
],
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression_2"
"nixpkgs-23-11": [
"devenv"
],
"nixpkgs-regression": [
"devenv"
]
},
"locked": {
"lastModified": 1712911606,
"narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
"owner": "domenkozar",
"lastModified": 1752773918,
"narHash": "sha256-dOi/M6yNeuJlj88exI+7k154z+hAhFcuB8tZktiW7rg=",
"owner": "cachix",
"repo": "nix",
"rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
"rev": "031c3cf42d2e9391eee373507d8c12e0f9606779",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.21",
"owner": "cachix",
"ref": "devenv-2.30",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1692808169,
"narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9201b5ff357e781bf014d0330d18555695df7ba8",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1725233747,
"narHash": "sha256-Ss8QWLXdr2JCBPcYChJhz4xJm+h/xjl4G0c0XlP6a74=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-regression_2": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1710695816,
"narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "614b4613980a522ba49f0d194531beddbb7220d3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1713361204,
"narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=",
"lastModified": 1750441195,
"narHash": "sha256-yke+pm+MdgRb6c0dPt8MgDhv7fcBbdjmv1ZceNTyzKg=",
"owner": "cachix",
"repo": "devenv-nixpkgs",
"rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6",
"rev": "0ceffe312871b443929ff3006960d29b120dc627",
"type": "github"
},
"original": {
@ -360,110 +211,42 @@
"type": "github"
}
},
"nixpkgs_3": {
"nixpkgs-lib": {
"locked": {
"lastModified": 1725910328,
"narHash": "sha256-n9pCtzGZ0httmTwMuEbi5E78UQ4ZbQMr1pzi5N0LAG8=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5775c2583f1801df7b790bf7f7d710a19bac66f4",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"poetry2nix": {
"inputs": {
"flake-utils": "flake-utils",
"nix-github-actions": "nix-github-actions",
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1692876271,
"narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=",
"lastModified": 1751159883,
"narHash": "sha256-urW/Ylk9FIfvXfliA1ywh75yszAbiTEVgpPeinFyVZo=",
"owner": "nix-community",
"repo": "poetry2nix",
"rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3",
"repo": "nixpkgs.lib",
"rev": "14a40a1d7fb9afa4739275ac642ed7301a9ba1ab",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "poetry2nix",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"flake-utils": "flake-utils_2",
"gitignore": "gitignore",
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"nixpkgs_2": {
"locked": {
"lastModified": 1713775815,
"narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4",
"lastModified": 1753939845,
"narHash": "sha256-K2ViRJfdVGE8tpJejs8Qpvvejks1+A4GQej/lBk5y7I=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "94def634a20494ee057c76998843c015909d6311",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"devenv": "devenv",
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs_3"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
"flake-parts": "flake-parts_2",
"nixpkgs": "nixpkgs_2"
}
}
},

View file

@ -1,64 +1,42 @@
{
description = "Finder library for Afero";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
flake-parts.url = "github:hercules-ci/flake-parts";
devenv.url = "github:cachix/devenv";
};
outputs = inputs@{ flake-parts, ... }:
outputs =
inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [
inputs.devenv.flakeModule
];
systems = [ "x86_64-linux" "aarch64-darwin" ];
systems = [
"x86_64-linux"
"aarch64-darwin"
];
perSystem = { config, self', inputs', pkgs, system, ... }: rec {
devenv.shells = {
default = {
languages = {
go.enable = true;
go.package = pkgs.lib.mkDefault pkgs.go_1_23;
};
perSystem =
{ pkgs, ... }:
{
devenv.shells = {
default = {
languages = {
go.enable = true;
go.package = pkgs.lib.mkDefault pkgs.go_1_24;
};
packages = with pkgs; [
just
packages = with pkgs; [
just
golangci-lint
];
golangci-lint
];
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
containers = pkgs.lib.mkForce { };
};
ci = devenv.shells.default;
ci_1_21 = {
imports = [ devenv.shells.ci ];
languages = {
go.package = pkgs.go_1_21;
};
};
ci_1_22 = {
imports = [ devenv.shells.ci ];
languages = {
go.package = pkgs.go_1_22;
};
};
ci_1_23 = {
imports = [ devenv.shells.ci ];
languages = {
go.package = pkgs.go_1_23;
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
containers = pkgs.lib.mkForce { };
};
};
};
};
};
}

View file

@ -2,10 +2,13 @@ default:
just --list
test:
go test -race -v ./...
go test -count 10 -shuffle on -race -v ./...
fuzz:
go test -race -v -fuzz=Fuzz -fuzztime=60s ./...
lint:
golangci-lint run
fmt:
golangci-lint run --fix
golangci-lint fmt

24
vendor/github.com/sourcegraph/conc/Makefile generated vendored Normal file
View file

@ -0,0 +1,24 @@
.DEFAULT_GOAL := help
GO_BIN ?= $(shell go env GOPATH)/bin
.PHONY: help
help:
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
$(GO_BIN)/golangci-lint:
@echo "==> Installing golangci-lint within "${GO_BIN}""
@go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@latest
.PHONY: lint
lint: $(GO_BIN)/golangci-lint ## Run linting on Go files
@echo "==> Linting Go source files"
@golangci-lint run -v --fix -c .golangci.yml ./...
.PHONY: test
test: ## Run tests
go test -race -v ./... -coverprofile ./coverage.txt
.PHONY: bench
bench: ## Run benchmarks. See https://pkg.go.dev/cmd/go#hdr-Testing_flags
go test ./... -bench . -benchtime 5s -timeout 0 -run=XXX -cpu 1 -benchmem

View file

@ -1,10 +0,0 @@
//go:build !go1.20
// +build !go1.20
package multierror
import "go.uber.org/multierr"
var (
Join = multierr.Combine
)

View file

@ -1,10 +0,0 @@
//go:build go1.20
// +build go1.20
package multierror
import "errors"
var (
Join = errors.Join
)
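Both build-tagged shims above are deleted: the vendored conc now targets Go 1.20+ and calls the standard library's `errors.Join` directly (see `error_pool.go` below). A quick illustration of `errors.Join` semantics:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	errA := errors.New("a")
	err := errors.Join(errA, nil, errors.New("b")) // nil arguments are dropped
	fmt.Println(err)                  // prints "a" and "b" on separate lines
	fmt.Println(errors.Is(err, errA)) // true: joined errors unwrap via errors.Is
}
```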

View file

@ -1,85 +0,0 @@
package iter
import (
"runtime"
"sync/atomic"
"github.com/sourcegraph/conc"
)
// defaultMaxGoroutines returns the default maximum number of
// goroutines to use within this package.
func defaultMaxGoroutines() int { return runtime.GOMAXPROCS(0) }
// Iterator can be used to configure the behaviour of ForEach
// and ForEachIdx. The zero value is safe to use with reasonable
// defaults.
//
// Iterator is also safe for reuse and concurrent use.
type Iterator[T any] struct {
// MaxGoroutines controls the maximum number of goroutines
// to use on this Iterator's methods.
//
// If unset, MaxGoroutines defaults to runtime.GOMAXPROCS(0).
MaxGoroutines int
}
// ForEach executes f in parallel over each element in input.
//
// It is safe to mutate the input parameter, which makes it
// possible to map in place.
//
// ForEach always uses at most runtime.GOMAXPROCS goroutines.
// It takes roughly 2µs to start up the goroutines and adds
// an overhead of roughly 50ns per element of input. For
// a configurable goroutine limit, use a custom Iterator.
func ForEach[T any](input []T, f func(*T)) { Iterator[T]{}.ForEach(input, f) }
// ForEach executes f in parallel over each element in input,
// using up to the Iterator's configured maximum number of
// goroutines.
//
// It is safe to mutate the input parameter, which makes it
// possible to map in place.
//
// It takes roughly 2µs to start up the goroutines and adds
// an overhead of roughly 50ns per element of input.
func (iter Iterator[T]) ForEach(input []T, f func(*T)) {
iter.ForEachIdx(input, func(_ int, t *T) {
f(t)
})
}
// ForEachIdx is the same as ForEach except it also provides the
// index of the element to the callback.
func ForEachIdx[T any](input []T, f func(int, *T)) { Iterator[T]{}.ForEachIdx(input, f) }
// ForEachIdx is the same as ForEach except it also provides the
// index of the element to the callback.
func (iter Iterator[T]) ForEachIdx(input []T, f func(int, *T)) {
if iter.MaxGoroutines == 0 {
// iter is a value receiver and is hence safe to mutate
iter.MaxGoroutines = defaultMaxGoroutines()
}
numInput := len(input)
if iter.MaxGoroutines > numInput {
// No more concurrent tasks than the number of input items.
iter.MaxGoroutines = numInput
}
var idx atomic.Int64
// Create the task outside the loop to avoid extra closure allocations.
task := func() {
i := int(idx.Add(1) - 1)
for ; i < numInput; i = int(idx.Add(1) - 1) {
f(i, &input[i])
}
}
var wg conc.WaitGroup
for i := 0; i < iter.MaxGoroutines; i++ {
wg.Go(task)
}
wg.Wait()
}

View file

@ -1,65 +0,0 @@
package iter
import (
"sync"
"github.com/sourcegraph/conc/internal/multierror"
)
// Mapper is an Iterator with a result type R. It can be used to configure
// the behaviour of Map and MapErr. The zero value is safe to use with
// reasonable defaults.
//
// Mapper is also safe for reuse and concurrent use.
type Mapper[T, R any] Iterator[T]
// Map applies f to each element of input, returning the mapped result.
//
// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable
// goroutine limit, use a custom Mapper.
func Map[T, R any](input []T, f func(*T) R) []R {
return Mapper[T, R]{}.Map(input, f)
}
// Map applies f to each element of input, returning the mapped result.
//
// Map uses up to the configured Mapper's maximum number of goroutines.
func (m Mapper[T, R]) Map(input []T, f func(*T) R) []R {
res := make([]R, len(input))
Iterator[T](m).ForEachIdx(input, func(i int, t *T) {
res[i] = f(t)
})
return res
}
// MapErr applies f to each element of the input, returning the mapped result
// and a combined error of all returned errors.
//
// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable
// goroutine limit, use a custom Mapper.
func MapErr[T, R any](input []T, f func(*T) (R, error)) ([]R, error) {
return Mapper[T, R]{}.MapErr(input, f)
}
// MapErr applies f to each element of the input, returning the mapped result
// and a combined error of all returned errors.
//
// Map uses up to the configured Mapper's maximum number of goroutines.
func (m Mapper[T, R]) MapErr(input []T, f func(*T) (R, error)) ([]R, error) {
var (
res = make([]R, len(input))
errMux sync.Mutex
errs error
)
Iterator[T](m).ForEachIdx(input, func(i int, t *T) {
var err error
res[i], err = f(t)
if err != nil {
errMux.Lock()
// TODO: use stdlib errors once multierrors land in go 1.20
errs = multierror.Join(errs, err)
errMux.Unlock()
}
})
return res, errs
}

104
vendor/github.com/sourcegraph/conc/pool/context_pool.go generated vendored Normal file
View file

@ -0,0 +1,104 @@
package pool
import (
"context"
)
// ContextPool is a pool that runs tasks that take a context.
// A new ContextPool should be created with `New().WithContext(ctx)`.
//
// The configuration methods (With*) will panic if they are used after calling
// Go() for the first time.
type ContextPool struct {
errorPool ErrorPool
ctx context.Context
cancel context.CancelFunc
cancelOnError bool
}
// Go submits a task. If it returns an error, the error will be
// collected and returned by Wait(). If all goroutines in the pool
// are busy, a call to Go() will block until the task can be started.
func (p *ContextPool) Go(f func(ctx context.Context) error) {
p.errorPool.Go(func() error {
if p.cancelOnError {
// If we are cancelling on error, then we also want to cancel if a
// panic is raised. To do this, we need to recover, cancel, and then
// re-throw the caught panic.
defer func() {
if r := recover(); r != nil {
p.cancel()
panic(r)
}
}()
}
err := f(p.ctx)
if err != nil && p.cancelOnError {
// Leaky abstraction warning: We add the error directly because
// otherwise, canceling could cause another goroutine to exit and
// return an error before this error was added, which breaks the
// expectations of WithFirstError().
p.errorPool.addErr(err)
p.cancel()
return nil
}
return err
})
}
// Wait cleans up all spawned goroutines, propagates any panics, and
// returns an error if any of the tasks errored.
func (p *ContextPool) Wait() error {
// Make sure we call cancel after pool is done to avoid memory leakage.
defer p.cancel()
return p.errorPool.Wait()
}
// WithFirstError configures the pool to only return the first error
// returned by a task. By default, Wait() will return a combined error.
// This is particularly useful for (*ContextPool).WithCancelOnError(),
// where all errors after the first are likely to be context.Canceled.
func (p *ContextPool) WithFirstError() *ContextPool {
p.panicIfInitialized()
p.errorPool.WithFirstError()
return p
}
// WithCancelOnError configures the pool to cancel its context as soon as
// any task returns an error or panics. By default, the pool's context is not
// canceled until the parent context is canceled.
//
// In this case, all errors returned from the pool after the first will
// likely be context.Canceled - you may want to also use
// (*ContextPool).WithFirstError() to configure the pool to only return
// the first error.
func (p *ContextPool) WithCancelOnError() *ContextPool {
p.panicIfInitialized()
p.cancelOnError = true
return p
}
// WithFailFast is an alias for the combination of WithFirstError and
// WithCancelOnError. By default, the errors from all tasks are returned and
// the pool's context is not canceled until the parent context is canceled.
func (p *ContextPool) WithFailFast() *ContextPool {
p.panicIfInitialized()
p.WithFirstError()
p.WithCancelOnError()
return p
}
// WithMaxGoroutines limits the number of goroutines in a pool.
// Defaults to unlimited. Panics if n < 1.
func (p *ContextPool) WithMaxGoroutines(n int) *ContextPool {
p.panicIfInitialized()
p.errorPool.WithMaxGoroutines(n)
return p
}
func (p *ContextPool) panicIfInitialized() {
p.errorPool.panicIfInitialized()
}
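A minimal runnable sketch of the `ContextPool` semantics documented above (the task bodies are illustrative only):

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/sourcegraph/conc/pool"
)

func main() {
	p := pool.New().WithContext(context.Background()).WithFailFast()
	for i := 0; i < 3; i++ {
		i := i // capture loop variable (pre-Go 1.22 semantics)
		p.Go(func(ctx context.Context) error {
			if i == 1 {
				return errors.New("boom") // cancels ctx for the other tasks
			}
			<-ctx.Done() // the remaining tasks observe the cancellation
			return ctx.Err()
		})
	}
	fmt.Println(p.Wait()) // "boom": WithFailFast keeps only the first error
}
```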

100
vendor/github.com/sourcegraph/conc/pool/error_pool.go generated vendored Normal file
View file

@ -0,0 +1,100 @@
package pool
import (
"context"
"errors"
"sync"
)
// ErrorPool is a pool that runs tasks that may return an error.
// Errors are collected and returned by Wait().
//
// The configuration methods (With*) will panic if they are used after calling
// Go() for the first time.
//
// A new ErrorPool should be created using `New().WithErrors()`.
type ErrorPool struct {
pool Pool
onlyFirstError bool
mu sync.Mutex
errs []error
}
// Go submits a task to the pool. If all goroutines in the pool
// are busy, a call to Go() will block until the task can be started.
func (p *ErrorPool) Go(f func() error) {
p.pool.Go(func() {
p.addErr(f())
})
}
// Wait cleans up any spawned goroutines, propagating any panics and
// returning any errors from tasks.
func (p *ErrorPool) Wait() error {
p.pool.Wait()
errs := p.errs
p.errs = nil // reset errs
if len(errs) == 0 {
return nil
} else if p.onlyFirstError {
return errs[0]
} else {
return errors.Join(errs...)
}
}
// WithContext converts the pool to a ContextPool for tasks that should
// run under the same context, such that they each respect shared cancellation.
// For example, WithCancelOnError can be configured on the returned pool to
// signal that all goroutines should be cancelled upon the first error.
func (p *ErrorPool) WithContext(ctx context.Context) *ContextPool {
p.panicIfInitialized()
ctx, cancel := context.WithCancel(ctx)
return &ContextPool{
errorPool: p.deref(),
ctx: ctx,
cancel: cancel,
}
}
// WithFirstError configures the pool to only return the first error
// returned by a task. By default, Wait() will return a combined error.
func (p *ErrorPool) WithFirstError() *ErrorPool {
p.panicIfInitialized()
p.onlyFirstError = true
return p
}
// WithMaxGoroutines limits the number of goroutines in a pool.
// Defaults to unlimited. Panics if n < 1.
func (p *ErrorPool) WithMaxGoroutines(n int) *ErrorPool {
p.panicIfInitialized()
p.pool.WithMaxGoroutines(n)
return p
}
// deref is a helper that creates a shallow copy of the pool with the same
// settings. We don't want to just dereference the pointer because that makes
// the copylock lint angry.
func (p *ErrorPool) deref() ErrorPool {
return ErrorPool{
pool: p.pool.deref(),
onlyFirstError: p.onlyFirstError,
}
}
func (p *ErrorPool) panicIfInitialized() {
p.pool.panicIfInitialized()
}
func (p *ErrorPool) addErr(err error) {
if err != nil {
p.mu.Lock()
p.errs = append(p.errs, err)
p.mu.Unlock()
}
}
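A runnable sketch of `ErrorPool` (task bodies are illustrative):

```go
package main

import (
	"fmt"

	"github.com/sourcegraph/conc/pool"
)

func main() {
	p := pool.New().WithErrors().WithMaxGoroutines(2)
	for i := 0; i < 4; i++ {
		i := i
		p.Go(func() error {
			if i%2 == 1 {
				return fmt.Errorf("task %d failed", i)
			}
			return nil
		})
	}
	// Wait returns errors.Join of all task errors (or only the first
	// error if WithFirstError had been configured).
	if err := p.Wait(); err != nil {
		fmt.Println(err)
	}
}
```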

174
vendor/github.com/sourcegraph/conc/pool/pool.go generated vendored Normal file
View file

@ -0,0 +1,174 @@
package pool
import (
"context"
"sync"
"github.com/sourcegraph/conc"
)
// New creates a new Pool.
func New() *Pool {
return &Pool{}
}
// Pool is a pool of goroutines used to execute tasks concurrently.
//
// Tasks are submitted with Go(). Once all your tasks have been submitted, you
// must call Wait() to clean up any spawned goroutines and propagate any
// panics.
//
// Goroutines are started lazily, so creating a new pool is cheap. There will
// never be more goroutines spawned than there are tasks submitted.
//
// The configuration methods (With*) will panic if they are used after calling
// Go() for the first time.
//
// Pool is efficient, but not zero cost. It should not be used for very short
// tasks. Startup and teardown come with an overhead of around 1µs, and each
// task has an overhead of around 300ns.
type Pool struct {
handle conc.WaitGroup
limiter limiter
tasks chan func()
initOnce sync.Once
}
// Go submits a task to be run in the pool. If all goroutines in the pool
// are busy, a call to Go() will block until the task can be started.
func (p *Pool) Go(f func()) {
p.init()
if p.limiter == nil {
// No limit on the number of goroutines.
select {
case p.tasks <- f:
// A goroutine was available to handle the task.
default:
// No goroutine was available to handle the task.
// Spawn a new one and send it the task.
p.handle.Go(func() {
p.worker(f)
})
}
} else {
select {
case p.limiter <- struct{}{}:
// If we are below our limit, spawn a new worker rather
// than waiting for one to become available.
p.handle.Go(func() {
p.worker(f)
})
case p.tasks <- f:
// A worker is available and has accepted the task.
return
}
}
}
// Wait cleans up spawned goroutines, propagating any panics that were
// raised by a task.
func (p *Pool) Wait() {
p.init()
close(p.tasks)
// After Wait() returns, reset the struct so tasks will be reinitialized on
// next use. This better matches the behavior of sync.WaitGroup
defer func() { p.initOnce = sync.Once{} }()
p.handle.Wait()
}
// MaxGoroutines returns the maximum size of the pool.
func (p *Pool) MaxGoroutines() int {
return p.limiter.limit()
}
// WithMaxGoroutines limits the number of goroutines in a pool.
// Defaults to unlimited. Panics if n < 1.
func (p *Pool) WithMaxGoroutines(n int) *Pool {
p.panicIfInitialized()
if n < 1 {
panic("max goroutines in a pool must be greater than zero")
}
p.limiter = make(limiter, n)
return p
}
// init ensures that the pool is initialized before use. This makes the
// zero value of the pool usable.
func (p *Pool) init() {
p.initOnce.Do(func() {
p.tasks = make(chan func())
})
}
// panicIfInitialized will trigger a panic if a configuration method is called
// after the pool has started any goroutines for the first time. In the case that
// new settings are needed, a new pool should be created.
func (p *Pool) panicIfInitialized() {
if p.tasks != nil {
panic("pool can not be reconfigured after calling Go() for the first time")
}
}
// WithErrors converts the pool to an ErrorPool so the submitted tasks can
// return errors.
func (p *Pool) WithErrors() *ErrorPool {
p.panicIfInitialized()
return &ErrorPool{
pool: p.deref(),
}
}
// deref is a helper that creates a shallow copy of the pool with the same
// settings. We don't want to just dereference the pointer because that makes
// the copylock lint angry.
func (p *Pool) deref() Pool {
p.panicIfInitialized()
return Pool{
limiter: p.limiter,
}
}
// WithContext converts the pool to a ContextPool for tasks that should
// run under the same context, such that they each respect shared cancellation.
// For example, WithCancelOnError can be configured on the returned pool to
// signal that all goroutines should be cancelled upon the first error.
func (p *Pool) WithContext(ctx context.Context) *ContextPool {
p.panicIfInitialized()
ctx, cancel := context.WithCancel(ctx)
return &ContextPool{
errorPool: p.WithErrors().deref(),
ctx: ctx,
cancel: cancel,
}
}
func (p *Pool) worker(initialFunc func()) {
// The only time this matters is if the task panics.
// This makes it possible to spin up new workers in that case.
defer p.limiter.release()
if initialFunc != nil {
initialFunc()
}
for f := range p.tasks {
f()
}
}
type limiter chan struct{}
func (l limiter) limit() int {
return cap(l)
}
func (l limiter) release() {
if l != nil {
<-l
}
}
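And a sketch of the plain `Pool`, whose constructor and limiter are defined above:

```go
package main

import (
	"fmt"

	"github.com/sourcegraph/conc/pool"
)

func main() {
	// At most 4 goroutines run at once; Go() blocks once the limit is hit.
	p := pool.New().WithMaxGoroutines(4)
	for i := 0; i < 10; i++ {
		i := i
		p.Go(func() {
			fmt.Println("processing item", i) // completion order is unspecified
		})
	}
	p.Wait() // propagates any task panics and resets the pool for reuse
}
```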

View file

@ -0,0 +1,85 @@
package pool
import (
"context"
)
// ResultContextPool is a pool that runs tasks that take a context and return a
// result. The context passed to the task will be canceled if any of the tasks
// return an error, which makes its functionality different than just capturing
// a context with the task closure.
//
// The configuration methods (With*) will panic if they are used after calling
// Go() for the first time.
type ResultContextPool[T any] struct {
contextPool ContextPool
agg resultAggregator[T]
collectErrored bool
}
// Go submits a task to the pool. If all goroutines in the pool
// are busy, a call to Go() will block until the task can be started.
func (p *ResultContextPool[T]) Go(f func(context.Context) (T, error)) {
idx := p.agg.nextIndex()
p.contextPool.Go(func(ctx context.Context) error {
res, err := f(ctx)
p.agg.save(idx, res, err != nil)
return err
})
}
// Wait cleans up all spawned goroutines, propagates any panics, and
// returns an error if any of the tasks errored.
func (p *ResultContextPool[T]) Wait() ([]T, error) {
err := p.contextPool.Wait()
results := p.agg.collect(p.collectErrored)
p.agg = resultAggregator[T]{}
return results, err
}
// WithCollectErrored configures the pool to still collect the result of a task
// even if the task returned an error. By default, the result of tasks that errored
// are ignored and only the error is collected.
func (p *ResultContextPool[T]) WithCollectErrored() *ResultContextPool[T] {
p.panicIfInitialized()
p.collectErrored = true
return p
}
// WithFirstError configures the pool to only return the first error
// returned by a task. By default, Wait() will return a combined error.
func (p *ResultContextPool[T]) WithFirstError() *ResultContextPool[T] {
p.panicIfInitialized()
p.contextPool.WithFirstError()
return p
}
// WithCancelOnError configures the pool to cancel its context as soon as
// any task returns an error. By default, the pool's context is not
// canceled until the parent context is canceled.
func (p *ResultContextPool[T]) WithCancelOnError() *ResultContextPool[T] {
p.panicIfInitialized()
p.contextPool.WithCancelOnError()
return p
}
// WithFailFast is an alias for the combination of WithFirstError and
// WithCancelOnError. By default, the errors from all tasks are returned and
// the pool's context is not canceled until the parent context is canceled.
func (p *ResultContextPool[T]) WithFailFast() *ResultContextPool[T] {
p.panicIfInitialized()
p.contextPool.WithFailFast()
return p
}
// WithMaxGoroutines limits the number of goroutines in a pool.
// Defaults to unlimited. Panics if n < 1.
func (p *ResultContextPool[T]) WithMaxGoroutines(n int) *ResultContextPool[T] {
p.panicIfInitialized()
p.contextPool.WithMaxGoroutines(n)
return p
}
func (p *ResultContextPool[T]) panicIfInitialized() {
p.contextPool.panicIfInitialized()
}
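A runnable sketch of `ResultContextPool` (the tasks are illustrative; a real task would honor `ctx`):

```go
package main

import (
	"context"
	"fmt"

	"github.com/sourcegraph/conc/pool"
)

func main() {
	p := pool.NewWithResults[string]().WithContext(context.Background())
	for _, word := range []string{"a", "b", "c"} {
		word := word
		p.Go(func(ctx context.Context) (string, error) {
			return word + "!", nil // results keep submission order
		})
	}
	results, err := p.Wait()
	fmt.Println(results, err) // [a! b! c!] <nil>
}
```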

View file

@ -0,0 +1,80 @@
package pool
import (
"context"
)
// ResultErrorPool is a pool that executes tasks that return a generic result
// type and an error. Tasks are executed in the pool with Go(), then the
// results of the tasks are returned by Wait().
//
// The order of the results is guaranteed to be the same as the order the
// tasks were submitted.
//
// The configuration methods (With*) will panic if they are used after calling
// Go() for the first time.
type ResultErrorPool[T any] struct {
errorPool ErrorPool
agg resultAggregator[T]
collectErrored bool
}
// Go submits a task to the pool. If all goroutines in the pool
// are busy, a call to Go() will block until the task can be started.
func (p *ResultErrorPool[T]) Go(f func() (T, error)) {
idx := p.agg.nextIndex()
p.errorPool.Go(func() error {
res, err := f()
p.agg.save(idx, res, err != nil)
return err
})
}
// Wait cleans up any spawned goroutines, propagating any panics and
// returning the results and any errors from tasks.
func (p *ResultErrorPool[T]) Wait() ([]T, error) {
err := p.errorPool.Wait()
results := p.agg.collect(p.collectErrored)
p.agg = resultAggregator[T]{} // reset for reuse
return results, err
}
// WithCollectErrored configures the pool to still collect the result of a task
// even if the task returned an error. By default, the result of tasks that errored
// are ignored and only the error is collected.
func (p *ResultErrorPool[T]) WithCollectErrored() *ResultErrorPool[T] {
p.panicIfInitialized()
p.collectErrored = true
return p
}
// WithContext converts the pool to a ResultContextPool for tasks that should
// run under the same context, such that they each respect shared cancellation.
// For example, WithCancelOnError can be configured on the returned pool to
// signal that all goroutines should be cancelled upon the first error.
func (p *ResultErrorPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] {
p.panicIfInitialized()
return &ResultContextPool[T]{
contextPool: *p.errorPool.WithContext(ctx),
}
}
// WithFirstError configures the pool to only return the first error
// returned by a task. By default, Wait() will return a combined error.
func (p *ResultErrorPool[T]) WithFirstError() *ResultErrorPool[T] {
p.panicIfInitialized()
p.errorPool.WithFirstError()
return p
}
// WithMaxGoroutines limits the number of goroutines in a pool.
// Defaults to unlimited. Panics if n < 1.
func (p *ResultErrorPool[T]) WithMaxGoroutines(n int) *ResultErrorPool[T] {
p.panicIfInitialized()
p.errorPool.WithMaxGoroutines(n)
return p
}
func (p *ResultErrorPool[T]) panicIfInitialized() {
p.errorPool.panicIfInitialized()
}
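A runnable sketch of `ResultErrorPool`, showing that an errored task's result is dropped by default:

```go
package main

import (
	"fmt"

	"github.com/sourcegraph/conc/pool"
)

func main() {
	p := pool.NewWithResults[int]().WithErrors()
	for i := 0; i < 4; i++ {
		i := i
		p.Go(func() (int, error) {
			if i == 3 {
				return 0, fmt.Errorf("task %d failed", i)
			}
			return i * 10, nil
		})
	}
	// Without WithCollectErrored, the failed task's result is omitted.
	results, err := p.Wait()
	fmt.Println(results, err) // [0 10 20] task 3 failed
}
```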

142
vendor/github.com/sourcegraph/conc/pool/result_pool.go generated vendored Normal file
View file

@ -0,0 +1,142 @@
package pool
import (
"context"
"sort"
"sync"
)
// NewWithResults creates a new ResultPool for tasks with a result of type T.
//
// The configuration methods (With*) will panic if they are used after calling
// Go() for the first time.
func NewWithResults[T any]() *ResultPool[T] {
return &ResultPool[T]{
pool: *New(),
}
}
// ResultPool is a pool that executes tasks that return a generic result type.
// Tasks are executed in the pool with Go(), then the results of the tasks are
// returned by Wait().
//
// The order of the results is guaranteed to be the same as the order the
// tasks were submitted.
type ResultPool[T any] struct {
pool Pool
agg resultAggregator[T]
}
// Go submits a task to the pool. If all goroutines in the pool
// are busy, a call to Go() will block until the task can be started.
func (p *ResultPool[T]) Go(f func() T) {
idx := p.agg.nextIndex()
p.pool.Go(func() {
p.agg.save(idx, f(), false)
})
}
// Wait cleans up all spawned goroutines, propagating any panics, and returning
// a slice of results from tasks that did not panic.
func (p *ResultPool[T]) Wait() []T {
p.pool.Wait()
results := p.agg.collect(true)
p.agg = resultAggregator[T]{} // reset for reuse
return results
}
// MaxGoroutines returns the maximum size of the pool.
func (p *ResultPool[T]) MaxGoroutines() int {
return p.pool.MaxGoroutines()
}
// WithErrors converts the pool to an ResultErrorPool so the submitted tasks
// can return errors.
func (p *ResultPool[T]) WithErrors() *ResultErrorPool[T] {
p.panicIfInitialized()
return &ResultErrorPool[T]{
errorPool: *p.pool.WithErrors(),
}
}
// WithContext converts the pool to a ResultContextPool for tasks that should
// run under the same context, such that they each respect shared cancellation.
// For example, WithCancelOnError can be configured on the returned pool to
// signal that all goroutines should be cancelled upon the first error.
func (p *ResultPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] {
p.panicIfInitialized()
return &ResultContextPool[T]{
contextPool: *p.pool.WithContext(ctx),
}
}
// WithMaxGoroutines limits the number of goroutines in a pool.
// Defaults to unlimited. Panics if n < 1.
func (p *ResultPool[T]) WithMaxGoroutines(n int) *ResultPool[T] {
p.panicIfInitialized()
p.pool.WithMaxGoroutines(n)
return p
}
func (p *ResultPool[T]) panicIfInitialized() {
p.pool.panicIfInitialized()
}
// resultAggregator is a utility type that lets us safely append from multiple
// goroutines. The zero value is valid and ready to use.
type resultAggregator[T any] struct {
mu sync.Mutex
len int
results []T
errored []int
}
// nextIndex reserves a slot for a result. The returned value should be passed
// to save() when adding a result to the aggregator.
func (r *resultAggregator[T]) nextIndex() int {
r.mu.Lock()
defer r.mu.Unlock()
nextIdx := r.len
r.len += 1
return nextIdx
}
func (r *resultAggregator[T]) save(i int, res T, errored bool) {
r.mu.Lock()
defer r.mu.Unlock()
if i >= len(r.results) {
old := r.results
r.results = make([]T, r.len)
copy(r.results, old)
}
r.results[i] = res
if errored {
r.errored = append(r.errored, i)
}
}
// collect returns the set of aggregated results.
func (r *resultAggregator[T]) collect(collectErrored bool) []T {
if !r.mu.TryLock() {
panic("collect should not be called until all goroutines have exited")
}
if collectErrored || len(r.errored) == 0 {
return r.results
}
filtered := r.results[:0]
sort.Ints(r.errored)
for i, e := range r.errored {
if i == 0 {
filtered = append(filtered, r.results[:e]...)
} else {
filtered = append(filtered, r.results[r.errored[i-1]+1:e]...)
}
}
return filtered
}
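To make the ordering guarantee concrete, a minimal runnable sketch of `ResultPool`:

```go
package main

import (
	"fmt"

	"github.com/sourcegraph/conc/pool"
)

func main() {
	p := pool.NewWithResults[int]().WithMaxGoroutines(4)
	for i := 0; i < 5; i++ {
		i := i
		p.Go(func() int { return i * i })
	}
	// Results come back in submission order regardless of scheduling.
	fmt.Println(p.Wait()) // [0 1 4 9 16]
}
```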

View file

@ -10,3 +10,6 @@ trim_trailing_whitespace = true
[*.go]
indent_style = tab
[{*.yml,*.yaml}]
indent_size = 2

View file

@ -1,18 +1,48 @@
linters-settings:
gci:
sections:
- standard
- default
- prefix(github.com/spf13/afero)
version: "2"
run:
timeout: 10m
linters:
disable-all: true
enable:
- gci
- gofmt
- gofumpt
- staticcheck
enable:
- govet
- ineffassign
- misspell
- nolintlint
# - revive
- staticcheck
- unused
issues:
exclude-dirs:
- gcsfs/internal/stiface
disable:
- errcheck
# - staticcheck
settings:
misspell:
locale: US
nolintlint:
allow-unused: false # report any unused nolint directives
require-specific: false # don't require nolint directives to be specific about which linter is being skipped
exclusions:
paths:
- gcsfs/internal/stiface
formatters:
enable:
- gci
- gofmt
- gofumpt
- goimports
- golines
settings:
gci:
sections:
- standard
- default
- localmodule
exclusions:
paths:
- gcsfs/internal/stiface

View file

@ -1,442 +1,474 @@
![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)
A FileSystem Abstraction System for Go
[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Overview
Afero is a filesystem framework providing a simple, uniform and universal API
for interacting with any filesystem, as an abstraction layer providing
interfaces, types and methods. Afero has an exceptionally clean interface and
simple design without needless constructors or initialization methods.
Afero is also a library providing a base set of interoperable backend
filesystems that are easy to work with, while retaining all the power
and benefit of the os and ioutil packages.
Afero provides significant improvements over using the os package alone, most
notably the ability to create mock and testing filesystems without relying on the disk.
It is suitable for use in any situation where you would consider using the OS
package as it provides an additional abstraction that makes it easy to use a
memory backed file system during testing. It also adds support for the http
filesystem for full interoperability.
<img src="https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png" alt="afero logo-sm"/>
## Afero Features
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/afero/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/afero/actions?query=workflow%3ACI)
[![GoDoc](https://pkg.go.dev/badge/mod/github.com/spf13/afero)](https://pkg.go.dev/mod/github.com/spf13/afero)
[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/afero)](https://goreportcard.com/report/github.com/spf13/afero)
![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square)
* A single consistent API for accessing a variety of filesystems
* Interoperation between a variety of file system types
* A set of interfaces to encourage and enforce interoperability between backends
* An atomic cross platform memory backed file system
* Support for compositional (union) file systems by combining multiple file systems acting as one
* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
* A set of utility functions ported from io, ioutil & hugo to be afero aware
* Wrapper for go 1.16 filesystem abstraction `io/fs.FS`
# Using Afero
# Afero: The Universal Filesystem Abstraction for Go
Afero is easy to use and easier to adopt.
Afero is a powerful and extensible filesystem abstraction system for Go. It provides a single, unified API for interacting with diverse filesystems—including the local disk, memory, archives, and network storage.
A few different ways you could use Afero:
Afero acts as a drop-in replacement for the standard `os` package, enabling you to write modular code that is agnostic to the underlying storage, dramatically simplifying testing, and allowing for sophisticated architectural patterns through filesystem composition.
* Use the interfaces alone to define your own file system.
* Wrapper for the OS packages.
* Define different filesystems for different parts of your application.
* Use Afero for mock filesystems while testing
## Why Afero?
## Step 1: Install Afero
Afero elevates filesystem interaction beyond simple file reading and writing, offering solutions for testability, flexibility, and advanced architecture.
First use go get to install the latest version of the library.
🔑 **Key Features:**
$ go get github.com/spf13/afero
* **Universal API:** Write your code once. Run it against the local OS, in-memory storage, ZIP/TAR archives, or remote systems (SFTP, GCS).
* **Ultimate Testability:** Utilize `MemMapFs`, a fully concurrent-safe, read/write in-memory filesystem. Write fast, isolated, and reliable unit tests without touching the physical disk or worrying about cleanup.
* **Powerful Composition:** Afero's hidden superpower. Layer filesystems on top of each other to create sophisticated behaviors:
* **Sandboxing:** Use `CopyOnWriteFs` to create temporary scratch spaces that isolate changes from the base filesystem.
* **Caching:** Use `CacheOnReadFs` to automatically layer a fast cache (like memory) over a slow backend (like a network drive).
* **Security Jails:** Use `BasePathFs` to restrict application access to a specific subdirectory (chroot).
* **`os` Package Compatibility:** Afero mirrors the functions in the standard `os` package, making adoption and refactoring seamless.
* **`io/fs` Compatibility:** Fully compatible with the Go standard library's `io/fs` interfaces.
## Installation
```bash
go get github.com/spf13/afero
```
Next include Afero in your application.
```go
import "github.com/spf13/afero"
```
## Step 2: Declare a backend
## Quick Start: The Power of Abstraction
First define a package variable and set it to a pointer to a filesystem.
```go
var AppFs = afero.NewMemMapFs()
The core of Afero is the `afero.Fs` interface. By designing your functions to accept this interface rather than calling `os.*` functions directly, your code instantly becomes more flexible and testable.
or
### 1. Refactor Your Code
var AppFs = afero.NewOsFs()
```
It is important to note that if you repeat the composite literal you
will be using a completely new and isolated filesystem. In the case of
OsFs it will still use the same underlying filesystem but will reduce
the ability to drop in other filesystems as desired.
## Step 3: Use it like you would the OS package
Throughout your application use any function and method like you normally
would.
So if my application before had:
```go
os.Open("/tmp/foo")
```
We would replace it with:
```go
AppFs.Open("/tmp/foo")
```
`AppFs` being the variable we defined above.
## List of all available functions
File System Methods Available:
```go
Chmod(name string, mode os.FileMode) : error
Chown(name string, uid, gid int) : error
Chtimes(name string, atime time.Time, mtime time.Time) : error
Create(name string) : File, error
Mkdir(name string, perm os.FileMode) : error
MkdirAll(path string, perm os.FileMode) : error
Name() : string
Open(name string) : File, error
OpenFile(name string, flag int, perm os.FileMode) : File, error
Remove(name string) : error
RemoveAll(path string) : error
Rename(oldname, newname string) : error
Stat(name string) : os.FileInfo, error
```
File Interfaces and Methods Available:
```go
io.Closer
io.Reader
io.ReaderAt
io.Seeker
io.Writer
io.WriterAt
Name() : string
Readdir(count int) : []os.FileInfo, error
Readdirnames(n int) : []string, error
Stat() : os.FileInfo, error
Sync() : error
Truncate(size int64) : error
WriteString(s string) : ret int, err error
```
In some applications it may make sense to define a new package that
simply exports the file system variable for easy access from anywhere.
## Using Afero's utility functions
Afero provides a set of functions to make it easier to use the underlying file systems.
These functions have been primarily ported from io & ioutil with some developed for Hugo.
The afero utilities support all afero compatible backends.
The list of utilities includes:
Change functions that rely on the `os` package to accept `afero.Fs`.
```go
DirExists(path string) (bool, error)
Exists(path string) (bool, error)
FileContainsBytes(filename string, subslice []byte) (bool, error)
GetTempDir(subPath string) string
IsDir(path string) (bool, error)
IsEmpty(path string) (bool, error)
ReadDir(dirname string) ([]os.FileInfo, error)
ReadFile(filename string) ([]byte, error)
SafeWriteReader(path string, r io.Reader) (err error)
TempDir(dir, prefix string) (name string, err error)
TempFile(dir, prefix string) (f File, err error)
Walk(root string, walkFn filepath.WalkFunc) error
WriteFile(filename string, data []byte, perm os.FileMode) error
WriteReader(path string, r io.Reader) (err error)
```
For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
// Before: Coupled to the OS and difficult to test
// func ProcessConfiguration(path string) error {
// data, err := os.ReadFile(path)
// ...
// }
They can be used in two different ways. You can either call them directly,
where the first parameter of each function is the file system, or you can
declare a new `Afero`, a custom type used to bind these functions as methods
to a given filesystem.
import "github.com/spf13/afero"
### Calling utilities directly
```go
fs := new(afero.MemMapFs)
f, err := afero.TempFile(fs,"", "ioutil-test")
```
### Calling via Afero
```go
fs := afero.NewMemMapFs()
afs := &afero.Afero{Fs: fs}
f, err := afs.TempFile("", "ioutil-test")
```
## Using Afero for Testing
There is a large benefit to using a mock filesystem for testing. It has a
completely blank state every time it is initialized and is easily
reproducible regardless of OS. You could create files to your heart's content
and the file access would be fast while also saving you from all the annoying
issues with deleting temporary files, Windows file locking, etc. The MemMapFs
backend is perfect for testing.
* Much faster than performing I/O operations on disk
* Avoid security issues and permissions
* Far more control. 'rm -rf /' with confidence
* Test setup is far easier to do
* No test cleanup needed
One way to accomplish this is to define a variable as mentioned above.
In your application this will be set to afero.NewOsFs(); during testing you
can set it to afero.NewMemMapFs().
It wouldn't be uncommon to have each test initialize a blank slate memory
backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
appropriate in my application code. This approach ensures that tests are order
independent, with no test relying on the state left by an earlier test.
Then in my tests I would initialize a new MemMapFs for each test:
```go
func TestExist(t *testing.T) {
appFS := afero.NewMemMapFs()
// create test files and directories
appFS.MkdirAll("src/a", 0755)
afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
name := "src/c"
_, err := appFS.Stat(name)
if os.IsNotExist(err) {
t.Errorf("file \"%s\" does not exist.\n", name)
}
// After: Decoupled, flexible, and testable
func ProcessConfiguration(fs afero.Fs, path string) error {
// Use Afero utility functions which mirror os/ioutil
data, err := afero.ReadFile(fs, path)
// ... process the data
return err
}
```
# Available Backends
### 2. Usage in Production
## Operating System Native
### OsFs
The first is simply a wrapper around the native OS calls. This makes it
very easy to use as all of the calls are the same as the existing OS
calls. It also makes it trivial to have your code use the OS during
operation and a mock filesystem during testing or as needed.
In your production environment, inject the `OsFs` backend, which wraps the standard operating system calls.
```go
appfs := afero.NewOsFs()
appfs.MkdirAll("src/a", 0755)
func main() {
// Use the real OS filesystem
AppFs := afero.NewOsFs()
ProcessConfiguration(AppFs, "/etc/myapp.conf")
}
```
## Memory Backed Storage
### 3. Usage in Testing
### MemMapFs
Afero also provides a fully atomic memory backed filesystem, perfect for use in
mocking and for avoiding unnecessary disk I/O when persistence isn't
necessary. It is fully concurrent and will work safely within goroutines.
In your tests, inject `MemMapFs`. This provides a blazing-fast, isolated, in-memory filesystem that requires no disk I/O and no cleanup.
```go
mm := afero.NewMemMapFs()
mm.MkdirAll("src/a", 0755)
func TestProcessConfiguration(t *testing.T) {
// Use the in-memory filesystem
AppFs := afero.NewMemMapFs()
// Pre-populate the memory filesystem for the test
configPath := "/test/config.json"
afero.WriteFile(AppFs, configPath, []byte(`{"feature": true}`), 0644)
// Run the test entirely in memory
err := ProcessConfiguration(AppFs, configPath)
if err != nil {
t.Fatal(err)
}
}
```
#### InMemoryFile
## Afero's Superpower: Composition
As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
backed file implementation. This can be used in other memory backed file
systems with ease. Plans are to add a radix tree memory stored file
system using InMemoryFile.
Afero's most unique feature is its ability to combine filesystems. This allows you to build complex behaviors out of simple components, keeping your application logic clean.
## Network Interfaces
### Example 1: Sandboxing with Copy-on-Write
### SftpFs
Afero has experimental support for the secure file transfer protocol (SFTP),
which can be used to perform file operations over an encrypted channel.
### GCSFs
Afero has experimental support for Google Cloud Storage (GCS). You can either set the
`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in
`NewGcsFS` to configure access to your GCS bucket.
Some known limitations of the existing implementation:
* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version.
* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that is left for another version.
* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent.
## Filtering Backends
### BasePathFs
The BasePathFs restricts all operations to a given path within an Fs.
The given file name to the operations on this Fs will be prepended with
the base path before calling the source Fs.
Create a temporary environment where an application can "modify" system files without affecting the actual disk.
```go
bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
// 1. The base layer is the real OS, made read-only for safety.
baseFs := afero.NewReadOnlyFs(afero.NewOsFs())
// 2. The overlay layer is a temporary in-memory filesystem for changes.
overlayFs := afero.NewMemMapFs()
// 3. Combine them. Reads fall through to the base; writes only hit the overlay.
sandboxFs := afero.NewCopyOnWriteFs(baseFs, overlayFs)
// The application can now "modify" /etc/hosts, but the changes are isolated in memory.
afero.WriteFile(sandboxFs, "/etc/hosts", []byte("127.0.0.1 sandboxed-app"), 0644)
// The real /etc/hosts on disk is untouched.
```
### ReadOnlyFs
### Example 2: Caching a Slow Filesystem
A thin wrapper around the source Fs providing a read only view.
Improve performance by layering a fast cache (like memory) over a slow backend (like a network drive or cloud storage).
```go
fs := afero.NewReadOnlyFs(afero.NewOsFs())
_, err := fs.Create("/file.txt")
// err = syscall.EPERM
import "time"
// Assume 'remoteFs' is a slow backend (e.g., SFTP or GCS)
var remoteFs afero.Fs
// 'cacheFs' is a fast in-memory backend
cacheFs := afero.NewMemMapFs()
// Create the caching layer. Cache items for 5 minutes upon first read.
cachedFs := afero.NewCacheOnReadFs(remoteFs, cacheFs, 5*time.Minute)
// The first read is slow (fetches from remote, then caches)
data1, _ := afero.ReadFile(cachedFs, "data.json")
// The second read is instant (serves from memory cache)
data2, _ := afero.ReadFile(cachedFs, "data.json")
```
### RegexpFs
### Example 3: Security Jails (chroot)
A filtered view on file names; any file NOT matching
the passed regexp will be treated as non-existing.
Files not matching the regexp provided will not be created.
Directories are not filtered.
Restrict an application component's access to a specific subdirectory.
```go
fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
_, err := fs.Create("/file.html")
// err = syscall.ENOENT
osFs := afero.NewOsFs()
// Create a filesystem rooted at /home/user/public
// The application cannot access anything above this directory.
jailedFs := afero.NewBasePathFs(osFs, "/home/user/public")
// To the application, this is reading "/"
// In reality, it's reading "/home/user/public/"
dirInfo, err := afero.ReadDir(jailedFs, "/")
// Attempts to access parent directories fail
_, err = jailedFs.Open("../secrets.txt") // Returns an error
```
### HttpFs
## Real-World Use Cases
Afero provides an http compatible backend which can wrap any of the existing
backends.
### Build Cloud-Agnostic Applications
The Http package requires a slightly specific version of Open which
returns an http.File type.
Afero provides an httpFs file system which satisfies this requirement.
Any Afero FileSystem can be used as an httpFs.
Write applications that seamlessly work with different storage backends:
```go
httpFs := afero.NewHttpFs(<ExistingFS>)
fileserver := http.FileServer(httpFs.Dir(<PATH>))
http.Handle("/", fileserver)
type DocumentProcessor struct {
fs afero.Fs
}
func NewDocumentProcessor(fs afero.Fs) *DocumentProcessor {
return &DocumentProcessor{fs: fs}
}
func (p *DocumentProcessor) Process(inputPath, outputPath string) error {
// This code works whether fs is local disk, cloud storage, or memory
content, err := afero.ReadFile(p.fs, inputPath)
if err != nil {
return err
}
processed := processContent(content)
return afero.WriteFile(p.fs, outputPath, processed, 0644)
}
// Use with local filesystem
processor := NewDocumentProcessor(afero.NewOsFs())
// Use with Google Cloud Storage
processor := NewDocumentProcessor(gcsFS)
// Use with in-memory filesystem for testing
processor := NewDocumentProcessor(afero.NewMemMapFs())
```
## Composite Backends
### Treating Archives as Filesystems
Afero provides the ability to have two (or more) filesystems act as a single
file system.
### CacheOnReadFs
The CacheOnReadFs will lazily make copies of any accessed files from the base
layer into the overlay. Subsequent reads will be pulled from the overlay
directly, provided the request is within the cache duration of when it was
created in the overlay.
If the base filesystem is writeable, any changes to files will be
done first to the base, then to the overlay layer. Write calls to open file
handles like `Write()` or `Truncate()` go to the overlay first.
To write files to the overlay only, you can use the overlay Fs directly (not
via the union Fs).
Cache files in the layer for the given time.Duration; a cache duration of 0
means "forever", meaning the file will never be re-requested from the base.
A read-only base will make the overlay also read-only but still copy files
from the base to the overlay when they're not present (or outdated) in the
caching layer.
Read files directly from `.zip` or `.tar` archives without unpacking them to disk first.
```go
base := afero.NewOsFs()
layer := afero.NewMemMapFs()
ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
import (
"archive/zip"
"github.com/spf13/afero/zipfs"
)
// Assume 'zipReader' is a *zip.Reader initialized from a file or memory
var zipReader *zip.Reader
// Create a read-only ZipFs
archiveFS := zipfs.New(zipReader)
// Read a file from within the archive using the standard Afero API
content, err := afero.ReadFile(archiveFS, "/docs/readme.md")
```
### CopyOnWriteFs()
### Serving Any Filesystem over HTTP
The CopyOnWriteFs is a read only base file system with a potentially
writeable layer on top.
Read operations will first look in the overlay and if not found there, will
serve the file from the base.
Changes to the file system will only be made in the overlay.
Any attempt to modify a file found only in the base will copy the file to the
overlay layer before modification (including opening a file with a writable
handle).
Removing and Renaming files present only in the base layer is not currently
permitted. If a file is present in the base layer and the overlay, only the
overlay will be removed/renamed.
Use `HttpFs` to expose any Afero filesystem—even one created dynamically in memory—through a standard Go web server.
```go
base := afero.NewOsFs()
roBase := afero.NewReadOnlyFs(base)
ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
import (
"net/http"
"github.com/spf13/afero"
)
fh, _ := ufs.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
func main() {
memFS := afero.NewMemMapFs()
afero.WriteFile(memFS, "index.html", []byte("<h1>Hello from Memory!</h1>"), 0644)
// Wrap the memory filesystem to make it compatible with http.FileServer.
httpFS := afero.NewHttpFs(memFS)
http.Handle("/", http.FileServer(httpFS.Dir("/")))
http.ListenAndServe(":8080", nil)
}
```
In this example all write operations will only occur in memory (MemMapFs),
leaving the base filesystem (OsFs) untouched.
### Testing Made Simple

One of Afero's greatest strengths is making filesystem-dependent code easily testable:

```go
func SaveUserData(fs afero.Fs, userID string, data []byte) error {
	filename := fmt.Sprintf("users/%s.json", userID)
	return afero.WriteFile(fs, filename, data, 0644)
}

func TestSaveUserData(t *testing.T) {
	// Create a clean, fast, in-memory filesystem for testing
	testFS := afero.NewMemMapFs()
	userData := []byte(`{"name": "John", "email": "john@example.com"}`)

	err := SaveUserData(testFS, "123", userData)
	if err != nil {
		t.Fatalf("SaveUserData failed: %v", err)
	}

	// Verify the file was saved correctly
	saved, err := afero.ReadFile(testFS, "users/123.json")
	if err != nil {
		t.Fatalf("Failed to read saved file: %v", err)
	}
	if string(saved) != string(userData) {
		t.Errorf("Data mismatch: got %s, want %s", saved, userData)
	}
}
```

**Benefits of testing with Afero:**

- ⚡ **Fast** - No disk I/O, tests run in memory
- 🔄 **Reliable** - Each test starts with a clean slate
- 🧹 **No cleanup** - Memory is automatically freed
- 🔒 **Safe** - Can't accidentally modify real files
- 🏃 **Parallel** - Tests can run concurrently without conflicts
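Because each test can construct its own in-memory filesystem, parallel subtests need no shared state or locking. A minimal sketch of the pattern, reusing the `SaveUserData` function from above:

```go
func TestSaveUserDataParallel(t *testing.T) {
	for _, id := range []string{"a", "b", "c"} {
		id := id // capture range variable (needed before Go 1.22)
		t.Run(id, func(t *testing.T) {
			t.Parallel()
			// Each subtest gets its own isolated filesystem.
			testFS := afero.NewMemMapFs()
			if err := SaveUserData(testFS, id, []byte(`{}`)); err != nil {
				t.Fatal(err)
			}
		})
	}
}
```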
## Backend Reference

| Type | Backend | Constructor | Description | Status |
| :--- | :--- | :--- | :--- | :--- |
| **Core** | **OsFs** | `afero.NewOsFs()` | Interacts with the real operating system filesystem. Use in production. | ✅ Official |
| | **MemMapFs** | `afero.NewMemMapFs()` | A fast, atomic, concurrent-safe, in-memory filesystem. Ideal for testing. | ✅ Official |
| **Composition** | **CopyOnWriteFs**| `afero.NewCopyOnWriteFs(base, overlay)` | A read-only base with a writable overlay. Ideal for sandboxing. | ✅ Official |
| | **CacheOnReadFs**| `afero.NewCacheOnReadFs(base, cache, ttl)` | Lazily caches files from a slow base into a fast layer on first read. | ✅ Official |
| | **BasePathFs** | `afero.NewBasePathFs(source, path)` | Restricts operations to a subdirectory (chroot/jail). | ✅ Official |
| | **ReadOnlyFs** | `afero.NewReadOnlyFs(source)` | Provides a read-only view, preventing any modifications. | ✅ Official |
| | **RegexpFs** | `afero.NewRegexpFs(source, regexp)` | Filters a filesystem, only showing files that match a regex. | ✅ Official |
| **Utility** | **HttpFs** | `afero.NewHttpFs(source)` | Wraps any Afero filesystem to be served via `http.FileServer`. | ✅ Official |
| **Archives** | **ZipFs** | `zipfs.New(zipReader)` | Read-only access to files within a ZIP archive. | ✅ Official |
| | **TarFs** | `tarfs.New(tarReader)` | Read-only access to files within a TAR archive. | ✅ Official |
| **Network** | **GcsFs** | `gcsfs.NewGcsFs(...)` | Google Cloud Storage backend. | ⚡ Experimental |
| | **SftpFs** | `sftpfs.New(...)` | SFTP backend. | ⚡ Experimental |
| **3rd Party Cloud** | **S3Fs** | [`fclairamb/afero-s3`](https://github.com/fclairamb/afero-s3) | Production-ready S3 backend built on the official AWS SDK. | 🔹 3rd Party |
| | **MinioFs** | [`cpyun/afero-minio`](https://github.com/cpyun/afero-minio) | MinIO object storage backend with S3 compatibility. | 🔹 3rd Party |
| | **DriveFs** | [`fclairamb/afero-gdrive`](https://github.com/fclairamb/afero-gdrive) | Google Drive backend with streaming support. | 🔹 3rd Party |
| | **DropboxFs** | [`fclairamb/afero-dropbox`](https://github.com/fclairamb/afero-dropbox) | Dropbox backend with streaming support. | 🔹 3rd Party |
| **3rd Party Specialized** | **GitFs** | [`tobiash/go-gitfs`](https://github.com/tobiash/go-gitfs) | Git repository filesystem (read-only, Afero compatible). | 🔹 3rd Party |
| | **DockerFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | Docker container filesystem access. | 🔹 3rd Party |
| | **GitHubFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | GitHub repository and releases filesystem. | 🔹 3rd Party |
| | **FilterFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | Filesystem filtering with predicates. | 🔹 3rd Party |
| | **IgnoreFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | .gitignore-aware filtering filesystem. | 🔹 3rd Party |
| | **FUSEFs** | [`JakWai01/sile-fystem`](https://github.com/JakWai01/sile-fystem) | Generic FUSE implementation using any Afero backend. | 🔹 3rd Party |
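The composition constructors in the table combine freely. As a hedged sketch (the paths are illustrative), a read-only "jail" that exposes only a single subtree:

```go
// Restrict all operations to /var/app/data, then forbid writes entirely.
jail := afero.NewReadOnlyFs(afero.NewBasePathFs(afero.NewOsFs(), "/var/app/data"))

ok, _ := afero.Exists(jail, "reports/latest.csv")           // reads pass through
err := afero.WriteFile(jail, "x.txt", []byte("no"), 0644)   // fails: filesystem is read-only
```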
## Afero vs. `io/fs` (Go 1.16+)

Go 1.16 introduced the `io/fs` package, which provides a standard abstraction for **read-only** filesystems.

Afero complements `io/fs` by focusing on different needs:

* **Use `io/fs` when:** You only need to read files and want to conform strictly to the standard library interfaces.
* **Use Afero when:**
  * Your application needs to **create, write, modify, or delete** files.
  * You need to test complex read/write interactions (e.g., renaming, concurrent writes).
  * You need advanced compositional features (Copy-on-Write, Caching, etc.).

Afero is fully compatible with `io/fs`. You can wrap any Afero filesystem to satisfy the `fs.FS` interface using `afero.NewIOFS`:

```go
import "io/fs"

// Create an Afero filesystem (writable)
var myAferoFs afero.Fs = afero.NewMemMapFs()

// Convert it to a standard library fs.FS (read-only view)
var myIoFs fs.FS = afero.NewIOFS(myAferoFs)
```
## Third-Party Backends & Ecosystem
The Afero community has developed numerous backends and tools that extend the library's capabilities. Below are curated, well-maintained options organized by maturity and reliability.
### Featured Community Backends
These are mature, reliable backends that we can confidently recommend for production use:
#### **Amazon S3** - [`fclairamb/afero-s3`](https://github.com/fclairamb/afero-s3)
Production-ready S3 backend built on the official AWS SDK for Go.
```go
import "github.com/fclairamb/afero-s3"
s3fs := s3.NewFs(bucket, session)
```
#### **MinIO** - [`cpyun/afero-minio`](https://github.com/cpyun/afero-minio)
MinIO object storage backend providing S3-compatible object storage with deduplication and optimization features.
```go
import "github.com/cpyun/afero-minio"
minioFs := miniofs.NewMinioFs(ctx, "minio://endpoint/bucket")
```
### Community & Specialized Backends
#### Cloud Storage
- **Google Drive** - [`fclairamb/afero-gdrive`](https://github.com/fclairamb/afero-gdrive)
Streaming support; no write-seeking or POSIX permissions; no files listing cache
- **Dropbox** - [`fclairamb/afero-dropbox`](https://github.com/fclairamb/afero-dropbox)
Streaming support; no write-seeking or POSIX permissions
#### Version Control Systems
- **Git Repositories** - [`tobiash/go-gitfs`](https://github.com/tobiash/go-gitfs)
Read-only filesystem abstraction for Git repositories. Works with bare repositories and provides filesystem view of any git reference. Uses go-git for repository access.
#### Container and Remote Systems
- **Docker Containers** - [`unmango/aferox`](https://github.com/unmango/aferox)
Access Docker container filesystems as if they were local filesystems
- **GitHub API** - [`unmango/aferox`](https://github.com/unmango/aferox)
Turn GitHub repositories, releases, and assets into browsable filesystems
#### FUSE Integration
- **Generic FUSE** - [`JakWai01/sile-fystem`](https://github.com/JakWai01/sile-fystem)
Mount any Afero filesystem as a FUSE filesystem, allowing any Afero backend to be used as a real mounted filesystem
#### Specialized Filesystems
- **FAT32 Support** - [`aligator/GoFAT`](https://github.com/aligator/GoFAT)
Pure Go FAT filesystem implementation (currently read-only)
### Interface Adapters & Utilities
**Cross-Interface Compatibility:**
- [`jfontan/go-billy-desfacer`](https://github.com/jfontan/go-billy-desfacer) - Adapter between Afero and go-billy interfaces (for go-git compatibility)
- [`Maldris/go-billy-afero`](https://github.com/Maldris/go-billy-afero) - Alternative wrapper for using Afero with go-billy
- [`c4milo/afero2billy`](https://github.com/c4milo/afero2billy) - Another Afero to billy filesystem adapter
**Working Directory Management:**
- [`carolynvs/aferox`](https://github.com/carolynvs/aferox) - Working directory-aware filesystem wrapper
**Advanced Filtering:**
- [`unmango/aferox`](https://github.com/unmango/aferox) includes multiple specialized filesystems:
- **FilterFs** - Predicate-based file filtering
- **IgnoreFs** - .gitignore-aware filtering
- **WriterFs** - Dump writes to io.Writer for debugging
#### Developer Tools & Utilities
**nhatthm Utility Suite** - Essential tools for Afero development:
- [`nhatthm/aferocopy`](https://github.com/nhatthm/aferocopy) - Copy files between any Afero filesystems
- [`nhatthm/aferomock`](https://github.com/nhatthm/aferomock) - Mocking toolkit for testing
- [`nhatthm/aferoassert`](https://github.com/nhatthm/aferoassert) - Assertion helpers for filesystem testing
### Ecosystem Showcase
**Windows Virtual Drives** - [`balazsgrill/potatodrive`](https://github.com/balazsgrill/potatodrive)
Mount any Afero filesystem as a Windows drive letter. Brilliant demonstration of Afero's power!
### Modern Asset Embedding (Go 1.16+)
Instead of third-party tools, use Go's native `//go:embed` with Afero:
```go
import (
"embed"
"github.com/spf13/afero"
)
//go:embed assets/*
var assetsFS embed.FS
func main() {
// Convert embedded files to Afero filesystem
fs := afero.FromIOFS{FS: assetsFS}
// Use like any other Afero filesystem
content, _ := afero.ReadFile(fs, "assets/config.json")
}
```
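The embedded filesystem composes with the wrappers shown earlier. For example, a hedged sketch (the route and directory names are illustrative) serving the same `assetsFS` over HTTP via `HttpFs`:

```go
httpFS := afero.NewHttpFs(afero.FromIOFS{FS: assetsFS})
http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(httpFS.Dir("assets"))))
http.ListenAndServe(":8080", nil)
```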
## Contributing

We welcome contributions! The project is mature, but we are actively looking for contributors to help implement and stabilize network/cloud backends:

* 🔥 **Microsoft Azure Blob Storage**
* 🔒 **Modern Encryption Backend** - Built on secure, contemporary crypto (not legacy EncFS)
* 🐙 **Canonical go-git Adapter** - Unified solution for Git integration
* 📡 **SSH/SCP Backend** - Secure remote file operations
* Stabilization of existing experimental backends (GCS, SFTP)

To contribute:

1. Fork the repository
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create a new Pull Request

## 📄 License

Afero is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) for details.

## 🔗 Additional Resources

- [📖 Full API Documentation](https://pkg.go.dev/github.com/spf13/afero)
- [🎯 Examples Repository](https://github.com/spf13/afero/tree/master/examples)
- [📋 Release Notes](https://github.com/spf13/afero/releases)
- [❓ GitHub Discussions](https://github.com/spf13/afero/discussions)
---
*Afero comes from the Latin roots Ad-Facere, meaning "to make" or "to do" - fitting for a library that empowers you to make and do amazing things with filesystems.*

View file

@ -34,7 +34,8 @@ func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
_, err := u.base.Stat(name)
if err != nil {
if oerr, ok := err.(*os.PathError); ok {
if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT ||
oerr.Err == syscall.ENOTDIR {
return false, nil
}
}
@ -237,7 +238,11 @@ func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File,
return u.layer.OpenFile(name, flag, perm)
}
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
return nil, &os.PathError{
Op: "open",
Path: name,
Err: syscall.ENOTDIR,
} // ...or os.ErrNotExist?
}
if b {
return u.base.OpenFile(name, flag, perm)

View file

@ -137,7 +137,7 @@ type readDirFile struct {
var _ fs.ReadDirFile = readDirFile{}
func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) {
items, err := r.File.Readdir(n)
items, err := r.Readdir(n)
if err != nil {
return nil, err
}
@ -161,7 +161,12 @@ var _ Fs = FromIOFS{}
func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) }
func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) }
func (f FromIOFS) Mkdir(
name string,
perm os.FileMode,
) error {
return notImplemented("mkdir", name)
}
func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error {
return notImplemented("mkdirall", path)

View file

@ -19,9 +19,9 @@ import (
// Lstater is an optional interface in Afero. It is only implemented by the
// filesystems saying so.
// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem.
// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
// Else it will call Stat.
// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not.
// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
type Lstater interface {
LstatIfPossible(name string) (os.FileInfo, bool, error)
}

View file

@ -150,7 +150,11 @@ func (f *File) Sync() error {
func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
if !f.fileData.dir {
return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")}
return nil, &os.PathError{
Op: "readdir",
Path: f.fileData.name,
Err: errors.New("not a dir"),
}
}
var outLength int64
@ -236,7 +240,11 @@ func (f *File) Truncate(size int64) error {
return ErrFileClosed
}
if f.readOnly {
return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
return &os.PathError{
Op: "truncate",
Path: f.fileData.name,
Err: errors.New("file handle is read only"),
}
}
if size < 0 {
return ErrOutOfRange
@ -273,7 +281,11 @@ func (f *File) Write(b []byte) (n int, err error) {
return 0, ErrFileClosed
}
if f.readOnly {
return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
return 0, &os.PathError{
Op: "write",
Path: f.fileData.name,
Err: errors.New("file handle is read only"),
}
}
n = len(b)
cur := atomic.LoadInt64(&f.at)
@ -285,7 +297,9 @@ func (f *File) Write(b []byte) (n int, err error) {
tail = f.fileData.data[n+int(cur):]
}
if diff > 0 {
f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...)
f.fileData.data = append(
f.fileData.data,
append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...)
f.fileData.data = append(f.fileData.data, tail...)
} else {
f.fileData.data = append(f.fileData.data[:cur], b...)

View file

@ -92,7 +92,8 @@ func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
func (f *UnionFile) Write(s []byte) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.Write(s)
if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
if err == nil &&
f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
_, err = f.Base.Write(s)
}
return n, err
@ -157,7 +158,7 @@ var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, err
// return a single view of the overlayed directories.
// At the end of the directory view, the error is io.EOF if c > 0.
func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
var merge DirsMerger = f.Merger
merge := f.Merger
if merge == nil {
merge = defaultUnionMergeDirsFn
}

View file

@ -113,11 +113,11 @@ func GetTempDir(fs Fs, subPath string) string {
if subPath != "" {
// preserve windows backslash :-(
if FilePathSeparator == "\\" {
subPath = strings.Replace(subPath, "\\", "____", -1)
subPath = strings.ReplaceAll(subPath, "\\", "____")
}
dir = dir + UnicodeSanitize((subPath))
if FilePathSeparator == "\\" {
dir = strings.Replace(dir, "____", "\\", -1)
dir = strings.ReplaceAll(dir, "____", "\\")
}
if exists, _ := Exists(fs, dir); exists {

30
vendor/github.com/spf13/cast/map.go generated vendored
View file

@ -15,7 +15,7 @@ func toMapE[K comparable, V any](i any, keyFn func(any) K, valFn func(any) V) (m
m := map[K]V{}
if i == nil {
return nil, fmt.Errorf(errorMsg, i, i, m)
return m, fmt.Errorf(errorMsg, i, i, m)
}
switch v := i.(type) {
@ -45,14 +45,10 @@ func toMapE[K comparable, V any](i any, keyFn func(any) K, valFn func(any) V) (m
case string:
err := jsonStringToObject(v, &m)
if err != nil {
return nil, err
}
return m, nil
return m, err
default:
return nil, fmt.Errorf(errorMsg, i, i, m)
return m, fmt.Errorf(errorMsg, i, i, m)
}
}
@ -112,23 +108,19 @@ func ToStringMapStringSliceE(i any) (map[string][]string, error) {
for k, val := range v {
key, err := ToStringE(k)
if err != nil {
return nil, fmt.Errorf(errorMsg, i, i, m)
return m, fmt.Errorf(errorMsg, i, i, m)
}
value, err := ToStringSliceE(val)
if err != nil {
return nil, fmt.Errorf(errorMsg, i, i, m)
return m, fmt.Errorf(errorMsg, i, i, m)
}
m[key] = value
}
case string:
err := jsonStringToObject(v, &m)
if err != nil {
return nil, err
}
return m, nil
return m, err
default:
return nil, fmt.Errorf(errorMsg, i, i, m)
return m, fmt.Errorf(errorMsg, i, i, m)
}
return m, nil
@ -180,15 +172,11 @@ func toStringMapIntE[T int | int64](i any, fn func(any) T, fnE func(any) (T, err
case string:
err := jsonStringToObject(v, &m)
if err != nil {
return nil, err
}
return m, nil
return m, err
}
if reflect.TypeOf(i).Kind() != reflect.Map {
return nil, fmt.Errorf(errorMsg, i, i, m)
return m, fmt.Errorf(errorMsg, i, i, m)
}
mVal := reflect.ValueOf(m)

View file

@ -16,3 +16,6 @@ indent_style = tab
[*.nix]
indent_size = 2
[.golangci.yaml]
indent_size = 2

View file

@ -1,105 +1,118 @@
run:
timeout: 5m
version: "2"
linters-settings:
gci:
sections:
- standard
- default
- prefix(github.com/spf13/viper)
gocritic:
# Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage.
enabled-tags:
- diagnostic
- experimental
- opinionated
- style
disabled-checks:
- importShadow
- unnamedResult
goimports:
local-prefixes: github.com/spf13/viper
run:
timeout: 5m
linters:
disable-all: true
enable:
- bodyclose
- dogsled
- dupl
- durationcheck
- exhaustive
- gci
- gocritic
- godot
- gofmt
- gofumpt
- goimports
- gomoddirectives
- goprintffuncname
- govet
- importas
- ineffassign
- makezero
- misspell
- nakedret
- nilerr
enable:
- bodyclose
- dogsled
- dupl
- durationcheck
- exhaustive
- gocritic
- godot
- gomoddirectives
- goprintffuncname
- govet
- importas
- ineffassign
- makezero
- misspell
- nakedret
- nilerr
- noctx
- nolintlint
- prealloc
- predeclared
- revive
- rowserrcheck
- sqlclosecheck
- staticcheck
- tparallel
- unconvert
- unparam
- unused
- wastedassign
- whitespace
# fixme
# - cyclop
# - errcheck
# - errorlint
# - exhaustivestruct
# - forbidigo
# - forcetypeassert
# - gochecknoglobals
# - gochecknoinits
# - gocognit
# - goconst
# - gocyclo
# - gosec
# - gosimple
# - ifshort
# - lll
# - nlreturn
# - paralleltest
# - scopelint
# - thelper
# - wrapcheck
# unused
# - depguard
# - goheader
# - gomodguard
# don't enable:
# - asciicheck
# - funlen
# - godox
# - goerr113
# - gomnd
# - interfacer
# - maligned
# - nestif
# - testpackage
# - wsl
exclusions:
rules:
- linters:
- errcheck
- noctx
- nolintlint
- prealloc
- predeclared
- revive
- rowserrcheck
- sqlclosecheck
- staticcheck
- stylecheck
- tparallel
- typecheck
- unconvert
- unparam
- unused
- wastedassign
- whitespace
path: _test.go
presets:
- comments
- std-error-handling
# fixme
# - cyclop
# - errcheck
# - errorlint
# - exhaustivestruct
# - forbidigo
# - forcetypeassert
# - gochecknoglobals
# - gochecknoinits
# - gocognit
# - goconst
# - gocyclo
# - gosec
# - gosimple
# - ifshort
# - lll
# - nlreturn
# - paralleltest
# - scopelint
# - thelper
# - wrapcheck
settings:
misspell:
locale: US
nolintlint:
allow-unused: false # report any unused nolint directives
require-specific: false # don't require nolint directives to be specific about which linter is being skipped
gocritic:
# Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage.
enabled-tags:
- diagnostic
- experimental
- opinionated
- style
disabled-checks:
- importShadow
- unnamedResult
# unused
# - depguard
# - goheader
# - gomodguard
formatters:
enable:
- gci
- gofmt
- gofumpt
- goimports
# - golines
# deprecated
# - deadcode
# - structcheck
# - varcheck
# don't enable:
# - asciicheck
# - funlen
# - godox
# - goerr113
# - gomnd
# - interfacer
# - maligned
# - nestif
# - testpackage
# - wsl
settings:
gci:
sections:
- standard
- default
- localmodule

View file

@ -12,7 +12,7 @@
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI)
[![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper)
![Go Version](https://img.shields.io/badge/go%20version-%3E=1.21-61CFDD.svg?style=flat-square)
![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square)
[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper)
**Go configuration with fangs!**
@ -821,7 +821,7 @@ You can use your favorite format's marshaller with the config returned by `AllSe
```go
import (
yaml "gopkg.in/yaml.v2"
yaml "go.yaml.in/yaml/v3"
// ...
)

View file

@ -83,6 +83,27 @@ v := viper.NewWithOptions(
)
```
### BREAKING: "github.com/mitchellh/mapstructure" dependency replaced
The original [mapstructure](https://github.com/mitchellh/mapstructure) has been [archived](https://github.com/mitchellh/mapstructure/issues/349) and was replaced with a [fork](https://github.com/go-viper/mapstructure) maintained by Viper ([#1723](https://github.com/spf13/viper/pull/1723)).
As a result, the package import path needs to be changed in cases where `mapstructure` is directly referenced in your code.
For example, when providing a custom decoder config:
```go
err := viper.Unmarshal(&appConfig, func(config *mapstructure.DecoderConfig) {
config.TagName = "yaml"
})
```
The change is fairly straightforward, just replace all occurrences of the import path `github.com/mitchellh/mapstructure` with `github.com/go-viper/mapstructure/v2`:
```diff
- import "github.com/mitchellh/mapstructure"
+ import "github.com/go-viper/mapstructure/v2"
```
### BREAKING: HCL, Java properties, INI removed from core
In order to reduce third-party dependencies, Viper dropped support for the following formats from the core:

View file

@ -2,30 +2,32 @@
"nodes": {
"cachix": {
"inputs": {
"devenv": "devenv_2",
"devenv": [
"devenv"
],
"flake-compat": [
"devenv"
],
"git-hooks": [
"devenv",
"flake-compat"
"git-hooks"
],
"nixpkgs": [
"devenv",
"nixpkgs"
],
"pre-commit-hooks": [
"devenv",
"pre-commit-hooks"
]
},
"locked": {
"lastModified": 1712055811,
"narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=",
"lastModified": 1748883665,
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
"owner": "cachix",
"repo": "cachix",
"rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30",
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "latest",
"repo": "cachix",
"type": "github"
}
@ -33,52 +35,21 @@
"devenv": {
"inputs": {
"cachix": "cachix",
"flake-compat": "flake-compat_2",
"nix": "nix_2",
"nixpkgs": "nixpkgs_2",
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1724763216,
"narHash": "sha256-oW2bwCrJpIzibCNK6zfIDaIQw765yMAuMSG2gyZfGv0=",
"owner": "cachix",
"repo": "devenv",
"rev": "1e4ef61205b9aa20fe04bf1c468b6a316281c4f1",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"devenv_2": {
"inputs": {
"flake-compat": [
"devenv",
"cachix",
"flake-compat"
],
"flake-compat": "flake-compat",
"git-hooks": "git-hooks",
"nix": "nix",
"nixpkgs": "nixpkgs",
"poetry2nix": "poetry2nix",
"pre-commit-hooks": [
"devenv",
"cachix",
"pre-commit-hooks"
]
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1708704632,
"narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=",
"lastModified": 1755257397,
"narHash": "sha256-VU+OHexL2y6y7yrpEc6bZvYYwoQg6aZK1b4YxT0yZCk=",
"owner": "cachix",
"repo": "devenv",
"rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196",
"rev": "6f9c3d4722aa253631644329f7bda60b1d3d1b97",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "python-rewrite",
"repo": "devenv",
"type": "github"
}
@ -86,27 +57,11 @@
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"type": "github"
},
"original": {
@ -116,15 +71,37 @@
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"devenv",
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1733312601,
"narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_2": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1722555600,
"narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=",
"lastModified": 1754487366,
"narHash": "sha256-pHYj8gUBapuUzKV/kN/tR3Zvqc7o6gdFB9XKXIp1SQ8=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "8471fe90ad337a8074e957b69ca4d0089218391d",
"rev": "af66ad14b28a127c5c0f3bbb298218fc63528a18",
"type": "github"
},
"original": {
@ -133,39 +110,29 @@
"type": "github"
}
},
"flake-utils": {
"git-hooks": {
"inputs": {
"systems": "systems"
"flake-compat": [
"devenv",
"flake-compat"
],
"gitignore": "gitignore",
"nixpkgs": [
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1689068808,
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
"lastModified": 1750779888,
"narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
@ -173,7 +140,7 @@
"inputs": {
"nixpkgs": [
"devenv",
"pre-commit-hooks",
"git-hooks",
"nixpkgs"
]
},
@ -192,165 +159,49 @@
}
},
"nix": {
"inputs": {
"flake-compat": "flake-compat",
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression"
},
"locked": {
"lastModified": 1712911606,
"narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.21",
"repo": "nix",
"type": "github"
}
},
"nix-github-actions": {
"inputs": {
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"poetry2nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1688870561,
"narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "165b1650b753316aa7f1787f3005a8d2da0f5301",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
}
},
"nix_2": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"flake-parts": "flake-parts",
"git-hooks-nix": [
"devenv",
"git-hooks"
],
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression_2"
"nixpkgs-23-11": [
"devenv"
],
"nixpkgs-regression": [
"devenv"
]
},
"locked": {
"lastModified": 1712911606,
"narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
"owner": "domenkozar",
"lastModified": 1755029779,
"narHash": "sha256-3+GHIYGg4U9XKUN4rg473frIVNn8YD06bjwxKS1IPrU=",
"owner": "cachix",
"repo": "nix",
"rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
"rev": "b0972b0eee6726081d10b1199f54de6d2917f861",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.21",
"owner": "cachix",
"ref": "devenv-2.30",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1692808169,
"narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9201b5ff357e781bf014d0330d18555695df7ba8",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1722555339,
"narHash": "sha256-uFf2QeW7eAHlYXuDktm9c25OxOyCoUOQmh5SZ9amE5Q=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-regression_2": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1710695816,
"narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "614b4613980a522ba49f0d194531beddbb7220d3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1713361204,
"narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=",
"lastModified": 1750441195,
"narHash": "sha256-yke+pm+MdgRb6c0dPt8MgDhv7fcBbdjmv1ZceNTyzKg=",
"owner": "cachix",
"repo": "devenv-nixpkgs",
"rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6",
"rev": "0ceffe312871b443929ff3006960d29b120dc627",
"type": "github"
},
"original": {
@ -360,13 +211,28 @@
"type": "github"
}
},
"nixpkgs_3": {
"nixpkgs-lib": {
"locked": {
"lastModified": 1724748588,
"narHash": "sha256-NlpGA4+AIf1dKNq76ps90rxowlFXUsV9x7vK/mN37JM=",
"lastModified": 1753579242,
"narHash": "sha256-zvaMGVn14/Zz8hnp4VWT9xVnhc8vuL3TStRqwk22biA=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "0f36c44e01a6129be94e3ade315a5883f0228a6e",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1755268003,
"narHash": "sha256-nNaeJjo861wFR0tjHDyCnHs1rbRtrMgxAKMoig9Sj/w=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a6292e34000dc93d43bccf78338770c1c5ec8a99",
"rev": "32f313e49e42f715491e1ea7b306a87c16fe0388",
"type": "github"
},
"original": {
@ -376,94 +242,11 @@
"type": "github"
}
},
"poetry2nix": {
"inputs": {
"flake-utils": "flake-utils",
"nix-github-actions": "nix-github-actions",
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1692876271,
"narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=",
"owner": "nix-community",
"repo": "poetry2nix",
"rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "poetry2nix",
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"flake-utils": "flake-utils_2",
"gitignore": "gitignore",
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1713775815,
"narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"root": {
"inputs": {
"devenv": "devenv",
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs_3"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
"flake-parts": "flake-parts_2",
"nixpkgs": "nixpkgs_2"
}
}
},

View file

@ -7,51 +7,55 @@
devenv.url = "github:cachix/devenv";
};
outputs = inputs@{ flake-parts, ... }:
outputs =
inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [
inputs.devenv.flakeModule
];
systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ];
systems = [
"x86_64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
perSystem = { config, self', inputs', pkgs, system, ... }: rec {
devenv.shells = {
default = {
languages = {
go.enable = true;
go.package = pkgs.go_1_23;
};
perSystem =
{ pkgs, ... }:
{
devenv.shells = {
default = {
languages = {
go.enable = true;
};
pre-commit.hooks = {
nixpkgs-fmt.enable = true;
yamllint.enable = true;
};
git-hooks.hooks = {
nixpkgs-fmt.enable = true;
yamllint.enable = true;
};
packages = with pkgs; [
gnumake
packages = with pkgs; [
gnumake
golangci-lint
yamllint
];
golangci-lint
yamllint
];
scripts = {
versions.exec = ''
go version
golangci-lint version
scripts = {
versions.exec = ''
go version
golangci-lint version
'';
};
enterShell = ''
versions
'';
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
containers = pkgs.lib.mkForce { };
};
enterShell = ''
versions
'';
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
containers = pkgs.lib.mkForce { };
};
ci = devenv.shells.default;
};
};
};
}

View file

@ -1,6 +1,6 @@
package yaml
import "gopkg.in/yaml.v3"
import "go.yaml.in/yaml/v3"
// Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding.
type Codec struct{}

View file

@ -219,7 +219,10 @@ func (v *Viper) watchKeyValueConfigOnChannel() error {
for {
b := <-rc
reader := bytes.NewReader(b.Value)
v.unmarshalReader(reader, v.kvstore)
err := v.unmarshalReader(reader, v.kvstore)
if err != nil {
v.logger.Error(fmt.Errorf("failed to unmarshal remote config: %w", err).Error())
}
}
}(respc)
return nil

View file

@ -174,10 +174,7 @@ func parseSizeInBytes(sizeStr string) uint {
}
}
size := cast.ToInt(sizeStr)
if size < 0 {
size = 0
}
size := max(cast.ToInt(sizeStr), 0)
return safeMul(uint(size), multiplier)
}

View file

@ -376,7 +376,12 @@ func (v *Viper) WatchConfig() {
}
}
}()
watcher.Add(configDir)
err = watcher.Add(configDir)
if err != nil {
v.logger.Error(fmt.Sprintf("failed to add watcher: %s", err))
initWG.Done()
return
}
initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on...
eventsWG.Wait() // now, wait for event loop to end in this go-routine...
}()
@ -1181,11 +1186,26 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) any {
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return res
case "boolSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToBoolSlice(res)
case "intSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToIntSlice(res)
case "uintSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToUintSlice(res)
case "float64Slice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToFloat64Slice(res)
case "durationSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
@ -1268,11 +1288,26 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) any {
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return res
case "boolSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToBoolSlice(res)
case "intSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToIntSlice(res)
case "uintSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToUintSlice(res)
case "float64Slice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToFloat64Slice(res)
case "stringToString":
return stringToStringConv(flag.ValueString())
case "stringToInt":
@ -1670,7 +1705,10 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error {
}
buf := new(bytes.Buffer)
buf.ReadFrom(in)
_, err := buf.ReadFrom(in)
if err != nil {
return fmt.Errorf("failed to read configuration from input: %w", err)
}
// TODO: remove this once SupportedExts is deprecated/removed
if !slices.Contains(SupportedExts, format) {