Pg to bun (#148)

* start moving to bun

* changing more stuff

* more

* and yet more

* tests passing

* seems stable now

* more big changes

* small fix

* little fixes
This commit is contained in:
tobi 2021-08-25 15:34:33 +02:00 committed by GitHub
commit 2dc9fc1626
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
713 changed files with 98694 additions and 22704 deletions

View file

@ -1,20 +0,0 @@
sudo: false
language: go
go:
- 1.11.x
- 1.12.x
- 1.13.x
- tip
matrix:
allow_failures:
- go: tip
env:
- GO111MODULE=on
go_import_path: github.com/vmihailenco/bufpool
before_install:
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.21.0

View file

@ -1,23 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Juan Batiz-Benet
Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia
Copyright (c) 2019 Vladimir Mihailenco
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,6 +0,0 @@
all:
go test ./...
go test ./... -short -race
go test ./... -run=NONE -bench=. -benchmem
env GOOS=linux GOARCH=386 go test ./...
golangci-lint run

View file

@ -1,74 +0,0 @@
# bufpool
[![Build Status](https://travis-ci.org/vmihailenco/bufpool.svg)](https://travis-ci.org/vmihailenco/bufpool)
[![GoDoc](https://godoc.org/github.com/vmihailenco/bufpool?status.svg)](https://godoc.org/github.com/vmihailenco/bufpool)
bufpool is an implementation of a pool of byte buffers with anti-memory-waste protection. It is based on the code and ideas from these 2 projects:
- https://github.com/libp2p/go-buffer-pool
- https://github.com/valyala/bytebufferpool
bufpool consists of global pool of buffers that have a capacity of a power of 2 starting from 64 bytes to 32 megabytes. It also provides individual pools that maintain usage stats to provide buffers of the size that satisfies 95% of the calls. Global pool is used to reuse buffers between different parts of the app.
# Installation
``` go
go get github.com/vmihailenco/bufpool
```
# Usage
bufpool can be used as a replacement for `sync.Pool`:
``` go
var jsonPool bufpool.Pool // basically sync.Pool with usage stats
func writeJSON(w io.Writer, obj interface{}) error {
buf := jsonPool.Get()
defer jsonPool.Put(buf)
if err := json.NewEncoder(buf).Encode(obj); err != nil {
return err
}
_, err := w.Write(buf.Bytes())
return err
}
```
or to allocate buffer of the given size:
``` go
func writeHex(w io.Writer, data []byte) error {
n := hex.EncodedLen(len(data))
buf := bufpool.Get(n) // buf.Len() is guaranteed to equal n
defer bufpool.Put(buf)
tmp := buf.Bytes()
hex.Encode(tmp, data)
_, err := w.Write(tmp)
return err
}
```
If you need to append data to the buffer you can use following pattern:
``` go
buf := bufpool.Get(n)
defer bufpool.Put(buf)
bb := buf.Bytes()[:0]
bb = append(bb, ...)
buf.ResetBuf(bb)
```
You can also change default pool thresholds:
``` go
var jsonPool = bufpool.Pool{
ServePctile: 0.95, // serve p95 buffers
}
```

View file

@ -1,67 +0,0 @@
package bufpool
import (
"log"
"sync"
)
// thePool is the process-global pool backing the package-level Get and Put.
var thePool bufPool
// Get retrieves a buffer of the appropriate length from the buffer pool or
// allocates a new one. Get may choose to ignore the pool and treat it as empty.
// Callers should not assume any relation between values passed to Put and the
// values returned by Get.
//
// If no suitable buffer exists in the pool, Get creates one.
// It delegates to the process-global pool shared by the whole package.
func Get(length int) *Buffer {
	return thePool.Get(length)
}
// Put returns a buffer to the buffer pool. The buffer must not be used
// after Put: the pool marks it (see lock) so later use panics.
func Put(buf *Buffer) {
	thePool.Put(buf)
}
// bufPool is a set of sync.Pools, one per power-of-two size class
// (see index/indexSize in pool.go for the class layout).
type bufPool struct {
	pools [steps]sync.Pool
}
// Get returns a buffer of the requested length. Lengths above maxPoolSize
// bypass the pool entirely; otherwise the size-class pool selected by
// index(length) is consulted, and on a miss a fresh buffer with the full
// class capacity is allocated so it can be reused for any length in that class.
func (p *bufPool) Get(length int) *Buffer {
	if length > maxPoolSize {
		// Too large to ever be pooled; allocate exactly.
		return NewBuffer(make([]byte, length))
	}
	idx := index(length)
	if bufIface := p.pools[idx].Get(); bufIface != nil {
		buf := bufIface.(*Buffer)
		unlock(buf)
		if length > buf.Cap() {
			// Pool invariant violated: pools[idx] must only hold buffers
			// whose capacity covers every length mapping to idx. Emit a
			// diagnostic before the reslice below panics out of range.
			log.Printf("bufpool: pool %d returned undersized buffer (len=%d cap=%d need=%d)",
				idx, buf.Len(), buf.Cap(), length)
		}
		buf.buf = buf.buf[:length]
		return buf
	}
	b := make([]byte, length, indexSize(idx))
	return NewBuffer(b)
}
// Put returns buf to the size-class pool it can fully serve, or drops it
// when its capacity falls outside the poolable range.
func (p *bufPool) Put(buf *Buffer) {
	c := buf.Cap()
	if c < minSize || c > maxPoolSize {
		return // not poolable; let the GC reclaim it
	}
	// prevIndex picks a class whose capacity this buffer satisfies.
	lock(buf)
	p.pools[prevIndex(c)].Put(buf)
}
// lock marks buf as pooled by setting off past cap(buf.buf). Any use of the
// buffer while pooled trips the "Buffer is used after Put" panics in
// Reset/ResetBuf/grow; unlock clears the sentinel on Get.
func lock(buf *Buffer) {
	buf.buf = buf.buf[:cap(buf.buf)]
	buf.off = cap(buf.buf) + 1
}
// unlock clears the pooled sentinel set by lock, making buf usable again.
func unlock(buf *Buffer) {
	buf.off = 0
}

View file

@ -1,397 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bufpool
// Simple byte buffer for marshaling data.
import (
"bytes"
"errors"
"io"
"unicode/utf8"
)
// smallBufferSize is an initial allocation minimal capacity.
const smallBufferSize = 64

// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
//
// Unlike bytes.Buffer, off doubles as a "pooled" sentinel: the pool sets
// off > cap(buf) on Put (see lock), and Reset/grow panic if they see it.
type Buffer struct {
	buf      []byte // contents are the bytes buf[off : len(buf)]
	off      int    // read at &buf[off], write at &buf[len(buf)]
	lastRead readOp // last read operation, so that Unread* can work correctly.
}

// The readOp constants describe the last action performed on
// the buffer, so that UnreadRune and UnreadByte can check for
// invalid usage. opReadRuneX constants are chosen such that
// converted to int they correspond to the rune size that was read.
type readOp int8

// Don't use iota for these, as the values need to correspond with the
// names and comments, which is easier to see when being explicit.
const (
	opRead      readOp = -1 // Any other read operation.
	opInvalid   readOp = 0  // Non-read operation.
	opReadRune1 readOp = 1  // Read rune of size 1.
)

var errNegativeRead = errors.New("bytes.Buffer: reader returned negative count from Read")

const maxInt = int(^uint(0) >> 1)
// Bytes returns a slice of length b.Len() holding the unread portion of the buffer.
// The slice is valid for use only until the next buffer modification (that is,
// only until the next call to a method like Read, Write, Reset, or Truncate).
// The slice aliases the buffer content at least until the next buffer modification,
// so immediate changes to the slice will affect the result of future reads.
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
// String returns the contents of the unread portion of the buffer
// as a string. If the Buffer is a nil pointer, it returns "<nil>".
//
// To build strings more efficiently, see the strings.Builder type.
func (b *Buffer) String() string {
	if b == nil {
		// Special case, useful in debugging.
		return "<nil>"
	}
	// Copies: the string does not alias the buffer.
	return string(b.buf[b.off:])
}
// empty reports whether the unread portion of the buffer is empty.
func (b *Buffer) empty() bool { return len(b.buf) <= b.off }
// Len returns the number of bytes of the unread portion of the buffer;
// b.Len() == len(b.Bytes()).
func (b *Buffer) Len() int { return len(b.buf) - b.off }
// Cap returns the capacity of the buffer's underlying byte slice, that is, the
// total space allocated for the buffer's data.
func (b *Buffer) Cap() int { return cap(b.buf) }
// Truncate discards all but the first n unread bytes from the buffer
// but continues to use the same allocated storage.
// It panics if n is negative or greater than the length of the buffer.
func (b *Buffer) Truncate(n int) {
	if n == 0 {
		// Truncate(0) is exactly Reset, which also recovers read-over space.
		b.Reset()
		return
	}
	b.lastRead = opInvalid
	if n < 0 || n > b.Len() {
		panic("bytes.Buffer: truncation out of range")
	}
	// Keep the read offset; drop everything past the first n unread bytes.
	b.buf = b.buf[:b.off+n]
}
// tryGrowByReslice is an inlineable version of grow for the fast-case where the
// internal buffer only needs to be resliced (spare capacity already exists).
// It returns the index where bytes should be written and whether it succeeded.
func (b *Buffer) tryGrowByReslice(n int) (int, bool) {
	if l := len(b.buf); n <= cap(b.buf)-l {
		b.buf = b.buf[:l+n]
		return l, true
	}
	return 0, false
}
// Grow grows the buffer's capacity, if necessary, to guarantee space for
// another n bytes. After Grow(n), at least n bytes can be written to the
// buffer without another allocation.
// If n is negative, Grow will panic.
// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) Grow(n int) {
	if n < 0 {
		panic("bytes.Buffer.Grow: negative count")
	}
	m := b.grow(n)
	// grow extended len by n; shrink back so the space is capacity only.
	b.buf = b.buf[:m]
}
// Write appends the contents of p to the buffer, growing the buffer as
// needed. The return value n is the length of p; err is always nil. If the
// buffer becomes too large, Write will panic with ErrTooLarge.
func (b *Buffer) Write(p []byte) (n int, err error) {
b.lastRead = opInvalid
m, ok := b.tryGrowByReslice(len(p))
if !ok {
m = b.grow(len(p))
}
return copy(b.buf[m:], p), nil
}
// WriteString appends the contents of s to the buffer, growing the buffer as
// needed. The return value n is the length of s; err is always nil. If the
// buffer becomes too large, WriteString will panic with ErrTooLarge.
func (b *Buffer) WriteString(s string) (n int, err error) {
b.lastRead = opInvalid
m, ok := b.tryGrowByReslice(len(s))
if !ok {
m = b.grow(len(s))
}
return copy(b.buf[m:], s), nil
}
// minRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least minRead bytes beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer. (Unexported here, unlike bytes.MinRead.)
const minRead = 512
// ReadFrom reads data from r until EOF and appends it to the buffer, growing
// the buffer as needed. The return value n is the number of bytes read. Any
// error except io.EOF encountered during the read is also returned. If the
// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
	b.lastRead = opInvalid
	for {
		// Ensure at least minRead bytes of spare capacity, then shrink
		// len back to i so the Read below appends after existing content.
		i := b.grow(minRead)
		b.buf = b.buf[:i]
		m, e := r.Read(b.buf[i:cap(b.buf)])
		if m < 0 {
			panic(errNegativeRead)
		}
		b.buf = b.buf[:i+m]
		n += int64(m)
		if e == io.EOF {
			return n, nil // e is EOF, so return nil explicitly
		}
		if e != nil {
			return n, e
		}
	}
}
// WriteTo writes data to w until the buffer is drained or an error occurs.
// The return value n is the number of bytes written; it always fits into an
// int, but it is int64 to match the io.WriterTo interface. Any error
// encountered during the write is also returned.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
	b.lastRead = opInvalid
	if nBytes := b.Len(); nBytes > 0 {
		m, e := w.Write(b.buf[b.off:])
		if m > nBytes {
			// A Writer reporting more bytes than it was handed violates
			// the io.Writer contract.
			panic("bytes.Buffer.WriteTo: invalid Write count")
		}
		b.off += m
		n = int64(m)
		if e != nil {
			return n, e
		}
		// all bytes should have been written, by definition of
		// Write method in io.Writer
		if m != nBytes {
			return n, io.ErrShortWrite
		}
	}
	// Buffer is now empty; reset.
	b.Reset()
	return n, nil
}
// WriteByte appends the byte c to the buffer, growing the buffer as needed.
// The returned error is always nil, but is included to match bufio.Writer's
// WriteByte. If the buffer becomes too large, WriteByte will panic with
// ErrTooLarge.
func (b *Buffer) WriteByte(c byte) error {
b.lastRead = opInvalid
m, ok := b.tryGrowByReslice(1)
if !ok {
m = b.grow(1)
}
b.buf[m] = c
return nil
}
// WriteRune appends the UTF-8 encoding of Unicode code point r to the
// buffer, returning its length and an error, which is always nil but is
// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
// if it becomes too large, WriteRune will panic with ErrTooLarge.
func (b *Buffer) WriteRune(r rune) (n int, err error) {
if r < utf8.RuneSelf {
_ = b.WriteByte(byte(r))
return 1, nil
}
b.lastRead = opInvalid
m, ok := b.tryGrowByReslice(utf8.UTFMax)
if !ok {
m = b.grow(utf8.UTFMax)
}
n = utf8.EncodeRune(b.buf[m:m+utf8.UTFMax], r)
b.buf = b.buf[:m+n]
return n, nil
}
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
// buffer has no data to return, err is io.EOF (unless len(p) is zero);
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err error) {
b.lastRead = opInvalid
if b.empty() {
// Buffer is empty, reset to recover space.
b.Reset()
if len(p) == 0 {
return 0, nil
}
return 0, io.EOF
}
n = copy(p, b.buf[b.off:])
b.off += n
if n > 0 {
b.lastRead = opRead
}
return n, nil
}
// Next returns a slice containing the next n bytes from the buffer,
// advancing the buffer as if the bytes had been returned by Read.
// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
// The slice is only valid until the next call to a read or write method.
func (b *Buffer) Next(n int) []byte {
b.lastRead = opInvalid
m := b.Len()
if n > m {
n = m
}
data := b.buf[b.off : b.off+n]
b.off += n
if n > 0 {
b.lastRead = opRead
}
return data
}
// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error io.EOF.
func (b *Buffer) ReadByte() (byte, error) {
if b.empty() {
// Buffer is empty, reset to recover space.
b.Reset()
return 0, io.EOF
}
c := b.buf[b.off]
b.off++
b.lastRead = opRead
return c, nil
}
// ReadRune reads and returns the next UTF-8-encoded
// Unicode code point from the buffer.
// If no bytes are available, the error returned is io.EOF.
// If the bytes are an erroneous UTF-8 encoding, it
// consumes one byte and returns U+FFFD, 1.
func (b *Buffer) ReadRune() (r rune, size int, err error) {
if b.empty() {
// Buffer is empty, reset to recover space.
b.Reset()
return 0, 0, io.EOF
}
c := b.buf[b.off]
if c < utf8.RuneSelf {
b.off++
b.lastRead = opReadRune1
return rune(c), 1, nil
}
r, n := utf8.DecodeRune(b.buf[b.off:])
b.off += n
b.lastRead = readOp(n)
return r, n, nil
}
// UnreadRune unreads the last rune returned by ReadRune.
// If the most recent read or write operation on the buffer was
// not a successful ReadRune, UnreadRune returns an error. (In this regard
// it is stricter than UnreadByte, which will unread the last byte
// from any read operation.)
func (b *Buffer) UnreadRune() error {
if b.lastRead <= opInvalid {
return errors.New("bytes.Buffer: UnreadRune: previous operation was not a successful ReadRune")
}
if b.off >= int(b.lastRead) {
b.off -= int(b.lastRead)
}
b.lastRead = opInvalid
return nil
}
var errUnreadByte = errors.New("bytes.Buffer: UnreadByte: previous operation was not a successful read")
// UnreadByte unreads the last byte returned by the most recent successful
// read operation that read at least one byte. If a write has happened since
// the last read, if the last read returned an error, or if the read read zero
// bytes, UnreadByte returns an error.
func (b *Buffer) UnreadByte() error {
if b.lastRead == opInvalid {
return errUnreadByte
}
b.lastRead = opInvalid
if b.off > 0 {
b.off--
}
return nil
}
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
slice, err := b.readSlice(delim)
// return a copy of slice. The buffer's backing array may
// be overwritten by later calls.
line = append(line, slice...)
return line, err
}
// readSlice is like ReadBytes but returns a reference to internal buffer data.
// The returned slice is only valid until the next buffer mutation.
func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
	i := bytes.IndexByte(b.buf[b.off:], delim)
	end := b.off + i + 1
	if i < 0 {
		// No delimiter: consume everything unread and report io.EOF.
		end = len(b.buf)
		err = io.EOF
	}
	line = b.buf[b.off:end]
	b.off = end
	b.lastRead = opRead
	return line, err
}
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadString returns err != nil if and only if the returned data does not end
// in delim.
func (b *Buffer) ReadString(delim byte) (line string, err error) {
slice, err := b.readSlice(delim)
return string(slice), err
}
// NewBuffer creates and initializes a new Buffer using buf as its
// initial contents. The new Buffer takes ownership of buf, and the
// caller should not use buf after this call. NewBuffer is intended to
// prepare a Buffer to read existing data. It can also be used to set
// the initial size of the internal buffer for writing. To do that,
// buf should have the desired capacity but a length of zero.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
// NewBufferString creates and initializes a new Buffer whose initial
// contents are a copy of the string s. It is intended to prepare a buffer
// to read an existing string.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBufferString(s string) *Buffer {
	contents := []byte(s)
	return NewBuffer(contents)
}

View file

@ -1,66 +0,0 @@
package bufpool
import "bytes"
// Reset resets the buffer to be empty,
// but it retains the underlying storage for use by future writes.
// Reset is the same as Truncate(0).
// It panics if the buffer was returned to the pool (off > cap is the
// "pooled" sentinel set by lock).
func (b *Buffer) Reset() {
	if b.off > cap(b.buf) {
		panic("Buffer is used after Put")
	}
	b.buf = b.buf[:0]
	b.off = 0
	b.lastRead = opInvalid
}
// ResetBuf resets the buffer to be empty and swaps in buf's backing array
// as the new storage (the buffer takes ownership of buf). It supports the
// append pattern documented in the README: bb := buf.Bytes()[:0];
// bb = append(bb, ...); buf.ResetBuf(bb).
// It panics if the buffer was returned to the pool.
func (b *Buffer) ResetBuf(buf []byte) {
	if b.off > cap(b.buf) {
		panic("Buffer is used after Put")
	}
	b.buf = buf[:0]
	b.off = 0
	b.lastRead = opInvalid
}
// grow grows the buffer to guarantee space for n more bytes.
// It returns the index where bytes should be written.
// If the buffer can't grow it will panic with ErrTooLarge.
//
// Unlike bytes.Buffer.grow, reallocation goes through the package pool
// (Get/Put), so discarded backing arrays are recycled.
func (b *Buffer) grow(n int) int {
	if b.off > cap(b.buf) {
		panic("Buffer is used after Put") // pooled sentinel set by lock()
	}
	m := b.Len()
	// If buffer is empty, reset to recover space.
	if m == 0 && b.off != 0 {
		b.Reset()
	}
	// Try to grow by means of a reslice.
	if i, ok := b.tryGrowByReslice(n); ok {
		return i
	}
	if b.buf == nil && n <= smallBufferSize {
		b.buf = make([]byte, n, smallBufferSize)
		return 0
	}
	c := cap(b.buf)
	if n <= c/2-m {
		// We can slide things down instead of allocating a new
		// slice. We only need m+n <= c to slide, but
		// we instead let capacity get twice as large so we
		// don't spend all our time copying.
		copy(b.buf, b.buf[b.off:])
	} else if c > maxInt-c-n {
		panic(bytes.ErrTooLarge)
	} else {
		// Not enough space anywhere, we need to allocate.
		tmp := Get(2*c + n)
		copy(tmp.buf, b.buf[b.off:])
		// Swap backing arrays and return the old one to the pool.
		b.buf, tmp.buf = tmp.buf, b.buf
		Put(tmp)
	}
	// Restore b.off and len(b.buf).
	b.off = 0
	b.buf = b.buf[:m+n]
	return m
}

View file

@ -1,9 +0,0 @@
module github.com/vmihailenco/bufpool
go 1.13
require (
github.com/kr/pretty v0.1.0 // indirect
github.com/stretchr/testify v1.5.1
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
)

View file

@ -1,17 +0,0 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View file

@ -1,148 +0,0 @@
package bufpool
import (
"math/bits"
"sync/atomic"
)
// Size classes are powers of two from minSize (64 B) through maxSize (32 MB);
// Put additionally accepts capacities up to maxPoolSize (64 MB) into the last class.
const (
	minBitSize = 6 // 2**6=64 is a CPU cache line size
	steps      = 20

	minSize     = 1 << minBitSize               // 64 bytes
	maxSize     = 1 << (minBitSize + steps - 1) // 32 mb
	maxPoolSize = maxSize << 1                  // 64 mb

	defaultServePctile      = 0.95  // serve buffers big enough for 95% of calls
	calibrateCallsThreshold = 42000 // recompute serveSize after this many UpdateLen calls
	defaultSize             = 4096  // served before any usage stats exist
)
// Pool represents byte buffer pool.
//
// Different pools should be used for different usage patterns to achieve better
// performance and lower memory usage.
type Pool struct {
	calls       [steps]uint32 // per-size-class call counters, mutated atomically
	calibrating uint32        // CAS guard so only one goroutine calibrates at a time

	ServePctile float64 // default is 0.95

	serveSize uint32 // cached capacity served by Get; recomputed by calibrate
}
// getServeSize returns the buffer capacity Get should serve. Before the
// first calibration it seeds serveSize from the smallest size class that
// has seen more than 10 calls, falling back to defaultSize when there is
// not enough usage data yet.
func (p *Pool) getServeSize() int {
	size := atomic.LoadUint32(&p.serveSize)
	if size > 0 {
		return int(size)
	}

	for i := 0; i < len(p.calls); i++ {
		calls := atomic.LoadUint32(&p.calls[i])
		if calls > 10 {
			size := indexSize(i)
			// CAS so a concurrent calibrate's value is not overwritten.
			atomic.CompareAndSwapUint32(&p.serveSize, 0, uint32(size))
			return size
		}
	}

	return defaultSize
}
// Get returns an empty buffer from the pool. Returned buffer capacity
// is determined by accumulated usage stats and changes over time.
//
// The buffer may be returned to the pool using Put or retained for further
// usage. In latter case buffer length must be updated using UpdateLen.
func (p *Pool) Get() *Buffer {
	buf := Get(p.getServeSize())
	// Global-pool buffers arrive with len == capacity; empty it for writing.
	buf.Reset()
	return buf
}
// New returns an empty buffer bypassing the pool. Returned buffer capacity
// is determined by accumulated usage stats and changes over time.
func (p *Pool) New() *Buffer {
	return NewBuffer(make([]byte, 0, p.getServeSize()))
}
// Put returns buffer to the pool and records its size in the usage stats.
func (p *Pool) Put(buf *Buffer) {
	length := buf.Len()
	if length == 0 {
		// Buffer was already reset/drained; its capacity is the best
		// available signal of how big it needed to be.
		length = buf.Cap()
	}
	p.UpdateLen(length)

	// Always put buf to the pool.
	Put(buf)
}
// UpdateLen updates stats about buffer length, and triggers a
// recalibration of the served size once enough calls have accumulated
// in any single size class.
func (p *Pool) UpdateLen(bufLen int) {
	idx := index(bufLen)
	if atomic.AddUint32(&p.calls[idx], 1) > calibrateCallsThreshold {
		p.calibrate()
	}
}
// calibrate recomputes serveSize as the smallest size class whose
// cumulative call count reaches the configured percentile of all calls.
// Counters are swapped to zero as they are read, so each calibration
// window starts fresh. Guarded by the calibrating flag so concurrent
// callers return immediately.
func (p *Pool) calibrate() {
	if !atomic.CompareAndSwapUint32(&p.calibrating, 0, 1) {
		return
	}

	var callSum uint64
	var calls [steps]uint32

	for i := 0; i < len(p.calls); i++ {
		n := atomic.SwapUint32(&p.calls[i], 0)
		calls[i] = n
		callSum += uint64(n)
	}

	// serveSum is the call count at which the percentile is reached.
	serveSum := uint64(float64(callSum) * p.getServePctile())
	var serveSize int

	callSum = 0
	for i, numCall := range &calls {
		callSum += uint64(numCall)
		if serveSize == 0 && callSum >= serveSum {
			serveSize = indexSize(i)
			break
		}
	}

	atomic.StoreUint32(&p.serveSize, uint32(serveSize))
	atomic.StoreUint32(&p.calibrating, 0)
}
// getServePctile returns the configured serve percentile, defaulting to
// defaultServePctile (0.95) when ServePctile is unset or non-positive.
func (p *Pool) getServePctile() float64 {
	if p.ServePctile > 0 {
		return p.ServePctile
	}
	return defaultServePctile
}
// index maps byte length n onto its size-class index: the smallest class
// whose capacity (minSize << idx) is >= n, clamped to the last class.
func index(n int) int {
	if n == 0 {
		return 0
	}
	// bits.Len32 of (n-1)>>minBitSize is ceil(log2(n/minSize)) for n > minSize.
	shifted := uint32((n - 1) >> minBitSize)
	idx := bits.Len32(shifted)
	if idx < steps {
		return idx
	}
	return steps - 1
}
// prevIndex returns the size class one below index(n), unless n sits
// exactly on a class boundary (or is already class 0), in which case that
// class itself is returned. Put uses it so a buffer is only pooled into a
// class whose capacity it can fully satisfy.
func prevIndex(n int) int {
	idx := index(n)
	if idx > 0 && n != indexSize(idx) {
		idx--
	}
	return idx
}
// indexSize returns the buffer capacity of size class idx: minSize << idx.
func indexSize(idx int) int {
	return minSize << uint(idx)
}

View file

@ -1,24 +0,0 @@
dist: xenial
sudo: false
language: go
go:
- 1.11.x
- 1.12.x
- tip
matrix:
allow_failures:
- go: tip
env:
- GO111MODULE=on
go_import_path: github.com/vmihailenco/tagparser
before_install:
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1
script:
- make
- golangci-lint run

View file

@ -1,25 +0,0 @@
Copyright (c) 2019 The github.com/vmihailenco/tagparser Authors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,8 +0,0 @@
all:
go test ./...
go test ./... -short -race
go test ./... -run=NONE -bench=. -benchmem
env GOOS=linux GOARCH=386 go test ./...
go vet ./...
go get github.com/gordonklaus/ineffassign
ineffassign .

View file

@ -1,24 +0,0 @@
# Opinionated Golang tag parser
[![Build Status](https://travis-ci.org/vmihailenco/tagparser.png?branch=master)](https://travis-ci.org/vmihailenco/tagparser)
[![GoDoc](https://godoc.org/github.com/vmihailenco/tagparser?status.svg)](https://godoc.org/github.com/vmihailenco/tagparser)
## Installation
Install:
```shell
go get -u github.com/vmihailenco/tagparser
```
## Quickstart
```go
func ExampleParse() {
tag := tagparser.Parse("some_name,key:value,key2:'complex value'")
fmt.Println(tag.Name)
fmt.Println(tag.Options)
// Output: some_name
// map[key:value key2:'complex value']
}
```

View file

@ -1,3 +0,0 @@
module github.com/vmihailenco/tagparser
go 1.13

View file

@ -1,82 +0,0 @@
package parser
import (
"bytes"
"github.com/vmihailenco/tagparser/internal"
)
// Parser is a cursor over a byte slice: b is the input and i is the
// current read offset into it.
type Parser struct {
	b []byte
	i int
}
func New(b []byte) *Parser {
return &Parser{
b: b,
}
}
func NewString(s string) *Parser {
return New(internal.StringToBytes(s))
}
func (p *Parser) Bytes() []byte {
return p.b[p.i:]
}
func (p *Parser) Valid() bool {
return p.i < len(p.b)
}
func (p *Parser) Read() byte {
if p.Valid() {
c := p.b[p.i]
p.Advance()
return c
}
return 0
}
func (p *Parser) Peek() byte {
if p.Valid() {
return p.b[p.i]
}
return 0
}
func (p *Parser) Advance() {
p.i++
}
func (p *Parser) Skip(skip byte) bool {
if p.Peek() == skip {
p.Advance()
return true
}
return false
}
func (p *Parser) SkipBytes(skip []byte) bool {
if len(skip) > len(p.b[p.i:]) {
return false
}
if !bytes.Equal(p.b[p.i:p.i+len(skip)], skip) {
return false
}
p.i += len(skip)
return true
}
// ReadSep consumes input up to the next occurrence of sep. It returns the
// bytes before sep (sep itself is consumed but not returned) and true; when
// sep does not occur it returns the remaining input and false.
func (p *Parser) ReadSep(sep byte) ([]byte, bool) {
	rest := p.b[p.i:]
	ind := bytes.IndexByte(rest, sep)
	if ind < 0 {
		p.i = len(p.b)
		return rest, false
	}
	p.i += ind + 1
	return rest[:ind], true
}

View file

@ -1,11 +0,0 @@
// +build appengine
package internal
// BytesToString converts b to a string by copying (appengine-safe build).
func BytesToString(b []byte) string {
	return string(b)
}
// StringToBytes converts s to a byte slice by copying (appengine-safe build).
func StringToBytes(s string) []byte {
	return []byte(s)
}

View file

@ -1,22 +0,0 @@
// +build !appengine
package internal
import (
"unsafe"
)
// BytesToString converts byte slice to string without copying. The caller
// must not mutate b afterwards: the string shares b's memory.
func BytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}
// StringToBytes converts string to byte slice without copying. The result
// must never be written to: it aliases the (immutable) string data. The
// anonymous struct extends a string header with a Cap field so its layout
// matches a slice header.
func StringToBytes(s string) []byte {
	return *(*[]byte)(unsafe.Pointer(
		&struct {
			string
			Cap int
		}{s, len(s)},
	))
}

View file

@ -1,181 +0,0 @@
package tagparser
import (
"strings"
"github.com/vmihailenco/tagparser/internal/parser"
)
// Tag is the parsed form of a struct tag value: a leading Name followed
// by key:value (or bare flag) Options.
type Tag struct {
	Name    string
	Options map[string]string
}
// HasOption reports whether the tag carries the named option.
func (t *Tag) HasOption(name string) bool {
	_, found := t.Options[name]
	return found
}
// Parse parses a tag string such as "some_name,key:value,key2:'complex value'"
// into a Tag. The first bare item becomes the Name; the rest become Options.
func Parse(s string) *Tag {
	p := &tagParser{
		Parser: parser.NewString(s),
	}
	p.parseKey()
	return &p.Tag
}
// tagParser is the recursive-descent state for Parse: the embedded Parser
// is the input cursor, Tag accumulates the result, hasName records whether
// the leading name has been consumed, and key holds the option key
// currently being parsed.
type tagParser struct {
	*parser.Parser

	Tag     Tag
	hasName bool
	key     string
}
// setTagOption records one parsed item. The first keyless item becomes the
// tag Name; after that, keyless items are stored as flag options (value
// used as the key, empty value), and keyed items as key -> value.
// Both key and value are trimmed of surrounding whitespace.
func (p *tagParser) setTagOption(key, value string) {
	key = strings.TrimSpace(key)
	value = strings.TrimSpace(value)

	if !p.hasName {
		p.hasName = true
		if key == "" {
			p.Tag.Name = value
			return
		}
	}

	if p.Tag.Options == nil {
		p.Tag.Options = make(map[string]string)
	}
	if key == "" {
		p.Tag.Options[value] = ""
	} else {
		p.Tag.Options[key] = value
	}
}
// parseKey scans the next item up to ',' (bare item), ':' (start of a
// keyed value) or '\'' (start of a quoted value), recursing to parse the
// remainder. A trailing unterminated item is recorded as a bare option.
func (p *tagParser) parseKey() {
	p.key = ""

	var b []byte
	for p.Valid() {
		c := p.Read()
		switch c {
		case ',':
			p.Skip(' ')
			p.setTagOption("", string(b))
			p.parseKey()
			return
		case ':':
			p.key = string(b)
			p.parseValue()
			return
		case '\'':
			p.parseQuotedValue()
			return
		default:
			b = append(b, c)
		}
	}

	if len(b) > 0 {
		p.setTagOption("", string(b))
	}
}
// parseValue scans the value after "key:". Backslash escapes the next
// byte, '(' switches to bracket-balanced reading (so commas inside
// parentheses are kept), ',' terminates the value and recurses into the
// next key, and a leading quote defers to parseQuotedValue.
func (p *tagParser) parseValue() {
	const quote = '\''

	c := p.Peek()
	if c == quote {
		p.Skip(quote)
		p.parseQuotedValue()
		return
	}

	var b []byte
	for p.Valid() {
		c = p.Read()
		switch c {
		case '\\':
			b = append(b, p.Read())
		case '(':
			b = append(b, c)
			b = p.readBrackets(b)
		case ',':
			p.Skip(' ')
			p.setTagOption(p.key, string(b))
			p.parseKey()
			return
		default:
			b = append(b, c)
		}
	}
	// Input exhausted: record whatever was read as the final value.
	p.setTagOption(p.key, string(b))
}
// readBrackets appends input to b until the parenthesis that was already
// consumed by the caller is balanced (lvl tracks additional nesting;
// backslash escapes the next byte). Returns the extended slice.
func (p *tagParser) readBrackets(b []byte) []byte {
	var lvl int
loop:
	for p.Valid() {
		c := p.Read()
		switch c {
		case '\\':
			b = append(b, p.Read())
		case '(':
			b = append(b, c)
			lvl++
		case ')':
			b = append(b, c)
			lvl--
			if lvl < 0 {
				// Closed the caller's opening '('; done.
				break loop
			}
		default:
			b = append(b, c)
		}
	}
	return b
}
// parseQuotedValue reads a single-quoted value whose opening quote has
// already been consumed. The quotes are preserved in the stored value;
// a quote preceded by backslash is kept literally (backslash removed).
// Afterwards an optional ", " is skipped and parsing continues with the
// next key.
func (p *tagParser) parseQuotedValue() {
	const quote = '\''

	var b []byte
	b = append(b, quote)

	for p.Valid() {
		bb, ok := p.ReadSep(quote)
		if !ok {
			// Unterminated quote: keep the remainder as-is.
			b = append(b, bb...)
			break
		}

		// The byte before the quote was a backslash: escaped quote,
		// keep scanning for the real closing quote.
		if len(bb) > 0 && bb[len(bb)-1] == '\\' {
			b = append(b, bb[:len(bb)-1]...)
			b = append(b, quote)
			continue
		}

		b = append(b, bb...)
		b = append(b, quote)
		break
	}

	p.setTagOption(p.key, string(b))
	if p.Skip(',') {
		p.Skip(' ')
	}
	p.parseKey()
}
// Unquote strips one level of surrounding single quotes from s.
// The boolean result reports whether anything was stripped.
func Unquote(s string) (string, bool) {
	const quote = '\''

	n := len(s)
	if n >= 2 && s[0] == quote && s[n-1] == quote {
		return s[1 : n-1], true
	}
	return s, false
}