Grand test fixup (#138)

* start fixing up tests

* fix up tests + automate with drone

* fiddle with linting

* messing about with drone.yml

* some more fiddling

* hmmm

* add cache

* add vendor directory

* verbose

* ci updates

* update some little things

* update sig

This commit is contained in:
commit 98263a7de6
Tobi Smethurst, 2021-08-12 21:03:24 +02:00, committed by GitHub
No known key found for this signature in database (GPG key ID: 4AEE18F83AFDEB23)
2677 changed files with 1090869 additions and 219 deletions

26
vendor/github.com/go-pg/pg/v10/internal/context.go generated vendored Normal file

@@ -0,0 +1,26 @@
package internal
import (
"context"
"time"
)
type UndoneContext struct {
context.Context
}
func UndoContext(ctx context.Context) UndoneContext {
return UndoneContext{Context: ctx}
}
func (UndoneContext) Deadline() (deadline time.Time, ok bool) {
return time.Time{}, false
}
func (UndoneContext) Done() <-chan struct{} {
return nil
}
func (UndoneContext) Err() error {
return nil
}
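
UndoneContext keeps the values of its parent context but reports no deadline, no done channel, and no error, so cancellation of the parent no longer propagates. A self-contained sketch of the same idea (illustrative only; Go does not allow importing another module's internal packages):

package main

import (
    "context"
    "fmt"
    "time"
)

// detachedContext mirrors UndoneContext above: it embeds a parent context for
// its values but overrides Deadline/Done/Err so cancellation no longer applies.
type detachedContext struct{ context.Context }

func (detachedContext) Deadline() (time.Time, bool) { return time.Time{}, false }
func (detachedContext) Done() <-chan struct{}       { return nil }
func (detachedContext) Err() error                  { return nil }

type ctxKey struct{}

func main() {
    parent, cancel := context.WithCancel(context.WithValue(context.Background(), ctxKey{}, "v"))
    cancel()

    d := detachedContext{parent}
    fmt.Println(parent.Err())      // context canceled
    fmt.Println(d.Err())           // <nil>, cancellation is hidden
    fmt.Println(d.Value(ctxKey{})) // v, values still pass through
}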

61
vendor/github.com/go-pg/pg/v10/internal/error.go generated vendored Normal file

@@ -0,0 +1,61 @@
package internal
import (
"fmt"
)
var (
ErrNoRows = Errorf("pg: no rows in result set")
ErrMultiRows = Errorf("pg: multiple rows in result set")
)
type Error struct {
s string
}
func Errorf(s string, args ...interface{}) Error {
return Error{s: fmt.Sprintf(s, args...)}
}
func (err Error) Error() string {
return err.s
}
type PGError struct {
m map[byte]string
}
func NewPGError(m map[byte]string) PGError {
return PGError{
m: m,
}
}
func (err PGError) Field(k byte) string {
return err.m[k]
}
func (err PGError) IntegrityViolation() bool {
switch err.Field('C') {
case "23000", "23001", "23502", "23503", "23505", "23514", "23P01":
return true
default:
return false
}
}
func (err PGError) Error() string {
return fmt.Sprintf("%s #%s %s",
err.Field('S'), err.Field('C'), err.Field('M'))
}
func AssertOneRow(l int) error {
switch {
case l == 0:
return ErrNoRows
case l > 1:
return ErrMultiRows
default:
return nil
}
}
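
For illustration, the 'C' field checked by IntegrityViolation is the PostgreSQL SQLSTATE code, and the listed values are the class-23 constraint violations. A self-contained sketch of the same check (the names here are illustrative, not the vendored API):

package main

import "fmt"

// isIntegrityViolation mirrors PGError.IntegrityViolation above: it reports
// whether a SQLSTATE code belongs to class 23 (integrity constraint violation).
func isIntegrityViolation(sqlstate string) bool {
    switch sqlstate {
    case "23000", "23001", "23502", "23503", "23505", "23514", "23P01":
        return true
    default:
        return false
    }
}

func main() {
    fmt.Println(isIntegrityViolation("23505")) // true: unique_violation
    fmt.Println(isIntegrityViolation("42P01")) // false: undefined_table
}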

27
vendor/github.com/go-pg/pg/v10/internal/internal.go generated vendored Normal file

@@ -0,0 +1,27 @@
/*
internal is a private internal package.
*/
package internal
import (
"math/rand"
"time"
)
func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
if retry < 0 {
panic("not reached")
}
if minBackoff == 0 {
return 0
}
d := minBackoff << uint(retry)
d = minBackoff + time.Duration(rand.Int63n(int64(d)))
if d > maxBackoff || d < minBackoff {
d = maxBackoff
}
return d
}
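
RetryBackoff doubles the minimum backoff each retry, adds random jitter, and caps the result at maxBackoff (the d < minBackoff branch also catches integer overflow of the shift). A self-contained sketch showing the values it produces:

package main

import (
    "fmt"
    "math/rand"
    "time"
)

// retryBackoff mirrors RetryBackoff above: the result lies in
// [min, min + min<<retry), clamped to max.
func retryBackoff(retry int, min, max time.Duration) time.Duration {
    if min == 0 {
        return 0
    }
    d := min << uint(retry)
    d = min + time.Duration(rand.Int63n(int64(d)))
    if d > max || d < min {
        d = max
    }
    return d
}

func main() {
    for retry := 0; retry < 5; retry++ {
        fmt.Printf("retry %d: %v\n", retry, retryBackoff(retry, 250*time.Millisecond, 4*time.Second))
    }
    // retry 0 stays within 250-500ms; by retry 4 the jittered value is
    // frequently clamped to the 4s maximum.
}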

28
vendor/github.com/go-pg/pg/v10/internal/log.go generated vendored Normal file

@@ -0,0 +1,28 @@
package internal
import (
"context"
"fmt"
"log"
"os"
)
var Warn = log.New(os.Stderr, "WARN: pg: ", log.LstdFlags)
var Deprecated = log.New(os.Stderr, "DEPRECATED: pg: ", log.LstdFlags)
type Logging interface {
Printf(ctx context.Context, format string, v ...interface{})
}
type logger struct {
log *log.Logger
}
func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
_ = l.log.Output(2, fmt.Sprintf(format, v...))
}
var Logger Logging = &logger{
log: log.New(os.Stderr, "pg: ", log.LstdFlags|log.Lshortfile),
}


@@ -0,0 +1,141 @@
package parser
import (
"bytes"
"strconv"
"github.com/go-pg/pg/v10/internal"
)
type Parser struct {
b []byte
i int
}
func New(b []byte) *Parser {
return &Parser{
b: b,
}
}
func NewString(s string) *Parser {
return New(internal.StringToBytes(s))
}
func (p *Parser) Valid() bool {
return p.i < len(p.b)
}
func (p *Parser) Bytes() []byte {
return p.b[p.i:]
}
func (p *Parser) Read() byte {
if p.Valid() {
c := p.b[p.i]
p.Advance()
return c
}
return 0
}
func (p *Parser) Peek() byte {
if p.Valid() {
return p.b[p.i]
}
return 0
}
func (p *Parser) Advance() {
p.i++
}
func (p *Parser) Skip(skip byte) bool {
if p.Peek() == skip {
p.Advance()
return true
}
return false
}
func (p *Parser) SkipBytes(skip []byte) bool {
if len(skip) > len(p.b[p.i:]) {
return false
}
if !bytes.Equal(p.b[p.i:p.i+len(skip)], skip) {
return false
}
p.i += len(skip)
return true
}
func (p *Parser) ReadSep(sep byte) ([]byte, bool) {
ind := bytes.IndexByte(p.b[p.i:], sep)
if ind == -1 {
b := p.b[p.i:]
p.i = len(p.b)
return b, false
}
b := p.b[p.i : p.i+ind]
p.i += ind + 1
return b, true
}
func (p *Parser) ReadIdentifier() (string, bool) {
if p.i < len(p.b) && p.b[p.i] == '(' {
s := p.i + 1
if ind := bytes.IndexByte(p.b[s:], ')'); ind != -1 {
b := p.b[s : s+ind]
p.i = s + ind + 1
return internal.BytesToString(b), false
}
}
ind := len(p.b) - p.i
var alpha bool
for i, c := range p.b[p.i:] {
if isNum(c) {
continue
}
if isAlpha(c) || (i > 0 && alpha && c == '_') {
alpha = true
continue
}
ind = i
break
}
if ind == 0 {
return "", false
}
b := p.b[p.i : p.i+ind]
p.i += ind
return internal.BytesToString(b), !alpha
}
func (p *Parser) ReadNumber() int {
ind := len(p.b) - p.i
for i, c := range p.b[p.i:] {
if !isNum(c) {
ind = i
break
}
}
if ind == 0 {
return 0
}
n, err := strconv.Atoi(string(p.b[p.i : p.i+ind]))
if err != nil {
panic(err)
}
p.i += ind
return n
}
func isNum(c byte) bool {
return c >= '0' && c <= '9'
}
func isAlpha(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
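
Parser is a plain byte cursor: an index advances over a byte slice, and helpers such as ReadSep slice out everything up to (and consume) a separator. A self-contained sketch of the cursor pattern, with an illustrative readSep of the same shape:

package main

import (
    "bytes"
    "fmt"
)

// cursor mirrors the Parser above: b is the input, i is the read position.
type cursor struct {
    b []byte
    i int
}

// readSep returns the bytes before sep and reports whether sep was found;
// the cursor ends up positioned just past the separator.
func (c *cursor) readSep(sep byte) ([]byte, bool) {
    ind := bytes.IndexByte(c.b[c.i:], sep)
    if ind == -1 {
        b := c.b[c.i:]
        c.i = len(c.b)
        return b, false
    }
    b := c.b[c.i : c.i+ind]
    c.i += ind + 1
    return b, true
}

func main() {
    c := &cursor{b: []byte("user:pass@localhost")}
    user, _ := c.readSep(':')
    pass, _ := c.readSep('@')
    host, ok := c.readSep('/')
    fmt.Printf("%s %s %s %v\n", user, pass, host, ok) // user pass localhost false
}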


@@ -0,0 +1,65 @@
package parser
import (
"fmt"
"github.com/go-pg/pg/v10/internal/pool"
)
type StreamingParser struct {
pool.Reader
}
func NewStreamingParser(rd pool.Reader) StreamingParser {
return StreamingParser{
Reader: rd,
}
}
func (p StreamingParser) SkipByte(skip byte) error {
c, err := p.ReadByte()
if err != nil {
return err
}
if c == skip {
return nil
}
_ = p.UnreadByte()
return fmt.Errorf("got %q, wanted %q", c, skip)
}
func (p StreamingParser) ReadSubstring(b []byte) ([]byte, error) {
c, err := p.ReadByte()
if err != nil {
return b, err
}
for {
if c == '"' {
return b, nil
}
next, err := p.ReadByte()
if err != nil {
return b, err
}
if c == '\\' {
switch next {
case '\\', '"':
b = append(b, next)
c, err = p.ReadByte()
if err != nil {
return nil, err
}
default:
b = append(b, '\\')
c = next
}
continue
}
b = append(b, c)
c = next
}
}
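
ReadSubstring unescapes a backslash-escaped, double-quoted value one byte at a time: \\ and \" are collapsed, any other backslash is kept literally, and reading stops at the first unescaped closing quote. A self-contained sketch of the same loop over a bufio.Reader, assuming the opening quote has already been consumed:

package main

import (
    "bufio"
    "fmt"
    "strings"
)

// readSubstring mirrors StreamingParser.ReadSubstring above.
func readSubstring(rd *bufio.Reader) (string, error) {
    var b []byte
    c, err := rd.ReadByte()
    if err != nil {
        return "", err
    }
    for {
        if c == '"' {
            return string(b), nil // unescaped closing quote: done
        }
        next, err := rd.ReadByte()
        if err != nil {
            return string(b), err
        }
        if c == '\\' {
            switch next {
            case '\\', '"':
                b = append(b, next) // unescape \\ and \"
                c, err = rd.ReadByte()
                if err != nil {
                    return string(b), err
                }
            default:
                b = append(b, '\\') // keep a lone backslash literally
                c = next
            }
            continue
        }
        b = append(b, c)
        c = next
    }
}

func main() {
    rd := bufio.NewReader(strings.NewReader(`say \"hi\"",rest`))
    s, _ := readSubstring(rd)
    fmt.Println(s) // say "hi"
}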

158
vendor/github.com/go-pg/pg/v10/internal/pool/conn.go generated vendored Normal file

@@ -0,0 +1,158 @@
package pool
import (
"context"
"net"
"strconv"
"sync/atomic"
"time"
)
var noDeadline = time.Time{}
type Conn struct {
netConn net.Conn
rd *ReaderContext
ProcessID int32
SecretKey int32
lastID int64
createdAt time.Time
usedAt uint32 // atomic
pooled bool
Inited bool
}
func NewConn(netConn net.Conn) *Conn {
cn := &Conn{
createdAt: time.Now(),
}
cn.SetNetConn(netConn)
cn.SetUsedAt(time.Now())
return cn
}
func (cn *Conn) UsedAt() time.Time {
unix := atomic.LoadUint32(&cn.usedAt)
return time.Unix(int64(unix), 0)
}
func (cn *Conn) SetUsedAt(tm time.Time) {
atomic.StoreUint32(&cn.usedAt, uint32(tm.Unix()))
}
func (cn *Conn) RemoteAddr() net.Addr {
return cn.netConn.RemoteAddr()
}
func (cn *Conn) SetNetConn(netConn net.Conn) {
cn.netConn = netConn
if cn.rd != nil {
cn.rd.Reset(netConn)
}
}
func (cn *Conn) LockReader() {
if cn.rd != nil {
panic("not reached")
}
cn.rd = NewReaderContext()
cn.rd.Reset(cn.netConn)
}
func (cn *Conn) NetConn() net.Conn {
return cn.netConn
}
func (cn *Conn) NextID() string {
cn.lastID++
return strconv.FormatInt(cn.lastID, 10)
}
func (cn *Conn) WithReader(
ctx context.Context, timeout time.Duration, fn func(rd *ReaderContext) error,
) error {
if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
return err
}
rd := cn.rd
if rd == nil {
rd = GetReaderContext()
defer PutReaderContext(rd)
rd.Reset(cn.netConn)
}
rd.bytesRead = 0
if err := fn(rd); err != nil {
return err
}
return nil
}
func (cn *Conn) WithWriter(
ctx context.Context, timeout time.Duration, fn func(wb *WriteBuffer) error,
) error {
wb := GetWriteBuffer()
defer PutWriteBuffer(wb)
if err := fn(wb); err != nil {
return err
}
return cn.writeBuffer(ctx, timeout, wb)
}
func (cn *Conn) WriteBuffer(ctx context.Context, timeout time.Duration, wb *WriteBuffer) error {
return cn.writeBuffer(ctx, timeout, wb)
}
func (cn *Conn) writeBuffer(
ctx context.Context,
timeout time.Duration,
wb *WriteBuffer,
) error {
if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
return err
}
if _, err := cn.netConn.Write(wb.Bytes); err != nil {
return err
}
return nil
}
func (cn *Conn) Close() error {
return cn.netConn.Close()
}
func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
tm := time.Now()
cn.SetUsedAt(tm)
if timeout > 0 {
tm = tm.Add(timeout)
}
if ctx != nil {
deadline, ok := ctx.Deadline()
if ok {
if timeout == 0 {
return deadline
}
if deadline.Before(tm) {
return deadline
}
return tm
}
}
if timeout > 0 {
return tm
}
return noDeadline
}
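
Conn.deadline combines the per-call timeout with any deadline carried by the context: with both present the earlier one wins, with only one present that one is used, and with neither it returns the zero time (meaning no deadline). A self-contained sketch of the same rule:

package main

import (
    "context"
    "fmt"
    "time"
)

// deadline mirrors Conn.deadline above.
func deadline(ctx context.Context, timeout time.Duration) time.Time {
    tm := time.Now()
    if timeout > 0 {
        tm = tm.Add(timeout)
    }
    if ctx != nil {
        if d, ok := ctx.Deadline(); ok {
            if timeout == 0 || d.Before(tm) {
                return d // the context expires first (or no timeout was given)
            }
            return tm
        }
    }
    if timeout > 0 {
        return tm
    }
    return time.Time{} // no deadline at all
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    fmt.Println(deadline(ctx, 5*time.Second))                // ~now+1s: the context wins
    fmt.Println(deadline(ctx, 100*time.Millisecond))         // ~now+100ms: the timeout wins
    fmt.Println(deadline(context.Background(), 0).IsZero()) // true: no deadline
}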

506
vendor/github.com/go-pg/pg/v10/internal/pool/pool.go generated vendored Normal file

@@ -0,0 +1,506 @@
package pool
import (
"context"
"errors"
"net"
"sync"
"sync/atomic"
"time"
"github.com/go-pg/pg/v10/internal"
)
var (
ErrClosed = errors.New("pg: database is closed")
ErrPoolTimeout = errors.New("pg: connection pool timeout")
)
var timers = sync.Pool{
New: func() interface{} {
t := time.NewTimer(time.Hour)
t.Stop()
return t
},
}
// Stats contains pool state information and accumulated stats.
type Stats struct {
Hits uint32 // number of times free connection was found in the pool
Misses uint32 // number of times free connection was NOT found in the pool
Timeouts uint32 // number of times a wait timeout occurred
TotalConns uint32 // number of total connections in the pool
IdleConns uint32 // number of idle connections in the pool
StaleConns uint32 // number of stale connections removed from the pool
}
type Pooler interface {
NewConn(context.Context) (*Conn, error)
CloseConn(*Conn) error
Get(context.Context) (*Conn, error)
Put(context.Context, *Conn)
Remove(context.Context, *Conn, error)
Len() int
IdleLen() int
Stats() *Stats
Close() error
}
type Options struct {
Dialer func(context.Context) (net.Conn, error)
OnClose func(*Conn) error
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
}
type ConnPool struct {
opt *Options
dialErrorsNum uint32 // atomic
_closed uint32 // atomic
lastDialErrorMu sync.RWMutex
lastDialError error
queue chan struct{}
stats Stats
connsMu sync.Mutex
conns []*Conn
idleConns []*Conn
poolSize int
idleConnsLen int
}
var _ Pooler = (*ConnPool)(nil)
func NewConnPool(opt *Options) *ConnPool {
p := &ConnPool{
opt: opt,
queue: make(chan struct{}, opt.PoolSize),
conns: make([]*Conn, 0, opt.PoolSize),
idleConns: make([]*Conn, 0, opt.PoolSize),
}
p.connsMu.Lock()
p.checkMinIdleConns()
p.connsMu.Unlock()
if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
go p.reaper(opt.IdleCheckFrequency)
}
return p
}
func (p *ConnPool) checkMinIdleConns() {
if p.opt.MinIdleConns == 0 {
return
}
for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
p.poolSize++
p.idleConnsLen++
go func() {
err := p.addIdleConn()
if err != nil {
p.connsMu.Lock()
p.poolSize--
p.idleConnsLen--
p.connsMu.Unlock()
}
}()
}
}
func (p *ConnPool) addIdleConn() error {
cn, err := p.dialConn(context.TODO(), true)
if err != nil {
return err
}
p.connsMu.Lock()
p.conns = append(p.conns, cn)
p.idleConns = append(p.idleConns, cn)
p.connsMu.Unlock()
return nil
}
func (p *ConnPool) NewConn(c context.Context) (*Conn, error) {
return p.newConn(c, false)
}
func (p *ConnPool) newConn(c context.Context, pooled bool) (*Conn, error) {
cn, err := p.dialConn(c, pooled)
if err != nil {
return nil, err
}
p.connsMu.Lock()
p.conns = append(p.conns, cn)
if pooled {
// If pool is full remove the cn on next Put.
if p.poolSize >= p.opt.PoolSize {
cn.pooled = false
} else {
p.poolSize++
}
}
p.connsMu.Unlock()
return cn, nil
}
func (p *ConnPool) dialConn(c context.Context, pooled bool) (*Conn, error) {
if p.closed() {
return nil, ErrClosed
}
if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
return nil, p.getLastDialError()
}
netConn, err := p.opt.Dialer(c)
if err != nil {
p.setLastDialError(err)
if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
go p.tryDial()
}
return nil, err
}
cn := NewConn(netConn)
cn.pooled = pooled
return cn, nil
}
func (p *ConnPool) tryDial() {
for {
if p.closed() {
return
}
conn, err := p.opt.Dialer(context.TODO())
if err != nil {
p.setLastDialError(err)
time.Sleep(time.Second)
continue
}
atomic.StoreUint32(&p.dialErrorsNum, 0)
_ = conn.Close()
return
}
}
func (p *ConnPool) setLastDialError(err error) {
p.lastDialErrorMu.Lock()
p.lastDialError = err
p.lastDialErrorMu.Unlock()
}
func (p *ConnPool) getLastDialError() error {
p.lastDialErrorMu.RLock()
err := p.lastDialError
p.lastDialErrorMu.RUnlock()
return err
}
// Get returns an existing connection from the pool or creates a new one.
func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
if p.closed() {
return nil, ErrClosed
}
err := p.waitTurn(ctx)
if err != nil {
return nil, err
}
for {
p.connsMu.Lock()
cn := p.popIdle()
p.connsMu.Unlock()
if cn == nil {
break
}
if p.isStaleConn(cn) {
_ = p.CloseConn(cn)
continue
}
atomic.AddUint32(&p.stats.Hits, 1)
return cn, nil
}
atomic.AddUint32(&p.stats.Misses, 1)
newcn, err := p.newConn(ctx, true)
if err != nil {
p.freeTurn()
return nil, err
}
return newcn, nil
}
func (p *ConnPool) getTurn() {
p.queue <- struct{}{}
}
func (p *ConnPool) waitTurn(c context.Context) error {
select {
case <-c.Done():
return c.Err()
default:
}
select {
case p.queue <- struct{}{}:
return nil
default:
}
timer := timers.Get().(*time.Timer)
timer.Reset(p.opt.PoolTimeout)
select {
case <-c.Done():
if !timer.Stop() {
<-timer.C
}
timers.Put(timer)
return c.Err()
case p.queue <- struct{}{}:
if !timer.Stop() {
<-timer.C
}
timers.Put(timer)
return nil
case <-timer.C:
timers.Put(timer)
atomic.AddUint32(&p.stats.Timeouts, 1)
return ErrPoolTimeout
}
}
func (p *ConnPool) freeTurn() {
<-p.queue
}
func (p *ConnPool) popIdle() *Conn {
if len(p.idleConns) == 0 {
return nil
}
idx := len(p.idleConns) - 1
cn := p.idleConns[idx]
p.idleConns = p.idleConns[:idx]
p.idleConnsLen--
p.checkMinIdleConns()
return cn
}
func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
if !cn.pooled {
p.Remove(ctx, cn, nil)
return
}
p.connsMu.Lock()
p.idleConns = append(p.idleConns, cn)
p.idleConnsLen++
p.connsMu.Unlock()
p.freeTurn()
}
func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
p.removeConnWithLock(cn)
p.freeTurn()
_ = p.closeConn(cn)
}
func (p *ConnPool) CloseConn(cn *Conn) error {
p.removeConnWithLock(cn)
return p.closeConn(cn)
}
func (p *ConnPool) removeConnWithLock(cn *Conn) {
p.connsMu.Lock()
p.removeConn(cn)
p.connsMu.Unlock()
}
func (p *ConnPool) removeConn(cn *Conn) {
for i, c := range p.conns {
if c == cn {
p.conns = append(p.conns[:i], p.conns[i+1:]...)
if cn.pooled {
p.poolSize--
p.checkMinIdleConns()
}
return
}
}
}
func (p *ConnPool) closeConn(cn *Conn) error {
if p.opt.OnClose != nil {
_ = p.opt.OnClose(cn)
}
return cn.Close()
}
// Len returns total number of connections.
func (p *ConnPool) Len() int {
p.connsMu.Lock()
n := len(p.conns)
p.connsMu.Unlock()
return n
}
// IdleLen returns number of idle connections.
func (p *ConnPool) IdleLen() int {
p.connsMu.Lock()
n := p.idleConnsLen
p.connsMu.Unlock()
return n
}
func (p *ConnPool) Stats() *Stats {
idleLen := p.IdleLen()
return &Stats{
Hits: atomic.LoadUint32(&p.stats.Hits),
Misses: atomic.LoadUint32(&p.stats.Misses),
Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
TotalConns: uint32(p.Len()),
IdleConns: uint32(idleLen),
StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
}
}
func (p *ConnPool) closed() bool {
return atomic.LoadUint32(&p._closed) == 1
}
func (p *ConnPool) Filter(fn func(*Conn) bool) error {
var firstErr error
p.connsMu.Lock()
for _, cn := range p.conns {
if fn(cn) {
if err := p.closeConn(cn); err != nil && firstErr == nil {
firstErr = err
}
}
}
p.connsMu.Unlock()
return firstErr
}
func (p *ConnPool) Close() error {
if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
return ErrClosed
}
var firstErr error
p.connsMu.Lock()
for _, cn := range p.conns {
if err := p.closeConn(cn); err != nil && firstErr == nil {
firstErr = err
}
}
p.conns = nil
p.poolSize = 0
p.idleConns = nil
p.idleConnsLen = 0
p.connsMu.Unlock()
return firstErr
}
func (p *ConnPool) reaper(frequency time.Duration) {
ticker := time.NewTicker(frequency)
defer ticker.Stop()
for range ticker.C {
if p.closed() {
break
}
n, err := p.ReapStaleConns()
if err != nil {
internal.Logger.Printf(context.TODO(), "ReapStaleConns failed: %s", err)
continue
}
atomic.AddUint32(&p.stats.StaleConns, uint32(n))
}
}
func (p *ConnPool) ReapStaleConns() (int, error) {
var n int
for {
p.getTurn()
p.connsMu.Lock()
cn := p.reapStaleConn()
p.connsMu.Unlock()
p.freeTurn()
if cn != nil {
_ = p.closeConn(cn)
n++
} else {
break
}
}
return n, nil
}
func (p *ConnPool) reapStaleConn() *Conn {
if len(p.idleConns) == 0 {
return nil
}
cn := p.idleConns[0]
if !p.isStaleConn(cn) {
return nil
}
p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
p.idleConnsLen--
p.removeConn(cn)
return cn
}
func (p *ConnPool) isStaleConn(cn *Conn) bool {
if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
return false
}
now := time.Now()
if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
return true
}
if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
return true
}
return false
}
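
The queue channel is the pool's admission semaphore: waitTurn first tries a non-blocking send, then waits with a pooled timer and gives up with ErrPoolTimeout, while freeTurn receives to release the slot. A self-contained sketch of the channel-as-semaphore-with-timeout pattern (the fast paths and the timer reuse via sync.Pool are omitted for brevity):

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

var errPoolTimeout = errors.New("connection pool timeout")

type pool struct {
    queue chan struct{} // capacity == pool size
}

// waitTurn blocks until a slot is free, the context is done, or the timeout fires.
func (p *pool) waitTurn(ctx context.Context, timeout time.Duration) error {
    timer := time.NewTimer(timeout)
    defer timer.Stop()
    select {
    case <-ctx.Done():
        return ctx.Err()
    case p.queue <- struct{}{}: // got a slot
        return nil
    case <-timer.C:
        return errPoolTimeout
    }
}

func (p *pool) freeTurn() { <-p.queue }

func main() {
    p := &pool{queue: make(chan struct{}, 1)}
    fmt.Println(p.waitTurn(context.Background(), 50*time.Millisecond)) // <nil>
    fmt.Println(p.waitTurn(context.Background(), 50*time.Millisecond)) // connection pool timeout
    p.freeTurn()
    fmt.Println(p.waitTurn(context.Background(), 50*time.Millisecond)) // <nil>
}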


@@ -0,0 +1,58 @@
package pool
import "context"
type SingleConnPool struct {
pool Pooler
cn *Conn
stickyErr error
}
var _ Pooler = (*SingleConnPool)(nil)
func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
return &SingleConnPool{
pool: pool,
cn: cn,
}
}
func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
return p.pool.NewConn(ctx)
}
func (p *SingleConnPool) CloseConn(cn *Conn) error {
return p.pool.CloseConn(cn)
}
func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
if p.stickyErr != nil {
return nil, p.stickyErr
}
return p.cn, nil
}
func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}
func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
p.cn = nil
p.stickyErr = reason
}
func (p *SingleConnPool) Close() error {
p.cn = nil
p.stickyErr = ErrClosed
return nil
}
func (p *SingleConnPool) Len() int {
return 0
}
func (p *SingleConnPool) IdleLen() int {
return 0
}
func (p *SingleConnPool) Stats() *Stats {
return &Stats{}
}


@@ -0,0 +1,202 @@
package pool
import (
"context"
"errors"
"fmt"
"sync/atomic"
)
const (
stateDefault = 0
stateInited = 1
stateClosed = 2
)
type BadConnError struct {
wrapped error
}
var _ error = (*BadConnError)(nil)
func (e BadConnError) Error() string {
s := "pg: Conn is in a bad state"
if e.wrapped != nil {
s += ": " + e.wrapped.Error()
}
return s
}
func (e BadConnError) Unwrap() error {
return e.wrapped
}
//------------------------------------------------------------------------------
type StickyConnPool struct {
pool Pooler
shared int32 // atomic
state uint32 // atomic
ch chan *Conn
_badConnError atomic.Value
}
var _ Pooler = (*StickyConnPool)(nil)
func NewStickyConnPool(pool Pooler) *StickyConnPool {
p, ok := pool.(*StickyConnPool)
if !ok {
p = &StickyConnPool{
pool: pool,
ch: make(chan *Conn, 1),
}
}
atomic.AddInt32(&p.shared, 1)
return p
}
func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
return p.pool.NewConn(ctx)
}
func (p *StickyConnPool) CloseConn(cn *Conn) error {
return p.pool.CloseConn(cn)
}
func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
// In worst case this races with Close which is not a very common operation.
for i := 0; i < 1000; i++ {
switch atomic.LoadUint32(&p.state) {
case stateDefault:
cn, err := p.pool.Get(ctx)
if err != nil {
return nil, err
}
if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
return cn, nil
}
p.pool.Remove(ctx, cn, ErrClosed)
case stateInited:
if err := p.badConnError(); err != nil {
return nil, err
}
cn, ok := <-p.ch
if !ok {
return nil, ErrClosed
}
return cn, nil
case stateClosed:
return nil, ErrClosed
default:
panic("not reached")
}
}
return nil, fmt.Errorf("pg: StickyConnPool.Get: infinite loop")
}
func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
defer func() {
if recover() != nil {
p.freeConn(ctx, cn)
}
}()
p.ch <- cn
}
func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
if err := p.badConnError(); err != nil {
p.pool.Remove(ctx, cn, err)
} else {
p.pool.Put(ctx, cn)
}
}
func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
defer func() {
if recover() != nil {
p.pool.Remove(ctx, cn, ErrClosed)
}
}()
p._badConnError.Store(BadConnError{wrapped: reason})
p.ch <- cn
}
func (p *StickyConnPool) Close() error {
if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
return nil
}
for i := 0; i < 1000; i++ {
state := atomic.LoadUint32(&p.state)
if state == stateClosed {
return ErrClosed
}
if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
close(p.ch)
cn, ok := <-p.ch
if ok {
p.freeConn(context.TODO(), cn)
}
return nil
}
}
return errors.New("pg: StickyConnPool.Close: infinite loop")
}
func (p *StickyConnPool) Reset(ctx context.Context) error {
if p.badConnError() == nil {
return nil
}
select {
case cn, ok := <-p.ch:
if !ok {
return ErrClosed
}
p.pool.Remove(ctx, cn, ErrClosed)
p._badConnError.Store(BadConnError{wrapped: nil})
default:
return errors.New("pg: StickyConnPool does not have a Conn")
}
if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
state := atomic.LoadUint32(&p.state)
return fmt.Errorf("pg: invalid StickyConnPool state: %d", state)
}
return nil
}
func (p *StickyConnPool) badConnError() error {
if v := p._badConnError.Load(); v != nil {
err := v.(BadConnError)
if err.wrapped != nil {
return err
}
}
return nil
}
func (p *StickyConnPool) Len() int {
switch atomic.LoadUint32(&p.state) {
case stateDefault:
return 0
case stateInited:
return 1
case stateClosed:
return 0
default:
panic("not reached")
}
}
func (p *StickyConnPool) IdleLen() int {
return len(p.ch)
}
func (p *StickyConnPool) Stats() *Stats {
return &Stats{}
}

80
vendor/github.com/go-pg/pg/v10/internal/pool/reader.go generated vendored Normal file

@@ -0,0 +1,80 @@
package pool
import (
"sync"
)
type Reader interface {
Buffered() int
Bytes() []byte
Read([]byte) (int, error)
ReadByte() (byte, error)
UnreadByte() error
ReadSlice(byte) ([]byte, error)
Discard(int) (int, error)
// ReadBytes(fn func(byte) bool) ([]byte, error)
// ReadN(int) ([]byte, error)
ReadFull() ([]byte, error)
ReadFullTemp() ([]byte, error)
}
type ColumnInfo struct {
Index int16
DataType int32
Name string
}
type ColumnAlloc struct {
columns []ColumnInfo
}
func NewColumnAlloc() *ColumnAlloc {
return new(ColumnAlloc)
}
func (c *ColumnAlloc) Reset() {
c.columns = c.columns[:0]
}
func (c *ColumnAlloc) New(index int16, name []byte) *ColumnInfo {
c.columns = append(c.columns, ColumnInfo{
Index: index,
Name: string(name),
})
return &c.columns[len(c.columns)-1]
}
func (c *ColumnAlloc) Columns() []ColumnInfo {
return c.columns
}
type ReaderContext struct {
*BufReader
ColumnAlloc *ColumnAlloc
}
func NewReaderContext() *ReaderContext {
const bufSize = 1 << 20 // 1mb
return &ReaderContext{
BufReader: NewBufReader(bufSize),
ColumnAlloc: NewColumnAlloc(),
}
}
var readerPool = sync.Pool{
New: func() interface{} {
return NewReaderContext()
},
}
func GetReaderContext() *ReaderContext {
rd := readerPool.Get().(*ReaderContext)
return rd
}
func PutReaderContext(rd *ReaderContext) {
rd.ColumnAlloc.Reset()
readerPool.Put(rd)
}


@@ -0,0 +1,431 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pool
import (
"bufio"
"bytes"
"io"
)
type BufReader struct {
rd io.Reader // reader provided by the client
buf []byte
r, w int // buf read and write positions
lastByte int
bytesRead int64
err error
available int // bytes available for reading
brd BytesReader // reusable bytes reader
}
func NewBufReader(bufSize int) *BufReader {
return &BufReader{
buf: make([]byte, bufSize),
available: -1,
}
}
func (b *BufReader) BytesReader(n int) *BytesReader {
if n == -1 {
n = 0
}
buf := b.buf[b.r : b.r+n]
b.r += n
b.brd.Reset(buf)
return &b.brd
}
func (b *BufReader) SetAvailable(n int) {
b.available = n
}
func (b *BufReader) Available() int {
return b.available
}
func (b *BufReader) changeAvailable(n int) {
if b.available != -1 {
b.available += n
}
}
func (b *BufReader) Reset(rd io.Reader) {
b.rd = rd
b.r, b.w = 0, 0
b.err = nil
}
// Buffered returns the number of bytes that can be read from the current buffer.
func (b *BufReader) Buffered() int {
buffered := b.w - b.r
if b.available == -1 || buffered <= b.available {
return buffered
}
return b.available
}
func (b *BufReader) Bytes() []byte {
if b.available == -1 {
return b.buf[b.r:b.w]
}
w := b.r + b.available
if w > b.w {
w = b.w
}
return b.buf[b.r:w]
}
func (b *BufReader) flush() []byte {
if b.available == -1 {
buf := b.buf[b.r:b.w]
b.r = b.w
return buf
}
w := b.r + b.available
if w > b.w {
w = b.w
}
buf := b.buf[b.r:w]
b.r = w
b.changeAvailable(-len(buf))
return buf
}
// fill reads a new chunk into the buffer.
func (b *BufReader) fill() {
// Slide existing data to beginning.
if b.r > 0 {
copy(b.buf, b.buf[b.r:b.w])
b.w -= b.r
b.r = 0
}
if b.w >= len(b.buf) {
panic("bufio: tried to fill full buffer")
}
if b.available == 0 {
b.err = io.EOF
return
}
// Read new data: try a limited number of times.
const maxConsecutiveEmptyReads = 100
for i := maxConsecutiveEmptyReads; i > 0; i-- {
n, err := b.read(b.buf[b.w:])
b.w += n
if err != nil {
b.err = err
return
}
if n > 0 {
return
}
}
b.err = io.ErrNoProgress
}
func (b *BufReader) readErr() error {
err := b.err
b.err = nil
return err
}
func (b *BufReader) Read(p []byte) (n int, err error) {
if len(p) == 0 {
return 0, b.readErr()
}
if b.available != -1 {
if b.available == 0 {
return 0, io.EOF
}
if len(p) > b.available {
p = p[:b.available]
}
}
if b.r == b.w {
if b.err != nil {
return 0, b.readErr()
}
if len(p) >= len(b.buf) {
// Large read, empty buffer.
// Read directly into p to avoid copy.
n, err = b.read(p)
if n > 0 {
b.changeAvailable(-n)
b.lastByte = int(p[n-1])
}
return n, err
}
// One read.
// Do not use b.fill, which will loop.
b.r = 0
b.w = 0
n, b.err = b.read(b.buf)
if n == 0 {
return 0, b.readErr()
}
b.w += n
}
// copy as much as we can
n = copy(p, b.Bytes())
b.r += n
b.changeAvailable(-n)
b.lastByte = int(b.buf[b.r-1])
return n, nil
}
// ReadSlice reads until the first occurrence of delim in the input,
// returning a slice pointing at the bytes in the buffer.
// The bytes stop being valid at the next read.
// If ReadSlice encounters an error before finding a delimiter,
// it returns all the data in the buffer and the error itself (often io.EOF).
// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
// Because the data returned from ReadSlice will be overwritten
// by the next I/O operation, most clients should use
// ReadBytes or ReadString instead.
// ReadSlice returns err != nil if and only if line does not end in delim.
func (b *BufReader) ReadSlice(delim byte) (line []byte, err error) {
for {
// Search buffer.
if i := bytes.IndexByte(b.Bytes(), delim); i >= 0 {
i++
line = b.buf[b.r : b.r+i]
b.r += i
b.changeAvailable(-i)
break
}
// Pending error?
if b.err != nil {
line = b.flush()
err = b.readErr()
break
}
buffered := b.Buffered()
// Out of available.
if b.available != -1 && buffered >= b.available {
line = b.flush()
err = io.EOF
break
}
// Buffer full?
if buffered >= len(b.buf) {
line = b.flush()
err = bufio.ErrBufferFull
break
}
b.fill() // buffer is not full
}
// Handle last byte, if any.
if i := len(line) - 1; i >= 0 {
b.lastByte = int(line[i])
}
return line, err
}
func (b *BufReader) ReadBytes(fn func(byte) bool) (line []byte, err error) {
for {
for i, c := range b.Bytes() {
if !fn(c) {
i--
line = b.buf[b.r : b.r+i] //nolint
b.r += i
b.changeAvailable(-i)
break
}
}
// Pending error?
if b.err != nil {
line = b.flush()
err = b.readErr()
break
}
buffered := b.Buffered()
// Out of available.
if b.available != -1 && buffered >= b.available {
line = b.flush()
err = io.EOF
break
}
// Buffer full?
if buffered >= len(b.buf) {
line = b.flush()
err = bufio.ErrBufferFull
break
}
b.fill() // buffer is not full
}
// Handle last byte, if any.
if i := len(line) - 1; i >= 0 {
b.lastByte = int(line[i])
}
return line, err
}
func (b *BufReader) ReadByte() (byte, error) {
if b.available == 0 {
return 0, io.EOF
}
for b.r == b.w {
if b.err != nil {
return 0, b.readErr()
}
b.fill() // buffer is empty
}
c := b.buf[b.r]
b.r++
b.lastByte = int(c)
b.changeAvailable(-1)
return c, nil
}
func (b *BufReader) UnreadByte() error {
if b.lastByte < 0 || b.r == 0 && b.w > 0 {
return bufio.ErrInvalidUnreadByte
}
// b.r > 0 || b.w == 0
if b.r > 0 {
b.r--
} else {
// b.r == 0 && b.w == 0
b.w = 1
}
b.buf[b.r] = byte(b.lastByte)
b.lastByte = -1
b.changeAvailable(+1)
return nil
}
// Discard skips the next n bytes, returning the number of bytes discarded.
//
// If Discard skips fewer than n bytes, it also returns an error.
// If 0 <= n <= b.Buffered(), Discard is guaranteed to succeed without
// reading from the underlying io.Reader.
func (b *BufReader) Discard(n int) (discarded int, err error) {
if n < 0 {
return 0, bufio.ErrNegativeCount
}
if n == 0 {
return
}
remain := n
for {
skip := b.Buffered()
if skip == 0 {
b.fill()
skip = b.Buffered()
}
if skip > remain {
skip = remain
}
b.r += skip
b.changeAvailable(-skip)
remain -= skip
if remain == 0 {
return n, nil
}
if b.err != nil {
return n - remain, b.readErr()
}
}
}
func (b *BufReader) ReadN(n int) (line []byte, err error) {
if n < 0 {
return nil, bufio.ErrNegativeCount
}
if n == 0 {
return
}
nn := n
if b.available != -1 && nn > b.available {
nn = b.available
}
for {
buffered := b.Buffered()
if buffered >= nn {
line = b.buf[b.r : b.r+nn]
b.r += nn
b.changeAvailable(-nn)
if n > nn {
err = io.EOF
}
break
}
// Pending error?
if b.err != nil {
line = b.flush()
err = b.readErr()
break
}
// Buffer full?
if buffered >= len(b.buf) {
line = b.flush()
err = bufio.ErrBufferFull
break
}
b.fill() // buffer is not full
}
// Handle last byte, if any.
if i := len(line) - 1; i >= 0 {
b.lastByte = int(line[i])
}
return line, err
}
func (b *BufReader) ReadFull() ([]byte, error) {
if b.available == -1 {
panic("not reached")
}
buf := make([]byte, b.available)
_, err := io.ReadFull(b, buf)
return buf, err
}
func (b *BufReader) ReadFullTemp() ([]byte, error) {
if b.available == -1 {
panic("not reached")
}
if b.available <= len(b.buf) {
return b.ReadN(b.available)
}
return b.ReadFull()
}
func (b *BufReader) read(buf []byte) (int, error) {
n, err := b.rd.Read(buf)
b.bytesRead += int64(n)
return n, err
}


@@ -0,0 +1,121 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pool
import (
"bytes"
"errors"
"io"
)
type BytesReader struct {
s []byte
i int
}
func NewBytesReader(b []byte) *BytesReader {
return &BytesReader{
s: b,
}
}
func (r *BytesReader) Reset(b []byte) {
r.s = b
r.i = 0
}
func (r *BytesReader) Buffered() int {
return len(r.s) - r.i
}
func (r *BytesReader) Bytes() []byte {
return r.s[r.i:]
}
func (r *BytesReader) Read(b []byte) (n int, err error) {
if r.i >= len(r.s) {
return 0, io.EOF
}
n = copy(b, r.s[r.i:])
r.i += n
return
}
func (r *BytesReader) ReadByte() (byte, error) {
if r.i >= len(r.s) {
return 0, io.EOF
}
b := r.s[r.i]
r.i++
return b, nil
}
func (r *BytesReader) UnreadByte() error {
if r.i <= 0 {
return errors.New("UnreadByte: at beginning of slice")
}
r.i--
return nil
}
func (r *BytesReader) ReadSlice(delim byte) ([]byte, error) {
if i := bytes.IndexByte(r.s[r.i:], delim); i >= 0 {
i++
line := r.s[r.i : r.i+i]
r.i += i
return line, nil
}
line := r.s[r.i:]
r.i = len(r.s)
return line, io.EOF
}
func (r *BytesReader) ReadBytes(fn func(byte) bool) ([]byte, error) {
for i, c := range r.s[r.i:] {
if !fn(c) {
i++
line := r.s[r.i : r.i+i]
r.i += i
return line, nil
}
}
line := r.s[r.i:]
r.i = len(r.s)
return line, io.EOF
}
func (r *BytesReader) Discard(n int) (int, error) {
b, err := r.ReadN(n)
return len(b), err
}
func (r *BytesReader) ReadN(n int) ([]byte, error) {
nn := n
if nn > len(r.s) {
nn = len(r.s)
}
b := r.s[r.i : r.i+nn]
r.i += nn
if n > nn {
return b, io.EOF
}
return b, nil
}
func (r *BytesReader) ReadFull() ([]byte, error) {
b := make([]byte, len(r.s)-r.i)
copy(b, r.s[r.i:])
r.i = len(r.s)
return b, nil
}
func (r *BytesReader) ReadFullTemp() ([]byte, error) {
b := r.s[r.i:]
r.i = len(r.s)
return b, nil
}


@@ -0,0 +1,114 @@
package pool
import (
"encoding/binary"
"io"
"sync"
)
const defaultBufSize = 65 << 10 // 65kb
var wbPool = sync.Pool{
New: func() interface{} {
return NewWriteBuffer()
},
}
func GetWriteBuffer() *WriteBuffer {
wb := wbPool.Get().(*WriteBuffer)
return wb
}
func PutWriteBuffer(wb *WriteBuffer) {
wb.Reset()
wbPool.Put(wb)
}
type WriteBuffer struct {
Bytes []byte
msgStart int
paramStart int
}
func NewWriteBuffer() *WriteBuffer {
return &WriteBuffer{
Bytes: make([]byte, 0, defaultBufSize),
}
}
func (buf *WriteBuffer) Reset() {
buf.Bytes = buf.Bytes[:0]
}
func (buf *WriteBuffer) StartMessage(c byte) {
if c == 0 {
buf.msgStart = len(buf.Bytes)
buf.Bytes = append(buf.Bytes, 0, 0, 0, 0)
} else {
buf.msgStart = len(buf.Bytes) + 1
buf.Bytes = append(buf.Bytes, c, 0, 0, 0, 0)
}
}
func (buf *WriteBuffer) FinishMessage() {
binary.BigEndian.PutUint32(
buf.Bytes[buf.msgStart:], uint32(len(buf.Bytes)-buf.msgStart))
}
func (buf *WriteBuffer) Query() []byte {
return buf.Bytes[buf.msgStart+4 : len(buf.Bytes)-1]
}
func (buf *WriteBuffer) StartParam() {
buf.paramStart = len(buf.Bytes)
buf.Bytes = append(buf.Bytes, 0, 0, 0, 0)
}
func (buf *WriteBuffer) FinishParam() {
binary.BigEndian.PutUint32(
buf.Bytes[buf.paramStart:], uint32(len(buf.Bytes)-buf.paramStart-4))
}
var nullParamLength = int32(-1)
func (buf *WriteBuffer) FinishNullParam() {
binary.BigEndian.PutUint32(
buf.Bytes[buf.paramStart:], uint32(nullParamLength))
}
func (buf *WriteBuffer) Write(b []byte) (int, error) {
buf.Bytes = append(buf.Bytes, b...)
return len(b), nil
}
func (buf *WriteBuffer) WriteInt16(num int16) {
buf.Bytes = append(buf.Bytes, 0, 0)
binary.BigEndian.PutUint16(buf.Bytes[len(buf.Bytes)-2:], uint16(num))
}
func (buf *WriteBuffer) WriteInt32(num int32) {
buf.Bytes = append(buf.Bytes, 0, 0, 0, 0)
binary.BigEndian.PutUint32(buf.Bytes[len(buf.Bytes)-4:], uint32(num))
}
func (buf *WriteBuffer) WriteString(s string) {
buf.Bytes = append(buf.Bytes, s...)
buf.Bytes = append(buf.Bytes, 0)
}
func (buf *WriteBuffer) WriteBytes(b []byte) {
buf.Bytes = append(buf.Bytes, b...)
buf.Bytes = append(buf.Bytes, 0)
}
func (buf *WriteBuffer) WriteByte(c byte) error {
buf.Bytes = append(buf.Bytes, c)
return nil
}
func (buf *WriteBuffer) ReadFrom(r io.Reader) (int64, error) {
n, err := r.Read(buf.Bytes[len(buf.Bytes):cap(buf.Bytes)])
buf.Bytes = buf.Bytes[:len(buf.Bytes)+n]
return int64(n), err
}
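
StartMessage/FinishMessage produce PostgreSQL wire framing: an optional one-byte message type followed by a 4-byte big-endian length that counts itself and the payload but not the type byte. A self-contained sketch of that framing:

package main

import (
    "encoding/binary"
    "fmt"
)

// writeBuffer mirrors the framing logic of WriteBuffer above.
type writeBuffer struct {
    bytes    []byte
    msgStart int
}

func (b *writeBuffer) startMessage(typ byte) {
    b.msgStart = len(b.bytes) + 1 // length field starts after the type byte
    b.bytes = append(b.bytes, typ, 0, 0, 0, 0)
}

func (b *writeBuffer) writeString(s string) {
    b.bytes = append(b.bytes, s...)
    b.bytes = append(b.bytes, 0) // C-style NUL terminator
}

func (b *writeBuffer) finishMessage() {
    binary.BigEndian.PutUint32(b.bytes[b.msgStart:], uint32(len(b.bytes)-b.msgStart))
}

func main() {
    var b writeBuffer
    b.startMessage('Q') // simple Query message
    b.writeString("SELECT 1")
    b.finishMessage()
    fmt.Printf("% x\n", b.bytes)
    // 51 00 00 00 0d 53 45 4c 45 43 54 20 31 00
    // 'Q', length 13 (4-byte length field + 9-byte payload "SELECT 1\0"), payload
}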

11
vendor/github.com/go-pg/pg/v10/internal/safe.go generated vendored Normal file

@@ -0,0 +1,11 @@
// +build appengine
package internal
func BytesToString(b []byte) string {
return string(b)
}
func StringToBytes(s string) []byte {
return []byte(s)
}

19
vendor/github.com/go-pg/pg/v10/internal/strconv.go generated vendored Normal file

@@ -0,0 +1,19 @@
package internal
import "strconv"
func Atoi(b []byte) (int, error) {
return strconv.Atoi(BytesToString(b))
}
func ParseInt(b []byte, base int, bitSize int) (int64, error) {
return strconv.ParseInt(BytesToString(b), base, bitSize)
}
func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
return strconv.ParseUint(BytesToString(b), base, bitSize)
}
func ParseFloat(b []byte, bitSize int) (float64, error) {
return strconv.ParseFloat(BytesToString(b), bitSize)
}

93
vendor/github.com/go-pg/pg/v10/internal/underscore.go generated vendored Normal file

@@ -0,0 +1,93 @@
package internal
func IsUpper(c byte) bool {
return c >= 'A' && c <= 'Z'
}
func IsLower(c byte) bool {
return c >= 'a' && c <= 'z'
}
func ToUpper(c byte) byte {
return c - 32
}
func ToLower(c byte) byte {
return c + 32
}
// Underscore converts "CamelCasedString" to "camel_cased_string".
func Underscore(s string) string {
r := make([]byte, 0, len(s)+5)
for i := 0; i < len(s); i++ {
c := s[i]
if IsUpper(c) {
if i > 0 && i+1 < len(s) && (IsLower(s[i-1]) || IsLower(s[i+1])) {
r = append(r, '_', ToLower(c))
} else {
r = append(r, ToLower(c))
}
} else {
r = append(r, c)
}
}
return string(r)
}
func CamelCased(s string) string {
r := make([]byte, 0, len(s))
upperNext := true
for i := 0; i < len(s); i++ {
c := s[i]
if c == '_' {
upperNext = true
continue
}
if upperNext {
if IsLower(c) {
c = ToUpper(c)
}
upperNext = false
}
r = append(r, c)
}
return string(r)
}
func ToExported(s string) string {
if len(s) == 0 {
return s
}
if c := s[0]; IsLower(c) {
b := []byte(s)
b[0] = ToUpper(c)
return string(b)
}
return s
}
func UpperString(s string) string {
if isUpperString(s) {
return s
}
b := make([]byte, len(s))
for i := range b {
c := s[i]
if IsLower(c) {
c = ToUpper(c)
}
b[i] = c
}
return string(b)
}
func isUpperString(s string) bool {
for i := 0; i < len(s); i++ {
c := s[i]
if IsLower(c) {
return false
}
}
return true
}
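
Underscore only inserts an underscore before an uppercase letter that borders a lowercase one, so initialisms stay together. A trimmed, self-contained copy for illustration:

package main

import "fmt"

// underscore is a trimmed copy of Underscore above, for demonstration only.
func underscore(s string) string {
    isLower := func(c byte) bool { return c >= 'a' && c <= 'z' }
    isUpper := func(c byte) bool { return c >= 'A' && c <= 'Z' }
    r := make([]byte, 0, len(s)+5)
    for i := 0; i < len(s); i++ {
        c := s[i]
        if isUpper(c) {
            if i > 0 && i+1 < len(s) && (isLower(s[i-1]) || isLower(s[i+1])) {
                r = append(r, '_', c+32)
            } else {
                r = append(r, c+32)
            }
        } else {
            r = append(r, c)
        }
    }
    return string(r)
}

func main() {
    fmt.Println(underscore("CamelCasedString")) // camel_cased_string
    fmt.Println(underscore("HTTPServer"))       // http_server
    fmt.Println(underscore("ID"))               // id
}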

22
vendor/github.com/go-pg/pg/v10/internal/unsafe.go generated vendored Normal file

@@ -0,0 +1,22 @@
// +build !appengine
package internal
import (
"unsafe"
)
// BytesToString converts byte slice to string.
func BytesToString(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
// StringToBytes converts string to byte slice.
func StringToBytes(s string) []byte {
return *(*[]byte)(unsafe.Pointer(
&struct {
string
Cap int
}{s, len(s)},
))
}

71
vendor/github.com/go-pg/pg/v10/internal/util.go generated vendored Normal file

@@ -0,0 +1,71 @@
package internal
import (
"context"
"reflect"
"time"
)
func Sleep(ctx context.Context, dur time.Duration) error {
t := time.NewTimer(dur)
defer t.Stop()
select {
case <-t.C:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func MakeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
if v.Kind() == reflect.Array {
var pos int
return func() reflect.Value {
v := v.Index(pos)
pos++
return v
}
}
elemType := v.Type().Elem()
if elemType.Kind() == reflect.Ptr {
elemType = elemType.Elem()
return func() reflect.Value {
if v.Len() < v.Cap() {
v.Set(v.Slice(0, v.Len()+1))
elem := v.Index(v.Len() - 1)
if elem.IsNil() {
elem.Set(reflect.New(elemType))
}
return elem.Elem()
}
elem := reflect.New(elemType)
v.Set(reflect.Append(v, elem))
return elem.Elem()
}
}
zero := reflect.Zero(elemType)
return func() reflect.Value {
if v.Len() < v.Cap() {
v.Set(v.Slice(0, v.Len()+1))
return v.Index(v.Len() - 1)
}
v.Set(reflect.Append(v, zero))
return v.Index(v.Len() - 1)
}
}
func Unwrap(err error) error {
u, ok := err.(interface {
Unwrap() error
})
if !ok {
return nil
}
return u.Unwrap()
}