2024-05-13 08:05:46 +00:00
|
|
|
package mempool
|
|
|
|
|
|
|
|
|
|
import (
|
2025-10-03 15:29:41 +02:00
|
|
|
"sync"
|
|
|
|
|
"sync/atomic"
|
2024-05-13 08:05:46 +00:00
|
|
|
"unsafe"
|
|
|
|
|
|
2025-10-03 15:29:41 +02:00
|
|
|
"golang.org/x/sys/cpu"
|
|
|
|
|
)
|
2024-05-13 08:05:46 +00:00
|
|
|
|
2025-10-03 15:29:41 +02:00
|
|
|
// Pool provides a form of SimplePool
// with the addition of concurrency safety.
type Pool[T any] struct {
	// UnsafePool is the embedded concurrency-safe
	// pointer pool that Get / Put wrap with
	// type-safe conversions to and from T.
	UnsafePool

	// New is an optionally provided
	// allocator used when no value
	// is available for use in pool.
	New func() T

	// Reset is an optionally provided
	// value resetting function called
	// on passed value to Put(). A false
	// return causes the value to be dropped
	// instead of returned to the pool.
	Reset func(T) bool
}
|
2024-05-13 08:05:46 +00:00
|
|
|
|
2025-10-03 15:29:41 +02:00
|
|
|
func NewPool[T any](new func() T, reset func(T) bool, check func(current, victim int) bool) Pool[T] {
|
|
|
|
|
return Pool[T]{
|
|
|
|
|
New: new,
|
|
|
|
|
Reset: reset,
|
|
|
|
|
UnsafePool: NewUnsafePool(check),
|
|
|
|
|
}
|
2024-05-13 08:05:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (p *Pool[T]) Get() T {
|
|
|
|
|
if ptr := p.UnsafePool.Get(); ptr != nil {
|
|
|
|
|
return *(*T)(ptr)
|
|
|
|
|
}
|
2025-10-03 15:29:41 +02:00
|
|
|
var t T
|
|
|
|
|
if p.New != nil {
|
|
|
|
|
t = p.New()
|
|
|
|
|
}
|
|
|
|
|
return t
|
2024-05-13 08:05:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (p *Pool[T]) Put(t T) {
|
2025-10-03 15:29:41 +02:00
|
|
|
if p.Reset != nil && !p.Reset(t) {
|
|
|
|
|
return
|
2024-05-13 08:05:46 +00:00
|
|
|
}
|
|
|
|
|
ptr := unsafe.Pointer(&t)
|
|
|
|
|
p.UnsafePool.Put(ptr)
|
|
|
|
|
}
|
|
|
|
|
|
2025-10-03 15:29:41 +02:00
|
|
|
// UnsafePool provides a form of UnsafeSimplePool
// with the addition of concurrency safety.
type UnsafePool struct {
	internal

	// Trailing padding rounds the struct size up to the CPU
	// cache line size to reduce false sharing between
	// adjacently allocated pools. NOTE(review): when
	// sizeof(internal) is already a multiple of the line
	// size this adds one full extra line rather than zero —
	// harmless, but worth confirming it is intentional.
	_ [cache_line_size - unsafe.Sizeof(internal{})%cache_line_size]byte
}
|
|
|
|
|
|
|
|
|
|
func NewUnsafePool(check func(current, victim int) bool) UnsafePool {
|
|
|
|
|
return UnsafePool{internal: internal{
|
|
|
|
|
pool: UnsafeSimplePool{Check: check},
|
|
|
|
|
}}
|
|
|
|
|
}
|
2024-05-13 08:05:46 +00:00
|
|
|
|
2025-10-03 15:29:41 +02:00
|
|
|
const (
	// current platform integer size.
	// ^uint(0)>>63 is 1 on 64-bit platforms and 0 on
	// 32-bit, so this evaluates to 64 or 32 respectively.
	int_size = 32 << (^uint(0) >> 63)

	// platform CPU cache line size to avoid false sharing.
	cache_line_size = unsafe.Sizeof(cpu.CacheLinePad{})
)
|
|
|
|
|
|
|
|
|
|
type internal struct {
	// fast-access ring-buffer of
	// pointers accessible by index.
	//
	// if Go ever exposes goroutine IDs
	// to us we can make this a lot faster.
	//
	// 8 slots on 32-bit platforms, 16 on 64-bit
	// (int_size is 32 or 64, see const block).
	ring [int_size / 4]unsafe.Pointer

	// index is an approximate cursor into ring: Put
	// increments it before storing, Get decrements it
	// after a successful take. It is best-effort only —
	// concurrent callers may leave it drifting, which the
	// modulo indexing tolerates.
	index atomic.Uint64

	// underlying pool and
	// slow mutex protection.
	pool  UnsafeSimplePool
	mutex sync.Mutex
}
|
|
|
|
|
|
2025-10-03 15:29:41 +02:00
|
|
|
func (p *internal) Check(fn func(current, victim int) bool) func(current, victim int) bool {
|
|
|
|
|
p.mutex.Lock()
|
|
|
|
|
if fn == nil {
|
|
|
|
|
if p.pool.Check == nil {
|
|
|
|
|
fn = defaultCheck
|
|
|
|
|
} else {
|
|
|
|
|
fn = p.pool.Check
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
p.pool.Check = fn
|
2024-05-13 08:05:46 +00:00
|
|
|
}
|
2025-10-03 15:29:41 +02:00
|
|
|
p.mutex.Unlock()
|
|
|
|
|
return fn
|
|
|
|
|
}
|
2024-05-13 08:05:46 +00:00
|
|
|
|
2025-10-03 15:29:41 +02:00
|
|
|
func (p *internal) Get() unsafe.Pointer {
|
|
|
|
|
if ptr := atomic.SwapPointer(&p.ring[p.index.Load()%uint64(cap(p.ring))], nil); ptr != nil {
|
|
|
|
|
p.index.Add(^uint64(0)) // i.e. -1
|
2024-05-13 08:05:46 +00:00
|
|
|
return ptr
|
|
|
|
|
}
|
2025-10-03 15:29:41 +02:00
|
|
|
p.mutex.Lock()
|
|
|
|
|
ptr := p.pool.Get()
|
|
|
|
|
p.mutex.Unlock()
|
|
|
|
|
return ptr
|
2024-05-13 08:05:46 +00:00
|
|
|
}
|
|
|
|
|
|
2025-10-03 15:29:41 +02:00
|
|
|
func (p *internal) Put(ptr unsafe.Pointer) {
|
|
|
|
|
if atomic.CompareAndSwapPointer(&p.ring[p.index.Add(1)%uint64(cap(p.ring))], nil, ptr) {
|
|
|
|
|
return
|
2024-05-13 08:05:46 +00:00
|
|
|
}
|
2025-10-03 15:29:41 +02:00
|
|
|
p.mutex.Lock()
|
|
|
|
|
p.pool.Put(ptr)
|
|
|
|
|
p.mutex.Unlock()
|
|
|
|
|
}
|
2024-05-13 08:05:46 +00:00
|
|
|
|
2025-10-03 15:29:41 +02:00
|
|
|
func (p *internal) GC() {
|
|
|
|
|
for i := range p.ring {
|
|
|
|
|
atomic.StorePointer(&p.ring[i], nil)
|
2024-05-13 08:05:46 +00:00
|
|
|
}
|
2025-10-03 15:29:41 +02:00
|
|
|
p.mutex.Lock()
|
|
|
|
|
p.pool.GC()
|
|
|
|
|
p.mutex.Unlock()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (p *internal) Size() int {
|
|
|
|
|
p.mutex.Lock()
|
|
|
|
|
sz := p.pool.Size()
|
|
|
|
|
p.mutex.Unlock()
|
|
|
|
|
return sz
|
2024-05-13 08:05:46 +00:00
|
|
|
}
|