[chore] update dependencies (#4468)

- github.com/ncruces/go-sqlite3
- codeberg.org/gruf/go-mempool
- codeberg.org/gruf/go-structr (changes related to the above) *
- codeberg.org/gruf/go-mutexes (changes related to the above) *

* this is largely just fiddling around with package internals in structr and mutexes to rely on changes in mempool, which added a new concurrency-safe pool

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4468
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
This commit is contained in:
kim 2025-10-03 15:29:41 +02:00 committed by kim
commit ff950e94bb
32 changed files with 706 additions and 317 deletions

View file

@ -1,3 +1,3 @@
# go-mempool
very simple memory pool implementation
very simple memory pool implementation

View file

@ -1,17 +1,17 @@
package mempool
import (
"sync"
"sync/atomic"
"unsafe"
"golang.org/x/sys/cpu"
)
const DefaultDirtyFactor = 128
// Pool provides a type-safe form
// of UnsafePool using generics.
//
// Note it is NOT safe for concurrent
// use, you must protect it yourself!
// Pool provides a form of SimplePool
// with the addition of concurrency safety.
type Pool[T any] struct {
UnsafePool
// New is an optionally provided
// allocator used when no value
@ -21,79 +21,119 @@ type Pool[T any] struct {
// Reset is an optionally provided
// value resetting function called
// on passed value to Put().
Reset func(T)
Reset func(T) bool
}
UnsafePool
// NewPool returns a new Pool[T] initialized with the
// given allocator, reset and GC check functions, any
// of which may be nil to fall back to defaults.
func NewPool[T any](new func() T, reset func(T) bool, check func(current, victim int) bool) Pool[T] {
	var p Pool[T]
	p.New = new
	p.Reset = reset
	p.UnsafePool = NewUnsafePool(check)
	return p
}
func (p *Pool[T]) Get() T {
if ptr := p.UnsafePool.Get(); ptr != nil {
return *(*T)(ptr)
} else if p.New != nil {
return p.New()
}
var z T
return z
var t T
if p.New != nil {
t = p.New()
}
return t
}
func (p *Pool[T]) Put(t T) {
if p.Reset != nil {
p.Reset(t)
if p.Reset != nil && !p.Reset(t) {
return
}
ptr := unsafe.Pointer(&t)
p.UnsafePool.Put(ptr)
}
// UnsafePool provides an incredibly
// simple memory pool implementation
// that stores ptrs to memory values,
// and regularly flushes internal pool
// structures according to DirtyFactor.
//
// Note it is NOT safe for concurrent
// use, you must protect it yourself!
// UnsafePool provides a form of UnsafeSimplePool
// with the addition of concurrency safety.
type UnsafePool struct {
// DirtyFactor determines the max
// number of $dirty count before
// pool is garbage collected. Where:
// $dirty = len(current) - len(victim)
DirtyFactor int
current []unsafe.Pointer
victim []unsafe.Pointer
internal
_ [cache_line_size - unsafe.Sizeof(internal{})%cache_line_size]byte
}
func (p *UnsafePool) Get() unsafe.Pointer {
// First try current list.
if len(p.current) > 0 {
ptr := p.current[len(p.current)-1]
p.current = p.current[:len(p.current)-1]
// NewUnsafePool returns a new UnsafePool with its
// underlying pool's GC check function set to 'check'.
func NewUnsafePool(check func(current, victim int) bool) UnsafePool {
	var pool UnsafePool
	pool.internal.pool = UnsafeSimplePool{Check: check}
	return pool
}
const (
	// int_size is the current platform integer size in
	// bits: this expression evaluates to 64 on 64-bit
	// platforms and to 32 on 32-bit platforms.
	int_size = 32 << (^uint(0) >> 63)

	// cache_line_size is the platform CPU cache line size,
	// (via x/sys/cpu.CacheLinePad), used to pad structures
	// and avoid false sharing between adjacent cache lines.
	cache_line_size = unsafe.Sizeof(cpu.CacheLinePad{})
)
// internal holds the actual concurrency-safe pool state:
// a fixed array of pointer slots tried lock-free first,
// backed by a mutex-protected UnsafeSimplePool.
type internal struct {
	// fast-access ring-buffer of
	// pointers accessible by index.
	// (int_size / 4 slots, i.e. 16
	// entries on 64-bit platforms).
	//
	// if Go ever exposes goroutine IDs
	// to us we can make this a lot faster.
	ring  [int_size / 4]unsafe.Pointer
	index atomic.Uint64

	// underlying pool and
	// slow mutex protection.
	pool  UnsafeSimplePool
	mutex sync.Mutex
}
// Check installs 'fn' as the underlying pool's GC check
// function when non-nil, otherwise it returns the check
// function currently set (or the package default when
// none is set). Access is serialized under pool mutex.
func (p *internal) Check(fn func(current, victim int) bool) func(current, victim int) bool {
	p.mutex.Lock()
	switch {
	case fn != nil:
		// Install given function.
		p.pool.Check = fn
	case p.pool.Check != nil:
		// Return currently set.
		fn = p.pool.Check
	default:
		// Neither set, use default.
		fn = defaultCheck
	}
	p.mutex.Unlock()
	return fn
}
func (p *internal) Get() unsafe.Pointer {
if ptr := atomic.SwapPointer(&p.ring[p.index.Load()%uint64(cap(p.ring))], nil); ptr != nil {
p.index.Add(^uint64(0)) // i.e. -1
return ptr
}
// Fallback to victim.
if len(p.victim) > 0 {
ptr := p.victim[len(p.victim)-1]
p.victim = p.victim[:len(p.victim)-1]
return ptr
}
return nil
p.mutex.Lock()
ptr := p.pool.Get()
p.mutex.Unlock()
return ptr
}
func (p *UnsafePool) Put(ptr unsafe.Pointer) {
p.current = append(p.current, ptr)
// Get dirty factor.
df := p.DirtyFactor
if df == 0 {
df = DefaultDirtyFactor
}
if len(p.current)-len(p.victim) > df {
// Garbage collection!
p.victim = p.current
p.current = nil
// Put returns the given pointer to the pool, first trying
// a lock-free compare-and-swap into the next ring-buffer
// slot, else falling back to the mutex-protected pool.
func (p *internal) Put(ptr unsafe.Pointer) {
	// Fast path: CAS into next ring slot if it is empty.
	slot := &p.ring[p.index.Add(1)%uint64(cap(p.ring))]
	if atomic.CompareAndSwapPointer(slot, nil, ptr) {
		return
	}

	// Slow path: store in underlying
	// pool under mutex protection.
	p.mutex.Lock()
	p.pool.Put(ptr)
	p.mutex.Unlock()
}
// GC drops every entry in the ring buffer, then runs
// a garbage collection pass on the underlying pool
// (the latter under protection of the pool mutex).
func (p *internal) GC() {
	// Clear out all ring slots.
	for i := 0; i < len(p.ring); i++ {
		atomic.StorePointer(&p.ring[i], nil)
	}

	p.mutex.Lock()
	p.pool.GC()
	p.mutex.Unlock()
}
// Size returns the size of the underlying pool,
// under mutex lock. Note this does not include
// any pointers held in the ring buffer slots.
func (p *internal) Size() int {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	return p.pool.Size()
}

111
vendor/codeberg.org/gruf/go-mempool/simple.go generated vendored Normal file
View file

@ -0,0 +1,111 @@
package mempool
import (
"unsafe"
)
// SimplePool provides a type-safe form
// of UnsafeSimplePool using generics.
//
// Note it is NOT safe for concurrent
// use, you must protect it yourself!
type SimplePool[T any] struct {
	UnsafeSimplePool

	// New is an optionally provided
	// allocator used when no value
	// is available for use in pool.
	New func() T

	// Reset is an optionally provided
	// value resetting function called
	// on passed value to Put(). When it
	// returns false the value is dropped
	// instead of being pooled.
	Reset func(T) bool
}
// Get fetches a pooled value, allocating one via
// New() (when set) if none is available, otherwise
// returning the zero value of type T.
func (p *SimplePool[T]) Get() T {
	ptr := p.UnsafeSimplePool.Get()
	if ptr == nil {
		// Nothing pooled, allocate
		// (or fall back to zero value).
		var zero T
		if p.New == nil {
			return zero
		}
		return p.New()
	}
	return *(*T)(ptr)
}
// Put resets (when Reset is set) and returns the
// given value to the pool; a false return from
// Reset drops the value instead of pooling it.
func (p *SimplePool[T]) Put(t T) {
	if reset := p.Reset; reset != nil && !reset(t) {
		// Reset vetoed
		// pooling value.
		return
	}
	p.UnsafeSimplePool.Put(unsafe.Pointer(&t))
}
// UnsafeSimplePool provides an incredibly
// simple memory pool implementation
// that stores ptrs to memory values,
// and regularly flushes internal pool
// structures according to Check().
//
// Note it is NOT safe for concurrent
// use, you must protect it yourself!
type UnsafeSimplePool struct {
	// Check determines how often to flush
	// internal pools based on underlying
	// current and victim pool sizes. It gets
	// called on every pool Put() operation.
	//
	// A flush will start a new current
	// pool, make victim the old current,
	// and drop the existing victim pool.
	Check func(current, victim int) bool

	current []unsafe.Pointer
	victim  []unsafe.Pointer
}

// Get pops and returns the most recently pooled pointer,
// preferring the current pool, falling back to the victim
// pool, and returning nil when both are empty.
func (p *UnsafeSimplePool) Get() unsafe.Pointer {
	// Prefer the current pool.
	if n := len(p.current); n > 0 {
		ptr := p.current[n-1]
		p.current = p.current[:n-1]
		return ptr
	}

	// Fall back to the victim pool.
	if n := len(p.victim); n > 0 {
		ptr := p.victim[n-1]
		p.victim = p.victim[:n-1]
		return ptr
	}

	// Both pools empty.
	return nil
}

// Put appends the given pointer to the current pool, then
// runs a garbage collection pass whenever the configured
// (or default) Check function says it is time.
func (p *UnsafeSimplePool) Put(ptr unsafe.Pointer) {
	p.current = append(p.current, ptr)

	// Lazily install the default
	// check function when unset.
	check := p.Check
	if check == nil {
		check = defaultCheck
		p.Check = check
	}

	if check(len(p.current), len(p.victim)) {
		// GC time!
		p.GC()
	}
}

// GC performs a garbage collection pass: the old victim
// pool is dropped, and current is demoted to victim.
func (p *UnsafeSimplePool) GC() {
	p.victim, p.current = p.current, nil
}

// Size returns the total number of
// pointers held across both pools.
func (p *UnsafeSimplePool) Size() int {
	cur, vic := len(p.current), len(p.victim)
	return cur + vic
}

// defaultCheck is the default GC check function, flushing
// when current outgrows victim by more than 128 entries,
// or when the victim pool itself exceeds 256 entries.
func defaultCheck(current, victim int) bool {
	switch {
	case current-victim > 128:
		return true
	case victim > 256:
		return true
	default:
		return false
	}
}

View file

@ -26,14 +26,13 @@ const (
type MutexMap struct {
mapmu sync.Mutex
mumap hashmap
mupool mempool.UnsafePool
mupool mempool.UnsafeSimplePool
}
// checkInit ensures MutexMap is initialized (UNSAFE).
func (mm *MutexMap) checkInit() {
if mm.mumap.m == nil {
mm.mumap.init(0)
mm.mupool.DirtyFactor = 256
}
}
@ -175,13 +174,9 @@ func (mu *rwmutex) Lock(lt uint8) bool {
// sleeping goroutines waiting on this mutex.
func (mu *rwmutex) Unlock() bool {
switch mu.l--; {
case mu.l > 0 && mu.t == lockTypeWrite:
panic("BUG: multiple writer locks")
case mu.l < 0:
panic("BUG: negative lock count")
case mu.l == 0:
// Fully unlocked.
// Fully
// unlock.
mu.t = 0
// Awake all blocked goroutines and check
@ -197,11 +192,15 @@ func (mu *rwmutex) Unlock() bool {
// (before == after) => (waiters = 0)
return (before == after)
default:
// i.e. mutex still
// locked by others.
return false
case mu.l < 0:
panic("BUG: negative lock count")
case mu.t == lockTypeWrite:
panic("BUG: multiple write locks")
}
// i.e. mutex still
// locked by others.
return false
}
// WaitRelock expects a mutex to be passed in, already in the

View file

@ -4,10 +4,10 @@ import (
"os"
"reflect"
"strings"
"sync"
"unsafe"
"codeberg.org/gruf/go-byteutil"
"codeberg.org/gruf/go-mempool"
"codeberg.org/gruf/go-xunsafe"
)
@ -371,17 +371,15 @@ type index_entry struct {
key string
}
var index_entry_pool sync.Pool
var index_entry_pool mempool.UnsafePool
// new_index_entry returns a new prepared index_entry.
func new_index_entry() *index_entry {
v := index_entry_pool.Get()
if v == nil {
e := new(index_entry)
e.elem.data = unsafe.Pointer(e)
v = e
if ptr := index_entry_pool.Get(); ptr != nil {
return (*index_entry)(ptr)
}
entry := v.(*index_entry)
entry := new(index_entry)
entry.elem.data = unsafe.Pointer(entry)
return entry
}
@ -396,7 +394,8 @@ func free_index_entry(entry *index_entry) {
entry.key = ""
entry.index = nil
entry.item = nil
index_entry_pool.Put(entry)
ptr := unsafe.Pointer(entry)
index_entry_pool.Put(ptr)
}
func is_unique(f uint8) bool {

View file

@ -2,8 +2,9 @@ package structr
import (
"os"
"sync"
"unsafe"
"codeberg.org/gruf/go-mempool"
)
type indexed_item struct {
@ -19,17 +20,15 @@ type indexed_item struct {
indexed []*index_entry
}
var indexed_item_pool sync.Pool
var indexed_item_pool mempool.UnsafePool
// new_indexed_item returns a new prepared indexed_item.
func new_indexed_item() *indexed_item {
v := indexed_item_pool.Get()
if v == nil {
i := new(indexed_item)
i.elem.data = unsafe.Pointer(i)
v = i
if ptr := indexed_item_pool.Get(); ptr != nil {
return (*indexed_item)(ptr)
}
item := v.(*indexed_item)
item := new(indexed_item)
item.elem.data = unsafe.Pointer(item)
return item
}
@ -43,7 +42,8 @@ func free_indexed_item(item *indexed_item) {
return
}
item.data = nil
indexed_item_pool.Put(item)
ptr := unsafe.Pointer(item)
indexed_item_pool.Put(ptr)
}
// drop_index will drop the given index entry from item's indexed.

View file

@ -2,8 +2,9 @@ package structr
import (
"os"
"sync"
"unsafe"
"codeberg.org/gruf/go-mempool"
)
// elem represents an elem
@ -27,16 +28,14 @@ type list struct {
len int
}
var list_pool sync.Pool
var list_pool mempool.UnsafePool
// new_list returns a new prepared list.
func new_list() *list {
v := list_pool.Get()
if v == nil {
v = new(list)
if ptr := list_pool.Get(); ptr != nil {
return (*list)(ptr)
}
list := v.(*list)
return list
return new(list)
}
// free_list releases the list.
@ -48,11 +47,13 @@ func free_list(list *list) {
os.Stderr.WriteString(msg + "\n")
return
}
list_pool.Put(list)
ptr := unsafe.Pointer(list)
list_pool.Put(ptr)
}
// push_front will push the given elem to front (head) of list.
func (l *list) push_front(elem *list_elem) {
// Set new head.
oldHead := l.head
l.head = elem
@ -66,12 +67,14 @@ func (l *list) push_front(elem *list_elem) {
l.tail = elem
}
// Incr count
// Incr
// count
l.len++
}
// push_back will push the given elem to back (tail) of list.
func (l *list) push_back(elem *list_elem) {
// Set new tail.
oldTail := l.tail
l.tail = elem
@ -85,7 +88,8 @@ func (l *list) push_back(elem *list_elem) {
l.head = elem
}
// Incr count
// Incr
// count
l.len++
}
@ -131,7 +135,8 @@ func (l *list) insert(elem *list_elem, at *list_elem) {
elem.next = oldNext
}
// Incr count
// Incr
// count
l.len++
}
@ -174,6 +179,7 @@ func (l *list) remove(elem *list_elem) {
prev.next = next
}
// Decr count
// Decr
// count
l.len--
}

View file

@ -146,7 +146,7 @@ func find_field(t xunsafe.TypeIter, names []string) (sfield struct_field, ftype
sfield.mangle = mangler.Get(t)
// Calculate zero value string.
zptr := zero_value_field(o, sfield.offsets)
zptr := zero_value_ptr(o, sfield.offsets)
zstr := string(sfield.mangle(nil, zptr))
sfield.zerostr = zstr
sfield.zero = zptr
@ -154,7 +154,9 @@ func find_field(t xunsafe.TypeIter, names []string) (sfield struct_field, ftype
return
}
// zero_value ...
// zero_value iterates the type contained in TypeIter{} along the given
// next_offset{} values, creating new ptrs where necessary, returning the
// zero reflect.Value{} after fully iterating the next_offset{} slice.
func zero_value(t xunsafe.TypeIter, offsets []next_offset) reflect.Value {
v := reflect.New(t.Type).Elem()
for _, offset := range offsets {
@ -175,8 +177,8 @@ func zero_value(t xunsafe.TypeIter, offsets []next_offset) reflect.Value {
return v
}
// zero_value_field ...
func zero_value_field(t xunsafe.TypeIter, offsets []next_offset) unsafe.Pointer {
// zero_value_ptr returns the unsafe pointer address of the result of zero_value().
func zero_value_ptr(t xunsafe.TypeIter, offsets []next_offset) unsafe.Pointer {
	value := zero_value(t, offsets)
	return value.Addr().UnsafePointer()
}

View file

@ -8,6 +8,8 @@ import (
"strings"
"sync"
"unsafe"
"codeberg.org/gruf/go-mempool"
)
// Direction defines a direction
@ -1133,18 +1135,16 @@ func to_timeline_item(item *indexed_item) *timeline_item {
return to
}
var timeline_item_pool sync.Pool
var timeline_item_pool mempool.UnsafePool
// new_timeline_item returns a new prepared timeline_item.
func new_timeline_item() *timeline_item {
v := timeline_item_pool.Get()
if v == nil {
i := new(timeline_item)
i.elem.data = unsafe.Pointer(i)
i.ck = ^uint(0)
v = i
if ptr := timeline_item_pool.Get(); ptr != nil {
return (*timeline_item)(ptr)
}
item := v.(*timeline_item)
item := new(timeline_item)
item.elem.data = unsafe.Pointer(item)
item.ck = ^uint(0)
return item
}
@ -1159,5 +1159,6 @@ func free_timeline_item(item *timeline_item) {
}
item.data = nil
item.pk = nil
timeline_item_pool.Put(item)
ptr := unsafe.Pointer(item)
timeline_item_pool.Put(ptr)
}