This is a draft for the sync.Map API proposed in golang/go#18177. It supports fast-path loads via an atomic variable, falling back to a Mutex for stores. In order to keep stores amortized to O(1), loads following a store follow the Mutex path until enough loads have occurred to offset the cost of a deep copy. For mostly-read loads, such as the maps in the reflect package in the standard library, this significantly reduces cache-line contention vs. a plain RWMutex with a map. goos: linux goarch: amd64 pkg: golang.org/x/sync/syncmap BenchmarkLoadMostlyHits/*syncmap_test.DeepCopyMap 20000000 73.1 ns/op BenchmarkLoadMostlyHits/*syncmap_test.DeepCopyMap-48 100000000 13.8 ns/op BenchmarkLoadMostlyHits/*syncmap_test.RWMutexMap 20000000 87.7 ns/op BenchmarkLoadMostlyHits/*syncmap_test.RWMutexMap-48 10000000 154 ns/op BenchmarkLoadMostlyHits/*syncmap.Map 20000000 72.1 ns/op BenchmarkLoadMostlyHits/*syncmap.Map-48 100000000 11.2 ns/op BenchmarkLoadMostlyMisses/*syncmap_test.DeepCopyMap 20000000 63.2 ns/op BenchmarkLoadMostlyMisses/*syncmap_test.DeepCopyMap-48 200000000 14.2 ns/op BenchmarkLoadMostlyMisses/*syncmap_test.RWMutexMap 20000000 72.7 ns/op BenchmarkLoadMostlyMisses/*syncmap_test.RWMutexMap-48 10000000 150 ns/op BenchmarkLoadMostlyMisses/*syncmap.Map 30000000 56.4 ns/op BenchmarkLoadMostlyMisses/*syncmap.Map-48 200000000 9.77 ns/op BenchmarkLoadOrStoreBalanced/*syncmap_test.RWMutexMap 2000000 683 ns/op BenchmarkLoadOrStoreBalanced/*syncmap_test.RWMutexMap-48 1000000 1394 ns/op BenchmarkLoadOrStoreBalanced/*syncmap.Map 2000000 645 ns/op BenchmarkLoadOrStoreBalanced/*syncmap.Map-48 1000000 1253 ns/op BenchmarkLoadOrStoreUnique/*syncmap_test.RWMutexMap 1000000 1015 ns/op BenchmarkLoadOrStoreUnique/*syncmap_test.RWMutexMap-48 1000000 1911 ns/op BenchmarkLoadOrStoreUnique/*syncmap.Map 1000000 1018 ns/op BenchmarkLoadOrStoreUnique/*syncmap.Map-48 1000000 1776 ns/op BenchmarkLoadOrStoreCollision/*syncmap_test.DeepCopyMap 50000000 30.2 ns/op 
BenchmarkLoadOrStoreCollision/*syncmap_test.DeepCopyMap-48 2000000000 1.24 ns/op BenchmarkLoadOrStoreCollision/*syncmap_test.RWMutexMap 30000000 50.1 ns/op BenchmarkLoadOrStoreCollision/*syncmap_test.RWMutexMap-48 5000000 451 ns/op BenchmarkLoadOrStoreCollision/*syncmap.Map 30000000 36.8 ns/op BenchmarkLoadOrStoreCollision/*syncmap.Map-48 2000000000 1.24 ns/op BenchmarkAdversarialAlloc/*syncmap_test.DeepCopyMap 10000000 213 ns/op BenchmarkAdversarialAlloc/*syncmap_test.DeepCopyMap-48 1000000 5012 ns/op BenchmarkAdversarialAlloc/*syncmap_test.RWMutexMap 20000000 68.8 ns/op BenchmarkAdversarialAlloc/*syncmap_test.RWMutexMap-48 5000000 429 ns/op BenchmarkAdversarialAlloc/*syncmap.Map 5000000 229 ns/op BenchmarkAdversarialAlloc/*syncmap.Map-48 2000000 600 ns/op BenchmarkAdversarialDelete/*syncmap_test.DeepCopyMap 5000000 314 ns/op BenchmarkAdversarialDelete/*syncmap_test.DeepCopyMap-48 2000000 726 ns/op BenchmarkAdversarialDelete/*syncmap_test.RWMutexMap 20000000 63.2 ns/op BenchmarkAdversarialDelete/*syncmap_test.RWMutexMap-48 5000000 469 ns/op BenchmarkAdversarialDelete/*syncmap.Map 10000000 203 ns/op BenchmarkAdversarialDelete/*syncmap.Map-48 10000000 253 ns/op goos: linux goarch: ppc64le pkg: golang.org/x/sync/syncmap BenchmarkLoadMostlyHits/*syncmap_test.DeepCopyMap 5000000 253 ns/op BenchmarkLoadMostlyHits/*syncmap_test.DeepCopyMap-48 50000000 26.2 ns/op BenchmarkLoadMostlyHits/*syncmap_test.RWMutexMap 5000000 505 ns/op BenchmarkLoadMostlyHits/*syncmap_test.RWMutexMap-48 3000000 443 ns/op BenchmarkLoadMostlyHits/*syncmap.Map 10000000 200 ns/op BenchmarkLoadMostlyHits/*syncmap.Map-48 100000000 18.1 ns/op BenchmarkLoadMostlyMisses/*syncmap_test.DeepCopyMap 10000000 162 ns/op BenchmarkLoadMostlyMisses/*syncmap_test.DeepCopyMap-48 100000000 23.8 ns/op BenchmarkLoadMostlyMisses/*syncmap_test.RWMutexMap 10000000 195 ns/op BenchmarkLoadMostlyMisses/*syncmap_test.RWMutexMap-48 3000000 531 ns/op BenchmarkLoadMostlyMisses/*syncmap.Map 10000000 182 ns/op 
BenchmarkLoadMostlyMisses/*syncmap.Map-48 100000000 15.8 ns/op BenchmarkLoadOrStoreBalanced/*syncmap_test.RWMutexMap 1000000 1664 ns/op BenchmarkLoadOrStoreBalanced/*syncmap_test.RWMutexMap-48 1000000 1768 ns/op BenchmarkLoadOrStoreBalanced/*syncmap.Map 1000000 2128 ns/op BenchmarkLoadOrStoreBalanced/*syncmap.Map-48 1000000 1903 ns/op BenchmarkLoadOrStoreUnique/*syncmap_test.RWMutexMap 1000000 2657 ns/op BenchmarkLoadOrStoreUnique/*syncmap_test.RWMutexMap-48 1000000 2577 ns/op BenchmarkLoadOrStoreUnique/*syncmap.Map 1000000 1714 ns/op BenchmarkLoadOrStoreUnique/*syncmap.Map-48 1000000 2484 ns/op BenchmarkLoadOrStoreCollision/*syncmap_test.DeepCopyMap 10000000 130 ns/op BenchmarkLoadOrStoreCollision/*syncmap_test.DeepCopyMap-48 100000000 11.3 ns/op BenchmarkLoadOrStoreCollision/*syncmap_test.RWMutexMap 3000000 426 ns/op BenchmarkLoadOrStoreCollision/*syncmap_test.RWMutexMap-48 2000000 930 ns/op BenchmarkLoadOrStoreCollision/*syncmap.Map 10000000 131 ns/op BenchmarkLoadOrStoreCollision/*syncmap.Map-48 300000000 4.07 ns/op BenchmarkAdversarialAlloc/*syncmap_test.DeepCopyMap 3000000 447 ns/op BenchmarkAdversarialAlloc/*syncmap_test.DeepCopyMap-48 300000 4159 ns/op BenchmarkAdversarialAlloc/*syncmap_test.RWMutexMap 10000000 191 ns/op BenchmarkAdversarialAlloc/*syncmap_test.RWMutexMap-48 3000000 535 ns/op BenchmarkAdversarialAlloc/*syncmap.Map 2000000 525 ns/op BenchmarkAdversarialAlloc/*syncmap.Map-48 1000000 1000 ns/op BenchmarkAdversarialDelete/*syncmap_test.DeepCopyMap 2000000 711 ns/op BenchmarkAdversarialDelete/*syncmap_test.DeepCopyMap-48 2000000 900 ns/op BenchmarkAdversarialDelete/*syncmap_test.RWMutexMap 3000000 354 ns/op BenchmarkAdversarialDelete/*syncmap_test.RWMutexMap-48 3000000 473 ns/op BenchmarkAdversarialDelete/*syncmap.Map 2000000 1357 ns/op BenchmarkAdversarialDelete/*syncmap.Map-48 5000000 334 ns/op Updates golang/go#18177 Change-Id: I8d561b617b1cd2ca03a8e68a5d5a28a519a0ce38 Reviewed-on: https://go-review.googlesource.com/33912 Reviewed-by: Russ Cox 
<rsc@golang.org>
142 lines
3.3 KiB
Go
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syncmap_test
|
|
|
|
import (
|
|
"sync"
|
|
"sync/atomic"
|
|
)
|
|
|
|
// This file contains reference map implementations for unit-tests.

// mapInterface is the interface Map implements.
type mapInterface interface {
	// Load returns the value stored for key and reports whether it was present.
	Load(key interface{}) (interface{}, bool)
	// Store sets the value for key.
	Store(key, value interface{})
	// LoadOrStore returns the existing value for key if present; otherwise it
	// stores and returns the given value. loaded is true if the value was loaded.
	LoadOrStore(key, value interface{}) (actual interface{}, loaded bool)
	// Delete removes the entry for key, if any.
	Delete(key interface{})
	// Range calls f for each key/value pair until f returns false.
	Range(func(key, value interface{}) (shouldContinue bool))
}
// RWMutexMap is an implementation of mapInterface using a sync.RWMutex.
type RWMutexMap struct {
	mu    sync.RWMutex
	dirty map[interface{}]interface{}
}

// Load returns the value stored for key under a read lock.
func (m *RWMutexMap) Load(key interface{}) (value interface{}, ok bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	value, ok = m.dirty[key]
	return value, ok
}

// Store sets the value for key, lazily allocating the backing map on first use.
func (m *RWMutexMap) Store(key, value interface{}) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.dirty == nil {
		m.dirty = make(map[interface{}]interface{})
	}
	m.dirty[key] = value
}

// LoadOrStore returns the existing value for key if present; otherwise it
// stores value. Both the lookup and the store happen under one write lock.
func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if existing, ok := m.dirty[key]; ok {
		return existing, true
	}
	if m.dirty == nil {
		m.dirty = make(map[interface{}]interface{})
	}
	m.dirty[key] = value
	return value, false
}

// Delete removes the entry for key. Deleting from a nil map is a no-op.
func (m *RWMutexMap) Delete(key interface{}) {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.dirty, key)
}

// Range calls f for each entry while holding the read lock, stopping early
// if f returns false.
func (m *RWMutexMap) Range(f func(key, value interface{}) (shouldContinue bool)) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	for key, value := range m.dirty {
		if !f(key, value) {
			return
		}
	}
}
// DeepCopyMap is an implementation of mapInterface using a Mutex and
// atomic.Value. It makes deep copies of the map on every write to avoid
// acquiring the Mutex in Load.
type DeepCopyMap struct {
	mu    sync.Mutex
	clean atomic.Value
}

// snapshot returns the current read-only map; it is nil before the first write.
func (m *DeepCopyMap) snapshot() map[interface{}]interface{} {
	clean, _ := m.clean.Load().(map[interface{}]interface{})
	return clean
}

// Load looks key up in the current snapshot without taking any lock.
func (m *DeepCopyMap) Load(key interface{}) (value interface{}, ok bool) {
	value, ok = m.snapshot()[key]
	return value, ok
}

// Store publishes a fresh copy of the map containing the new entry.
func (m *DeepCopyMap) Store(key, value interface{}) {
	m.mu.Lock()
	defer m.mu.Unlock()
	next := m.dirty()
	next[key] = value
	m.clean.Store(next)
}

// LoadOrStore returns the existing value for key if present; otherwise it
// publishes a new copy containing value. The fast path avoids the Mutex.
func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	if actual, loaded = m.snapshot()[key]; loaded {
		return actual, loaded
	}

	m.mu.Lock()
	defer m.mu.Unlock()
	// Reload clean in case it changed while we were waiting on m.mu.
	if actual, loaded = m.snapshot()[key]; loaded {
		return actual, loaded
	}
	next := m.dirty()
	next[key] = value
	m.clean.Store(next)
	return value, false
}

// Delete publishes a fresh copy of the map without the entry for key.
func (m *DeepCopyMap) Delete(key interface{}) {
	m.mu.Lock()
	defer m.mu.Unlock()
	next := m.dirty()
	delete(next, key)
	m.clean.Store(next)
}

// Range iterates over the snapshot current at the time of the call, stopping
// early if f returns false. Concurrent writes are not observed.
func (m *DeepCopyMap) Range(f func(key, value interface{}) (shouldContinue bool)) {
	for key, value := range m.snapshot() {
		if !f(key, value) {
			return
		}
	}
}

// dirty returns a mutable deep copy of the current snapshot, sized with one
// extra slot for the write that prompted the copy.
func (m *DeepCopyMap) dirty() map[interface{}]interface{} {
	clean := m.snapshot()
	next := make(map[interface{}]interface{}, len(clean)+1)
	for key, value := range clean {
		next[key] = value
	}
	return next
}