1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
|
// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
package typedsync
import (
"sync"
)
// Value is a typed equivalent of sync/atomic.Value.
//
// It is not actually a wrapper around sync/atomic.Value for
// allocation-performance reasons.
type Value[T comparable] struct {
	mu  sync.Mutex // guards ok and val
	ok  bool       // true once a value has been stored via Store or Swap
	val T          // the current value; only meaningful when ok is true
}
// This uses a dumb mutex-based solution because
//
// 1. Performance is good enough, because in the fast-path mutexes
// use the same compare-and-swap as sync/atomic.Value; and because
// all of these methods are short we're unlikely to hit the
// mutex's slow path.
//
// 2. We could use sync/atomic.Pointer[T], which by itself would have
// the same performance characteristics as sync/atomic.Value but
// without the benefit of runtime_procPin()/runtime_procUnpin().
// We want to avoid that because it means we're doing an
// allocation for every store/swap; avoiding that is our whole
// reason for not just wrapping sync/atomic.Value. So then we'd
// want to use a Pool to reuse allocations; but (1) that adds more
// sync-overhead, and (2) it also gets trickier because we'd have
// to be careful about not adding a pointer back to the pool when
// load has grabbed the pointer but not yet dereferenced it.
// Load returns the value set by the most recent Store (or Swap), with
// ok reporting whether any value has been stored yet.  If no value has
// been stored, val is the zero value of T.
func (v *Value[T]) Load() (val T, ok bool) {
	v.mu.Lock()
	val, ok = v.val, v.ok
	v.mu.Unlock()
	return val, ok
}
// Store sets the value of v to val, marking v as having been set.
func (v *Value[T]) Store(val T) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.val = val
	v.ok = true
}
// Swap stores newV into v and returns the previous value.  oldOK
// reports whether v had been set before the call; if it had not,
// oldV is the zero value of T.
func (v *Value[T]) Swap(newV T) (oldV T, oldOK bool) {
	v.mu.Lock()
	prevVal, prevOK := v.val, v.ok
	v.val, v.ok = newV, true
	v.mu.Unlock()
	return prevVal, prevOK
}
// CompareAndSwap stores newV into v if and only if v has been set and
// its current value equals oldV; swapped reports whether the store
// happened.  Unlike sync/atomic.Value, there is no way to
// compare-and-swap into an unset Value (comparable T has no nil
// sentinel), so CompareAndSwap on an unset Value always returns false.
func (v *Value[T]) CompareAndSwap(oldV, newV T) (swapped bool) {
	v.mu.Lock()
	defer v.mu.Unlock()
	if v.ok && v.val == oldV {
		v.val = newV
		return true
	}
	return false
}
|