author    Luke Shumaker <lukeshu@lukeshu.com>  2023-01-09 14:23:23 -0700
committer Luke Shumaker <lukeshu@lukeshu.com>  2023-02-12 16:16:53 -0700
commit    d91f8ce17a6fc165fafd9dc921911233a69c34d2
tree      808a86b8f2309fe344b4cb0af62101b8a3b8b59a
parent    1d7f5446dc37687f078269af3c63af7d7ebbfab4

tree-wide: Migrate to the new ARCache
-rw-r--r--  go.mod                                                      |  1
-rw-r--r--  go.sum                                                      |  2
-rw-r--r--  lib/btrfs/io4_fs.go                                         | 24
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go  | 18
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go     | 14
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go     | 12
-rw-r--r--  lib/btrfsprogs/btrfsutil/skinny_paths.go                    | 22
-rw-r--r--  lib/containers/arcache.go                                   |  2
-rw-r--r--  lib/containers/lru.go                                       | 78
-rw-r--r--  lib/containers/lrucache.go                                  |  2
-rw-r--r--  lib/containers/maputil.go                                   | 32
-rw-r--r--  lib/containers/sortedmap.go                                 |  9
-rw-r--r--  lib/diskio/file_blockbuf.go                                 | 18
13 files changed, 100 insertions, 134 deletions
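
The migration follows the same pattern in every file below: the heap-allocated containers.LRUCache (constructed with NewLRUCache and driven with Add/Get/Remove/GetOrElse) is replaced by a containers.ARCache embedded by value, configured via its MaxLen field, and driven through the new generic Map API (Store/Load/Delete) plus the free function containers.LoadOrElse. A minimal before/after sketch of that pattern; the import path and the main wrapper are illustrative assumptions, not part of this commit:

package main

import (
	"fmt"

	"git.lukeshu.com/btrfs-progs-ng/lib/containers" // assumed import path
)

func main() {
	// Old (removed in this commit):
	//   cache := containers.NewLRUCache[string, int](128)
	//   cache.Add("k", 1)
	//   v, ok := cache.Get("k")
	//   v = cache.GetOrElse("k2", func() int { return 2 })

	// New: a zero-value ARCache is usable once MaxLen is set, so the
	// cache can be embedded by value instead of held behind a pointer.
	var cache containers.ARCache[string, int]
	cache.MaxLen = 128

	cache.Store("k", 1)      // was cache.Add(...)
	v, ok := cache.Load("k") // was cache.Get(...)
	fmt.Println(v, ok)

	// GetOrElse moves off the cache type and becomes the generic
	// LoadOrElse helper, usable with any containers.Map.
	v = containers.LoadOrElse[string, int](&cache, "k2", func(string) int { return 2 })
	fmt.Println(v)

	cache.Delete("k") // was cache.Remove(...)
}
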
diff --git a/go.mod b/go.mod
index f2e26e7..3cd0cac 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,6 @@ require (
github.com/datawire/dlib v1.3.0
github.com/datawire/ocibuild v0.0.3-0.20220423003204-fc6a4e9f90dc
github.com/davecgh/go-spew v1.1.1
- github.com/hashicorp/golang-lru v0.5.4
github.com/jacobsa/fuse v0.0.0-20220702091825-13117049f383
github.com/spf13/cobra v1.5.0
github.com/spf13/pflag v1.0.5
diff --git a/go.sum b/go.sum
index 61b0f6d..716bc99 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,6 @@ github.com/datawire/ocibuild v0.0.3-0.20220423003204-fc6a4e9f90dc/go.mod h1:d10Q
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
diff --git a/lib/btrfs/io4_fs.go b/lib/btrfs/io4_fs.go
index fce9c76..adc0928 100644
--- a/lib/btrfs/io4_fs.go
+++ b/lib/btrfs/io4_fs.go
@@ -75,10 +75,10 @@ type Subvolume struct {
rootVal btrfsitem.Root
rootErr error
- bareInodeCache *containers.LRUCache[btrfsprim.ObjID, *BareInode]
- fullInodeCache *containers.LRUCache[btrfsprim.ObjID, *FullInode]
- dirCache *containers.LRUCache[btrfsprim.ObjID, *Dir]
- fileCache *containers.LRUCache[btrfsprim.ObjID, *File]
+ bareInodeCache containers.ARCache[btrfsprim.ObjID, *BareInode]
+ fullInodeCache containers.ARCache[btrfsprim.ObjID, *FullInode]
+ dirCache containers.ARCache[btrfsprim.ObjID, *Dir]
+ fileCache containers.ARCache[btrfsprim.ObjID, *File]
}
func (sv *Subvolume) init() {
@@ -97,10 +97,10 @@ func (sv *Subvolume) init() {
}
}
- sv.bareInodeCache = containers.NewLRUCache[btrfsprim.ObjID, *BareInode](textui.Tunable(128))
- sv.fullInodeCache = containers.NewLRUCache[btrfsprim.ObjID, *FullInode](textui.Tunable(128))
- sv.dirCache = containers.NewLRUCache[btrfsprim.ObjID, *Dir](textui.Tunable(128))
- sv.fileCache = containers.NewLRUCache[btrfsprim.ObjID, *File](textui.Tunable(128))
+ sv.bareInodeCache.MaxLen = textui.Tunable(128)
+ sv.fullInodeCache.MaxLen = textui.Tunable(128)
+ sv.dirCache.MaxLen = textui.Tunable(128)
+ sv.fileCache.MaxLen = textui.Tunable(128)
})
}
@@ -111,7 +111,7 @@ func (sv *Subvolume) GetRootInode() (btrfsprim.ObjID, error) {
func (sv *Subvolume) LoadBareInode(inode btrfsprim.ObjID) (*BareInode, error) {
sv.init()
- val := sv.bareInodeCache.GetOrElse(inode, func() (val *BareInode) {
+ val := containers.LoadOrElse[btrfsprim.ObjID, *BareInode](&sv.bareInodeCache, inode, func(inode btrfsprim.ObjID) (val *BareInode) {
val = &BareInode{
Inode: inode,
}
@@ -144,7 +144,7 @@ func (sv *Subvolume) LoadBareInode(inode btrfsprim.ObjID) (*BareInode, error) {
func (sv *Subvolume) LoadFullInode(inode btrfsprim.ObjID) (*FullInode, error) {
sv.init()
- val := sv.fullInodeCache.GetOrElse(inode, func() (val *FullInode) {
+ val := containers.LoadOrElse[btrfsprim.ObjID, *FullInode](&sv.fullInodeCache, inode, func(inode btrfsprim.ObjID) (val *FullInode) {
val = &FullInode{
BareInode: BareInode{
Inode: inode,
@@ -200,7 +200,7 @@ func (sv *Subvolume) LoadFullInode(inode btrfsprim.ObjID) (*FullInode, error) {
func (sv *Subvolume) LoadDir(inode btrfsprim.ObjID) (*Dir, error) {
sv.init()
- val := sv.dirCache.GetOrElse(inode, func() (val *Dir) {
+ val := containers.LoadOrElse[btrfsprim.ObjID, *Dir](&sv.dirCache, inode, func(inode btrfsprim.ObjID) (val *Dir) {
val = new(Dir)
fullInode, err := sv.LoadFullInode(inode)
if err != nil {
@@ -336,7 +336,7 @@ func (dir *Dir) AbsPath() (string, error) {
func (sv *Subvolume) LoadFile(inode btrfsprim.ObjID) (*File, error) {
sv.init()
- val := sv.fileCache.GetOrElse(inode, func() (val *File) {
+ val := containers.LoadOrElse[btrfsprim.ObjID, *File](&sv.fileCache, inode, func(inode btrfsprim.ObjID) (val *File) {
val = new(File)
fullInode, err := sv.LoadFullInode(inode)
if err != nil {
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go
index 45a5bb2..3eeea7f 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go
@@ -64,9 +64,9 @@ type RebuiltForrest struct {
// mutable
trees typedsync.Map[btrfsprim.ObjID, *RebuiltTree]
- leafs *containers.LRUCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]
- incItems *containers.LRUCache[btrfsprim.ObjID, *itemIndex]
- excItems *containers.LRUCache[btrfsprim.ObjID, *itemIndex]
+ leafs containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]
+ incItems containers.ARCache[btrfsprim.ObjID, *itemIndex]
+ excItems containers.ARCache[btrfsprim.ObjID, *itemIndex]
}
// NewRebuiltForrest returns a new RebuiltForrest instance. All of
@@ -86,9 +86,15 @@ func NewRebuiltForrest(
cbLookupRoot: cbLookupRoot,
cbLookupUUID: cbLookupUUID,
- leafs: containers.NewLRUCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]](textui.Tunable(8)),
- incItems: containers.NewLRUCache[btrfsprim.ObjID, *itemIndex](textui.Tunable(8)),
- excItems: containers.NewLRUCache[btrfsprim.ObjID, *itemIndex](textui.Tunable(8)),
+ leafs: containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]{
+ MaxLen: textui.Tunable(8),
+ },
+ incItems: containers.ARCache[btrfsprim.ObjID, *itemIndex]{
+ MaxLen: textui.Tunable(8),
+ },
+ excItems: containers.ARCache[btrfsprim.ObjID, *itemIndex]{
+ MaxLen: textui.Tunable(8),
+ },
}
}
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go
index 66cb0fa..c9d0fa4 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go
@@ -49,7 +49,7 @@ type RebuiltTree struct {
// leafToRoots returns all leafs (lvl=0) in the filesystem that pass
// .isOwnerOK, whether or not they're in the tree.
func (tree *RebuiltTree) leafToRoots(ctx context.Context) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] {
- return tree.forrest.leafs.GetOrElse(tree.ID, func() map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] {
+ return containers.LoadOrElse[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]](&tree.forrest.leafs, tree.ID, func(btrfsprim.ObjID) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] {
ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-nodes", fmt.Sprintf("tree=%v", tree.ID))
nodeToRoots := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr])
@@ -139,7 +139,7 @@ func (tree *RebuiltTree) isOwnerOK(owner btrfsprim.ObjID, gen btrfsprim.Generati
// RebuiltTree's internal map!
func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] {
ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-inc-items", fmt.Sprintf("tree=%v", tree.ID))
- return tree.items(ctx, tree.forrest.incItems, tree.Roots.HasAny)
+ return tree.items(ctx, &tree.forrest.incItems, tree.Roots.HasAny)
}
// PotentialItems returns a map of items that could be added to this
@@ -149,7 +149,7 @@ func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsp
// RebuiltTree's internal map!
func (tree *RebuiltTree) PotentialItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] {
ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-exc-items", fmt.Sprintf("tree=%v", tree.ID))
- return tree.items(ctx, tree.forrest.excItems,
+ return tree.items(ctx, &tree.forrest.excItems,
func(roots containers.Set[btrfsvol.LogicalAddr]) bool {
return !tree.Roots.HasAny(roots)
})
@@ -168,13 +168,13 @@ func (s itemStats) String() string {
s.Leafs, s.NumItems, s.NumDups)
}
-func (tree *RebuiltTree) items(ctx context.Context, cache *containers.LRUCache[btrfsprim.ObjID, *itemIndex],
+func (tree *RebuiltTree) items(ctx context.Context, cache containers.Map[btrfsprim.ObjID, *itemIndex],
leafFn func(roots containers.Set[btrfsvol.LogicalAddr]) bool,
) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] {
tree.mu.RLock()
defer tree.mu.RUnlock()
- return cache.GetOrElse(tree.ID, func() *itemIndex {
+ return containers.LoadOrElse(cache, tree.ID, func(btrfsprim.ObjID) *itemIndex {
var leafs []btrfsvol.LogicalAddr
for leaf, roots := range tree.leafToRoots(ctx) {
if leafFn(roots) {
@@ -298,8 +298,8 @@ func (tree *RebuiltTree) AddRoot(ctx context.Context, rootNode btrfsvol.LogicalA
progressWriter.Done()
tree.Roots.Insert(rootNode)
- tree.forrest.incItems.Remove(tree.ID) // force re-gen
- tree.forrest.excItems.Remove(tree.ID) // force re-gen
+ tree.forrest.incItems.Delete(tree.ID) // force re-gen
+ tree.forrest.excItems.Delete(tree.ID) // force re-gen
if (tree.ID == btrfsprim.ROOT_TREE_OBJECTID || tree.ID == btrfsprim.UUID_TREE_OBJECTID) && stats.AddedItems > 0 {
tree.forrest.trees.Range(func(otherTreeID btrfsprim.ObjID, otherTree *RebuiltTree) bool {
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
index 64a9828..a85b78e 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -48,7 +48,7 @@ type Handle struct {
Names map[ItemPtr][]byte // DIR_INDEX
Sizes map[ItemPtr]SizeAndErr // EXTENT_CSUM and EXTENT_DATA
- cache *containers.LRUCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]]
+ cache containers.ARCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]]
}
func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) *Handle {
@@ -60,7 +60,9 @@ func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock)
Names: make(map[ItemPtr][]byte),
Sizes: make(map[ItemPtr]SizeAndErr),
- cache: containers.NewLRUCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]](textui.Tunable(8)),
+ cache: containers.ARCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]]{
+ MaxLen: textui.Tunable(8),
+ },
}
}
@@ -113,7 +115,7 @@ func (o *Handle) SetGraph(graph graph.Graph) {
}
func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] {
- if cached, ok := o.cache.Get(laddr); ok {
+ if cached, ok := o.cache.Load(laddr); ok {
dlog.Tracef(ctx, "cache-hit node@%v", laddr)
return cached
}
@@ -142,7 +144,7 @@ func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *disk
panic(fmt.Errorf("should not happen: i/o error: %w", err))
}
- o.cache.Add(laddr, ref)
+ o.cache.Store(laddr, ref)
return ref
}
diff --git a/lib/btrfsprogs/btrfsutil/skinny_paths.go b/lib/btrfsprogs/btrfsutil/skinny_paths.go
index 6a51739..4c314ec 100644
--- a/lib/btrfsprogs/btrfsutil/skinny_paths.go
+++ b/lib/btrfsprogs/btrfsutil/skinny_paths.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -29,21 +29,13 @@ type SkinnyPathArena struct {
SB btrfstree.Superblock
fatRoots map[btrfsvol.LogicalAddr]btrfstree.TreePathElem
- fatItems *containers.LRUCache[skinnyItem, btrfstree.TreePathElem]
+ fatItems containers.ARCache[skinnyItem, btrfstree.TreePathElem]
}
func (a *SkinnyPathArena) init() {
if a.fatRoots == nil {
a.fatRoots = make(map[btrfsvol.LogicalAddr]btrfstree.TreePathElem)
- // This cache size is sorta arbitrary. At first I figured
- // "let's allow 1GB of cached items", and figured 67bytes per
- // item, that's about 16M items. But with overhead of the
- // LRUCache, it's actually a lot higher than that. So then I
- // cut it to .5M, and that cut my total memory use to ~8GB,
- // which is a good number for me. Then I taught it to do a
- // better job of recovering trees, and so the memory grew, and I
- // cut it to 64K. Then to 8K. Then grew it to 128K.
- a.fatItems = containers.NewLRUCache[skinnyItem, btrfstree.TreePathElem](textui.Tunable(128 * 1024))
+ a.fatItems.MaxLen = textui.Tunable(128 * 1024)
}
}
@@ -54,7 +46,7 @@ func (a *SkinnyPathArena) getItem(parent btrfstree.TreePath, itemIdx int) (btrfs
a.init()
- ret, ok := a.fatItems.Get(skinnyItem{
+ ret, ok := a.fatItems.Load(skinnyItem{
Node: parent.Node(-1).ToNodeAddr,
Item: itemIdx,
})
@@ -84,7 +76,7 @@ func (a *SkinnyPathArena) getItem(parent btrfstree.TreePath, itemIdx int) (btrfs
ToKey: item.Key,
ToMaxKey: toMaxKey,
}
- a.fatItems.Add(skinnyItem{Node: parent.Node(-1).ToNodeAddr, Item: i}, elem)
+ a.fatItems.Store(skinnyItem{Node: parent.Node(-1).ToNodeAddr, Item: i}, elem)
if i == itemIdx {
ret = elem
}
@@ -100,7 +92,7 @@ func (a *SkinnyPathArena) getItem(parent btrfstree.TreePath, itemIdx int) (btrfs
ToKey: item.Key,
ToMaxKey: item.Key,
}
- a.fatItems.Add(skinnyItem{Node: parent.Node(-1).ToNodeAddr, Item: i}, elem)
+ a.fatItems.Store(skinnyItem{Node: parent.Node(-1).ToNodeAddr, Item: i}, elem)
if i == itemIdx {
ret = elem
}
@@ -121,7 +113,7 @@ func (a *SkinnyPathArena) Deflate(fat btrfstree.TreePath) SkinnyPath {
a.fatRoots[elem.ToNodeAddr] = elem
ret.Root = elem.ToNodeAddr
} else {
- a.fatItems.Add(skinnyItem{Node: prevNode, Item: elem.FromItemIdx}, elem)
+ a.fatItems.Store(skinnyItem{Node: prevNode, Item: elem.FromItemIdx}, elem)
ret.Items = append(ret.Items, elem.FromItemIdx)
}
prevNode = elem.ToNodeAddr
diff --git a/lib/containers/arcache.go b/lib/containers/arcache.go
index ad551e9..1dc3b7e 100644
--- a/lib/containers/arcache.go
+++ b/lib/containers/arcache.go
@@ -53,6 +53,8 @@ type ARCache[K comparable, V any] struct {
noopChecker //nolint:unused // False positive; it is used.
}
+var _ Map[int, string] = (*ARCache[int, string])(nil)
+
//nolint:unused // False positive; it is used.
type noopChecker struct{}
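
The var _ Map[int, string] = (*ARCache[int, string])(nil) line added above (and the matching assertions added to lrucache.go and sortedmap.go below) is the standard Go compile-time interface-satisfaction check: the conversion compiles to nothing at runtime, but the build fails if the type ever stops implementing the interface. A standalone illustration of the idiom, with hypothetical names not taken from the repository:

package main

// Hypothetical interface and implementation, only to illustrate the
// compile-time assertion idiom used in this commit.
type Greeter interface{ Greet() string }

type englishGreeter struct{}

func (englishGreeter) Greet() string { return "hello" }

// Costs nothing at runtime; breaks the build if englishGreeter ever
// stops satisfying Greeter.
var _ Greeter = englishGreeter{}

func main() {}
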
diff --git a/lib/containers/lru.go b/lib/containers/lru.go
deleted file mode 100644
index aa372ed..0000000
--- a/lib/containers/lru.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
-//
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-package containers
-
-import (
- lru "github.com/hashicorp/golang-lru"
-)
-
-// LRUCache is a least-recently-used(ish) cache. A zero LRUCache is
-// not usable; it must be initialized with NewLRUCache.
-type LRUCache[K comparable, V any] struct {
- inner *lru.ARCCache
-}
-
-func NewLRUCache[K comparable, V any](size int) *LRUCache[K, V] {
- c := new(LRUCache[K, V])
- c.inner, _ = lru.NewARC(size)
- return c
-}
-
-func (c *LRUCache[K, V]) Add(key K, value V) {
- c.inner.Add(key, value)
-}
-
-func (c *LRUCache[K, V]) Contains(key K) bool {
- return c.inner.Contains(key)
-}
-
-func (c *LRUCache[K, V]) Get(key K) (value V, ok bool) {
- _value, ok := c.inner.Get(key)
- if ok {
- //nolint:forcetypeassert // Typed wrapper around untyped lib.
- value = _value.(V)
- }
- return value, ok
-}
-
-func (c *LRUCache[K, V]) Keys() []K {
- untyped := c.inner.Keys()
- typed := make([]K, len(untyped))
- for i := range untyped {
- //nolint:forcetypeassert // Typed wrapper around untyped lib.
- typed[i] = untyped[i].(K)
- }
- return typed
-}
-
-func (c *LRUCache[K, V]) Len() int {
- return c.inner.Len()
-}
-
-func (c *LRUCache[K, V]) Peek(key K) (value V, ok bool) {
- _value, ok := c.inner.Peek(key)
- if ok {
- //nolint:forcetypeassert // Typed wrapper around untyped lib.
- value = _value.(V)
- }
- return value, ok
-}
-
-func (c *LRUCache[K, V]) Purge() {
- c.inner.Purge()
-}
-
-func (c *LRUCache[K, V]) Remove(key K) {
- c.inner.Remove(key)
-}
-
-func (c *LRUCache[K, V]) GetOrElse(key K, fn func() V) V {
- var value V
- var ok bool
- for value, ok = c.Get(key); !ok; value, ok = c.Get(key) {
- c.Add(key, fn())
- }
- return value
-}
diff --git a/lib/containers/lrucache.go b/lib/containers/lrucache.go
index e7a8d62..94094b9 100644
--- a/lib/containers/lrucache.go
+++ b/lib/containers/lrucache.go
@@ -36,6 +36,8 @@ type lruCache[K comparable, V any] struct {
byName map[K]*LinkedListEntry[lruEntry[K, V]]
}
+var _ Map[int, string] = (*lruCache[int, string])(nil)
+
func (c *lruCache[K, V]) rem(entry *LinkedListEntry[lruEntry[K, V]]) {
k, v := entry.Value.key, entry.Value.val
delete(c.byName, entry.Value.key)
diff --git a/lib/containers/maputil.go b/lib/containers/maputil.go
new file mode 100644
index 0000000..4d49d2a
--- /dev/null
+++ b/lib/containers/maputil.go
@@ -0,0 +1,32 @@
+// Copyright (C) 2023 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package containers
+
+type Map[K comparable, V any] interface {
+ Store(K, V)
+ Load(K) (V, bool)
+ Has(K) bool
+ Delete(K)
+ Len() int
+}
+
+type RangeMap[K comparable, V any] interface {
+ Map[K, V]
+ Range(func(K, V) bool)
+}
+
+type SubrangeMap[K comparable, V any] interface {
+ RangeMap[K, V]
+ Subrange(rangeFn func(K, V) int, handleFn func(K, V) bool)
+}
+
+func LoadOrElse[K comparable, V any](m Map[K, V], k K, vFn func(K) V) V {
+ if v, ok := m.Load(k); ok {
+ return v
+ }
+ v := vFn(k)
+ m.Store(k, v)
+ return v
+}
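
LoadOrElse is the generic replacement for the old per-cache GetOrElse: it consults any containers.Map, and only on a miss does it call the callback and store the result. A small usage sketch against a throwaway Map implementation backed by a plain Go map; the adapter type, the import path, and the main wrapper are assumptions for illustration, while the interface itself is exactly the one added above:

package main

import (
	"fmt"

	"git.lukeshu.com/btrfs-progs-ng/lib/containers" // assumed import path
)

// gomap is a throwaway containers.Map implementation over a built-in
// map, just to show what LoadOrElse requires of its argument.
type gomap[K comparable, V any] map[K]V

func (m gomap[K, V]) Store(k K, v V)     { m[k] = v }
func (m gomap[K, V]) Load(k K) (V, bool) { v, ok := m[k]; return v, ok }
func (m gomap[K, V]) Has(k K) bool       { _, ok := m[k]; return ok }
func (m gomap[K, V]) Delete(k K)         { delete(m, k) }
func (m gomap[K, V]) Len() int           { return len(m) }

var _ containers.Map[string, int] = gomap[string, int]{}

func main() {
	m := gomap[string, int]{}

	// Miss: the callback runs and its result is stored.
	fmt.Println(containers.LoadOrElse[string, int](m, "answer", func(string) int { return 42 }))

	// Hit: the stored 42 is returned; the callback is never invoked.
	fmt.Println(containers.LoadOrElse[string, int](m, "answer", func(string) int { return 0 }))
}
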
diff --git a/lib/containers/sortedmap.go b/lib/containers/sortedmap.go
index d104274..ebce685 100644
--- a/lib/containers/sortedmap.go
+++ b/lib/containers/sortedmap.go
@@ -17,6 +17,8 @@ type SortedMap[K Ordered[K], V any] struct {
inner RBTree[orderedKV[K, V]]
}
+var _ SubrangeMap[NativeOrdered[int], string] = (*SortedMap[NativeOrdered[int], string])(nil)
+
func (m *SortedMap[K, V]) Delete(key K) {
m.inner.Delete(m.inner.Search(func(kv orderedKV[K, V]) int {
return key.Compare(kv.K)
@@ -34,6 +36,13 @@ func (m *SortedMap[K, V]) Load(key K) (value V, ok bool) {
return node.Value.V, true
}
+func (m *SortedMap[K, V]) Has(key K) bool {
+ node := m.inner.Search(func(kv orderedKV[K, V]) int {
+ return key.Compare(kv.K)
+ })
+ return node != nil
+}
+
func (m *SortedMap[K, V]) Store(key K, value V) {
m.inner.Insert(orderedKV[K, V]{
K: key,
diff --git a/lib/diskio/file_blockbuf.go b/lib/diskio/file_blockbuf.go
index 77b823c..15ae13b 100644
--- a/lib/diskio/file_blockbuf.go
+++ b/lib/diskio/file_blockbuf.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -19,16 +19,18 @@ type bufferedFile[A ~int64] struct {
inner File[A]
mu sync.RWMutex
blockSize A
- blockCache *containers.LRUCache[A, bufferedBlock]
+ blockCache containers.ARCache[A, bufferedBlock]
}
var _ File[assertAddr] = (*bufferedFile[assertAddr])(nil)
func NewBufferedFile[A ~int64](file File[A], blockSize A, cacheSize int) *bufferedFile[A] {
return &bufferedFile[A]{
- inner: file,
- blockSize: blockSize,
- blockCache: containers.NewLRUCache[A, bufferedBlock](cacheSize),
+ inner: file,
+ blockSize: blockSize,
+ blockCache: containers.ARCache[A, bufferedBlock]{
+ MaxLen: cacheSize,
+ },
}
}
@@ -53,13 +55,13 @@ func (bf *bufferedFile[A]) maybeShortReadAt(dat []byte, off A) (n int, err error
defer bf.mu.RUnlock()
offsetWithinBlock := off % bf.blockSize
blockOffset := off - offsetWithinBlock
- cachedBlock, ok := bf.blockCache.Get(blockOffset)
+ cachedBlock, ok := bf.blockCache.Load(blockOffset)
if !ok {
cachedBlock.Dat = make([]byte, bf.blockSize)
n, err := bf.inner.ReadAt(cachedBlock.Dat, blockOffset)
cachedBlock.Dat = cachedBlock.Dat[:n]
cachedBlock.Err = err
- bf.blockCache.Add(blockOffset, cachedBlock)
+ bf.blockCache.Store(blockOffset, cachedBlock)
}
n = copy(dat, cachedBlock.Dat[offsetWithinBlock:])
if n < len(dat) {
@@ -77,7 +79,7 @@ func (bf *bufferedFile[A]) WriteAt(dat []byte, off A) (n int, err error) {
// Cache invalidation
for blockOffset := off - (off % bf.blockSize); blockOffset < off+A(n); blockOffset += bf.blockSize {
- bf.blockCache.Remove(blockOffset)
+ bf.blockCache.Delete(blockOffset)
}
return