author     Luke Shumaker <lukeshu@lukeshu.com>  2023-03-30 10:04:09 -0600
committer  Luke Shumaker <lukeshu@lukeshu.com>  2023-03-30 10:04:09 -0600
commit     cdc2df19c59965149e11c3a5710458c626ea0668 (patch)
tree       2e0bafd99a433d7271bf42e68ab1b5c1eba99e2b /lib
parent     0b092a27122fcf19479d6cdeae5f7c9493d9741a (diff)
parent     94aa0ec3e9f7145cdf177ad6f6d3d8b7d5bdbef7 (diff)
Merge branch 'lukeshu/node-cache'
Diffstat (limited to 'lib')
-rw-r--r--  lib/btrfs/btrfstree/btree.go          |  4
-rw-r--r--  lib/btrfs/btrfstree/btree_forrest.go  |  7
-rw-r--r--  lib/btrfs/btrfstree/btree_tree.go     | 14
-rw-r--r--  lib/btrfs/btrfstree/node_exp.go       | 67
-rw-r--r--  lib/btrfs/btrfstree/readnode.go       | 24
-rw-r--r--  lib/btrfs/btrfstree/types_node.go     | 66
-rw-r--r--  lib/btrfs/io2_lv.go                   |  3
-rw-r--r--  lib/btrfs/io3_btree.go                | 69
-rw-r--r--  lib/btrfsutil/graph.go                |  6
-rw-r--r--  lib/btrfsutil/old_rebuilt_forrest.go  | 17
-rw-r--r--  lib/btrfsutil/rebuilt_forrest.go      | 10
-rw-r--r--  lib/btrfsutil/rebuilt_readitem.go     | 34
-rw-r--r--  lib/btrfsutil/scan.go                 |  4
-rw-r--r--  lib/containers/arcache.go             | 42
-rw-r--r--  lib/containers/lrucache.go            | 36
15 files changed, 240 insertions, 163 deletions
diff --git a/lib/btrfs/btrfstree/btree.go b/lib/btrfs/btrfstree/btree.go
index bc1bac2..4f5d21b 100644
--- a/lib/btrfs/btrfstree/btree.go
+++ b/lib/btrfs/btrfstree/btree.go
@@ -11,6 +11,7 @@ import (
"fmt"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
)
type TreeSearcher interface {
@@ -96,5 +97,6 @@ func (e *TreeError) Error() string {
type NodeSource interface {
Superblock() (*Superblock, error)
- ReadNode(Path) (*Node, error)
+ AcquireNode(ctx context.Context, addr btrfsvol.LogicalAddr, exp NodeExpectations) (*Node, error)
+ ReleaseNode(*Node)
}
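
Illustration (not part of the patch): with the interface change above, callers no longer hand a Path to ReadNode; they acquire a node by logical address plus a set of expectations, and must pair every acquire with a release so the backing cache can unpin the entry. A minimal caller sketch, assuming only the NodeSource interface as changed above (dumpNode and its output format are made up for this example):

package example // hypothetical illustration, not part of the patch

import (
	"context"
	"fmt"

	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
	"git.lukeshu.com/btrfs-progs-ng/lib/containers"
)

// dumpNode is a hypothetical caller of the new acquire/release API.
func dumpNode(ctx context.Context, src btrfstree.NodeSource, addr btrfsvol.LogicalAddr) error {
	node, err := src.AcquireNode(ctx, addr, btrfstree.NodeExpectations{
		LAddr: containers.OptionalValue(addr),
	})
	// Pair every AcquireNode with a ReleaseNode, even on the error
	// path; the FS implementation below treats a nil node as a no-op.
	defer src.ReleaseNode(node)
	if err != nil {
		return err
	}
	fmt.Printf("node@%v: level=%v items=%v\n", addr, node.Head.Level, node.Head.NumItems)
	return nil
}
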
diff --git a/lib/btrfs/btrfstree/btree_forrest.go b/lib/btrfs/btrfstree/btree_forrest.go
index 38a2721..b04bfc0 100644
--- a/lib/btrfs/btrfstree/btree_forrest.go
+++ b/lib/btrfs/btrfstree/btree_forrest.go
@@ -83,11 +83,14 @@ func LookupTreeRoot(_ context.Context, fs TreeOperator, sb Superblock, treeID bt
}
type TreeOperatorImpl struct {
- NodeSource
+ NodeSource interface {
+ NodeSource
+ NodeFile
+ }
}
func (fs TreeOperatorImpl) RawTree(ctx context.Context, treeID btrfsprim.ObjID) (*RawTree, error) {
- sb, err := fs.Superblock()
+ sb, err := fs.NodeSource.Superblock()
if err != nil {
return nil, err
}
diff --git a/lib/btrfs/btrfstree/btree_tree.go b/lib/btrfs/btrfstree/btree_tree.go
index a943803..86eab11 100644
--- a/lib/btrfs/btrfstree/btree_tree.go
+++ b/lib/btrfs/btrfstree/btree_tree.go
@@ -22,6 +22,9 @@ type RawTree struct {
}
func (tree *RawTree) TreeWalk(ctx context.Context, cbs TreeWalkHandler) {
+ if tree.RootNode == 0 {
+ return
+ }
path := Path{{
FromTree: tree.ID,
FromItemSlot: -1,
@@ -37,13 +40,14 @@ func (tree *RawTree) walk(ctx context.Context, path Path, cbs TreeWalkHandler) {
if ctx.Err() != nil {
return
}
- if path.Node(-1).ToNodeAddr == 0 {
- return
- }
// 001
- node, err := tree.Forrest.ReadNode(path)
- defer node.Free()
+ nodeAddr, nodeExp, ok := path.NodeExpectations(tree.Forrest.NodeSource)
+ if !ok {
+ return
+ }
+ node, err := tree.Forrest.NodeSource.AcquireNode(ctx, nodeAddr, nodeExp)
+ defer tree.Forrest.NodeSource.ReleaseNode(node)
if ctx.Err() != nil {
return
}
diff --git a/lib/btrfs/btrfstree/node_exp.go b/lib/btrfs/btrfstree/node_exp.go
new file mode 100644
index 0000000..dec6dbb
--- /dev/null
+++ b/lib/btrfs/btrfstree/node_exp.go
@@ -0,0 +1,67 @@
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+package btrfstree
+
+import (
+ "fmt"
+
+ "github.com/datawire/dlib/derror"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+)
+
+type NodeExpectations struct {
+ LAddr containers.Optional[btrfsvol.LogicalAddr]
+ // Things knowable from the parent.
+ Level containers.Optional[uint8]
+ Generation containers.Optional[btrfsprim.Generation]
+ Owner func(btrfsprim.ObjID, btrfsprim.Generation) error
+ MinItem containers.Optional[btrfsprim.Key]
+ // Things knowable from the structure of the tree.
+ MaxItem containers.Optional[btrfsprim.Key]
+}
+
+func (exp NodeExpectations) Check(node *Node) error {
+ var errs derror.MultiError
+ if exp.LAddr.OK && node.Head.Addr != exp.LAddr.Val {
+ errs = append(errs, fmt.Errorf("read from laddr=%v but claims to be at laddr=%v",
+ exp.LAddr.Val, node.Head.Addr))
+ }
+ if exp.Level.OK && node.Head.Level != exp.Level.Val {
+ errs = append(errs, fmt.Errorf("expected level=%v but claims to be level=%v",
+ exp.Level.Val, node.Head.Level))
+ }
+ if node.Head.Level > MaxLevel {
+ errs = append(errs, fmt.Errorf("maximum level=%v but claims to be level=%v",
+ MaxLevel, node.Head.Level))
+ }
+ if exp.Generation.OK && node.Head.Generation != exp.Generation.Val {
+ errs = append(errs, fmt.Errorf("expected generation=%v but claims to be generation=%v",
+ exp.Generation.Val, node.Head.Generation))
+ }
+ if exp.Owner != nil {
+ if err := exp.Owner(node.Head.Owner, node.Head.Generation); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if node.Head.NumItems == 0 {
+ errs = append(errs, fmt.Errorf("has no items"))
+ } else {
+ if minItem, _ := node.MinItem(); exp.MinItem.OK && exp.MinItem.Val.Compare(minItem) > 0 {
+ errs = append(errs, fmt.Errorf("expected minItem>=%v but node has minItem=%v",
+ exp.MinItem, minItem))
+ }
+ if maxItem, _ := node.MaxItem(); exp.MaxItem.OK && exp.MaxItem.Val.Compare(maxItem) < 0 {
+ errs = append(errs, fmt.Errorf("expected maxItem<=%v but node has maxItem=%v",
+ exp.MaxItem, maxItem))
+ }
+ }
+ if len(errs) > 0 {
+ return errs
+ }
+ return nil
+}
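
Illustration (not part of the patch): NodeExpectations is mostly Optional fields plus an Owner callback, so a parent keypointer can be turned into a set of checks on the child node before the child is trusted. A sketch under those assumptions (the helper name and the single-owner policy are invented for the example; readnode.go below shows the real construction, including the MinItem/MaxItem bounds):

package example // hypothetical illustration only

import (
	"fmt"

	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
	"git.lukeshu.com/btrfs-progs-ng/lib/containers"
)

// expectationsFromParent builds NodeExpectations from what a parent
// keypointer claims about its child.  Owner is a callback rather than
// an Optional so a caller can accept more than one owning tree.
func expectationsFromParent(addr btrfsvol.LogicalAddr, level uint8, gen btrfsprim.Generation, owner btrfsprim.ObjID) btrfstree.NodeExpectations {
	return btrfstree.NodeExpectations{
		LAddr:      containers.OptionalValue(addr),
		Level:      containers.OptionalValue(level),
		Generation: containers.OptionalValue(gen),
		Owner: func(gotOwner btrfsprim.ObjID, _ btrfsprim.Generation) error {
			if gotOwner != owner {
				return fmt.Errorf("expected owner=%v but claims to be owner=%v", owner, gotOwner)
			}
			return nil
		},
	}
}
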
diff --git a/lib/btrfs/btrfstree/readnode.go b/lib/btrfs/btrfstree/readnode.go
index ac82c62..a4ccf10 100644
--- a/lib/btrfs/btrfstree/readnode.go
+++ b/lib/btrfs/btrfstree/readnode.go
@@ -14,8 +14,7 @@ import (
)
type NodeFile interface {
- diskio.File[btrfsvol.LogicalAddr]
- Superblock() (*Superblock, error)
+ diskio.ReaderAt[btrfsvol.LogicalAddr]
// ParentTree, given a tree ID, returns that tree's parent
// tree, if it has one.
@@ -29,15 +28,14 @@ type NodeFile interface {
ParentTree(btrfsprim.ObjID) (btrfsprim.ObjID, btrfsprim.Generation, bool)
}
-// FSReadNode is a utility function to help with implementing the
-// 'NodeSource' interface.
-func FSReadNode(
- fs NodeFile,
- path Path,
-) (*Node, error) {
- sb, err := fs.Superblock()
- if err != nil {
- return nil, fmt.Errorf("btrfs.FS.ReadNode: %w", err)
+// NodeExpectations returns the address to read and the expectations
+// to have when reading the node pointed to by this Path.
+//
+// `ok` is false if the path is empty or if this Path points to an
+// item rather than a node.
+func (path Path) NodeExpectations(fs NodeFile) (_ btrfsvol.LogicalAddr, _ NodeExpectations, ok bool) {
+ if path.Node(-1).ToNodeAddr == 0 && path.Node(-1).ToNodeGeneration == 0 && path.Node(-1).ToNodeLevel == 0 {
+ return 0, NodeExpectations{}, false
}
checkOwner := func(owner btrfsprim.ObjID, gen btrfsprim.Generation) error {
@@ -70,12 +68,12 @@ func FSReadNode(
}
}
- return ReadNode[btrfsvol.LogicalAddr](fs, *sb, path.Node(-1).ToNodeAddr, NodeExpectations{
+ return path.Node(-1).ToNodeAddr, NodeExpectations{
LAddr: containers.OptionalValue(path.Node(-1).ToNodeAddr),
Level: containers.OptionalValue(path.Node(-1).ToNodeLevel),
Generation: containers.OptionalValue(path.Node(-1).ToNodeGeneration),
Owner: checkOwner,
MinItem: containers.OptionalValue(path.Node(-1).ToKey),
MaxItem: containers.OptionalValue(path.Node(-1).ToMaxKey),
- })
+ }, true
}
diff --git a/lib/btrfs/btrfstree/types_node.go b/lib/btrfs/btrfstree/types_node.go
index bfcbbf4..e29431f 100644
--- a/lib/btrfs/btrfstree/types_node.go
+++ b/lib/btrfs/btrfstree/types_node.go
@@ -10,7 +10,6 @@ import (
"fmt"
"git.lukeshu.com/go/typedsync"
- "github.com/datawire/dlib/derror"
"git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
@@ -104,10 +103,13 @@ type NodeHeader struct {
Generation btrfsprim.Generation `bin:"off=0x50, siz=0x8"`
Owner btrfsprim.ObjID `bin:"off=0x58, siz=0x8"` // The ID of the tree that contains this node
NumItems uint32 `bin:"off=0x60, siz=0x4"` // [ignored-when-writing]
- Level uint8 `bin:"off=0x64, siz=0x1"` // 0 for leaf nodes, >=1 for interior nodes
+ Level uint8 `bin:"off=0x64, siz=0x1"` // 0 for leaf nodes, >=1 for interior nodes (max 8)
binstruct.End `bin:"off=0x65"`
}
+// MaxLevel is the maximum valid NodeHeader.Level.
+const MaxLevel = 8
+
// MaxItems returns the maximum possible valid value of
// .Head.NumItems.
func (node Node) MaxItems() uint32 {
@@ -306,7 +308,9 @@ type ItemHeader struct {
var itemPool containers.SlicePool[Item]
-func (node *Node) Free() {
+// RawFree is for low-level use by caches; don't use .RawFree, use
+// ReleaseNode.
+func (node *Node) RawFree() {
if node == nil {
return
}
@@ -412,17 +416,6 @@ func (node *Node) LeafFreeSpace() uint32 {
var ErrNotANode = errors.New("does not look like a node")
-type NodeExpectations struct {
- LAddr containers.Optional[btrfsvol.LogicalAddr]
- // Things knowable from the parent.
- Level containers.Optional[uint8]
- Generation containers.Optional[btrfsprim.Generation]
- Owner func(btrfsprim.ObjID, btrfsprim.Generation) error
- MinItem containers.Optional[btrfsprim.Key]
- // Things knowable from the structure of the tree.
- MaxItem containers.Optional[btrfsprim.Key]
-}
-
type NodeError[Addr ~int64] struct {
Op string
NodeAddr Addr
@@ -455,7 +448,7 @@ var nodePool = typedsync.Pool[*Node]{
// returned. The error returned (if non-nil) is always of type
// *NodeError[Addr]. Notable errors that may be inside of the
// NodeError are ErrNotANode and *IOError.
-func ReadNode[Addr ~int64](fs diskio.ReaderAt[Addr], sb Superblock, addr Addr, exp NodeExpectations) (*Node, error) {
+func ReadNode[Addr ~int64](fs diskio.ReaderAt[Addr], sb Superblock, addr Addr) (*Node, error) {
if int(sb.NodeSize) < nodeHeaderSize {
return nil, &NodeError[Addr]{
Op: "btrfstree.ReadNode", NodeAddr: addr,
@@ -521,50 +514,7 @@ func ReadNode[Addr ~int64](fs diskio.ReaderAt[Addr], sb Superblock, addr Addr, e
bytePool.Put(nodeBuf)
- // sanity checking (that doesn't prevent parsing)
-
- if err := exp.Check(node); err != nil {
- return node, &NodeError[Addr]{Op: "btrfstree.ReadNode", NodeAddr: addr, Err: err}
- }
-
// return
return node, nil
}
-
-func (exp NodeExpectations) Check(node *Node) error {
- var errs derror.MultiError
- if exp.LAddr.OK && node.Head.Addr != exp.LAddr.Val {
- errs = append(errs, fmt.Errorf("read from laddr=%v but claims to be at laddr=%v",
- exp.LAddr.Val, node.Head.Addr))
- }
- if exp.Level.OK && node.Head.Level != exp.Level.Val {
- errs = append(errs, fmt.Errorf("expected level=%v but claims to be level=%v",
- exp.Level.Val, node.Head.Level))
- }
- if exp.Generation.OK && node.Head.Generation != exp.Generation.Val {
- errs = append(errs, fmt.Errorf("expected generation=%v but claims to be generation=%v",
- exp.Generation.Val, node.Head.Generation))
- }
- if exp.Owner != nil {
- if err := exp.Owner(node.Head.Owner, node.Head.Generation); err != nil {
- errs = append(errs, err)
- }
- }
- if node.Head.NumItems == 0 {
- errs = append(errs, fmt.Errorf("has no items"))
- } else {
- if minItem, _ := node.MinItem(); exp.MinItem.OK && exp.MinItem.Val.Compare(minItem) > 0 {
- errs = append(errs, fmt.Errorf("expected minItem>=%v but node has minItem=%v",
- exp.MinItem, minItem))
- }
- if maxItem, _ := node.MaxItem(); exp.MaxItem.OK && exp.MaxItem.Val.Compare(maxItem) < 0 {
- errs = append(errs, fmt.Errorf("expected maxItem<=%v but node has maxItem=%v",
- exp.MaxItem, maxItem))
- }
- }
- if len(errs) > 0 {
- return errs
- }
- return nil
-}
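
Illustration (not part of the patch): with the exp argument dropped, ReadNode is parse-only and expectation checking becomes the caller's job (in this series, FS.AcquireNode below). A sketch of the resulting split, assuming only the ReadNode, NodeExpectations.Check, and RawFree signatures visible in this diff:

package example // hypothetical illustration only

import (
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
	"git.lukeshu.com/btrfs-progs-ng/lib/diskio"
)

// readAndCheck parses a node and then applies the sanity checks that
// ReadNode used to perform itself.
func readAndCheck(fs diskio.ReaderAt[btrfsvol.LogicalAddr], sb btrfstree.Superblock, addr btrfsvol.LogicalAddr, exp btrfstree.NodeExpectations) (*btrfstree.Node, error) {
	node, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, sb, addr)
	if err != nil {
		return nil, err
	}
	if err := exp.Check(node); err != nil {
		node.RawFree() // nothing is caching this node yet
		return nil, err
	}
	return node, nil
}
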
diff --git a/lib/btrfs/io2_lv.go b/lib/btrfs/io2_lv.go
index 03b2107..72e97f3 100644
--- a/lib/btrfs/io2_lv.go
+++ b/lib/btrfs/io2_lv.go
@@ -16,6 +16,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/diskio"
)
@@ -27,6 +28,8 @@ type FS struct {
cacheSuperblocks []*diskio.Ref[btrfsvol.PhysicalAddr, btrfstree.Superblock]
cacheSuperblock *btrfstree.Superblock
+ cacheNodes containers.Cache[btrfsvol.LogicalAddr, nodeCacheEntry]
+
cacheObjID2All map[btrfsprim.ObjID]treeInfo
cacheUUID2ObjID map[btrfsprim.UUID]btrfsprim.ObjID
}
diff --git a/lib/btrfs/io3_btree.go b/lib/btrfs/io3_btree.go
index b60f54a..8aa485f 100644
--- a/lib/btrfs/io3_btree.go
+++ b/lib/btrfs/io3_btree.go
@@ -6,23 +6,18 @@ package btrfs
import (
"context"
+ "fmt"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
// This file is ordered from low-level to high-level.
-// btrfstree.NodeSource ////////////////////////////////////////////////////////
-
-// ReadNode implements btrfstree.NodeSource.
-func (fs *FS) ReadNode(path btrfstree.Path) (*btrfstree.Node, error) {
- return btrfstree.FSReadNode(fs, path)
-}
-
-var _ btrfstree.NodeSource = (*FS)(nil)
-
// btrfstree.NodeFile //////////////////////////////////////////////////////////
type treeInfo struct {
@@ -85,6 +80,62 @@ func (fs *FS) ParentTree(tree btrfsprim.ObjID) (btrfsprim.ObjID, btrfsprim.Gener
var _ btrfstree.NodeFile = (*FS)(nil)
+// btrfstree.NodeSource ////////////////////////////////////////////////////////
+
+type nodeCacheEntry struct {
+ node *btrfstree.Node
+ err error
+}
+
+// AcquireNode implements btrfstree.NodeSource.
+func (fs *FS) AcquireNode(ctx context.Context, addr btrfsvol.LogicalAddr, exp btrfstree.NodeExpectations) (*btrfstree.Node, error) {
+ if fs.cacheNodes == nil {
+ fs.cacheNodes = containers.NewARCache[btrfsvol.LogicalAddr, nodeCacheEntry](
+ textui.Tunable(4*(btrfstree.MaxLevel+1)),
+ containers.SourceFunc[btrfsvol.LogicalAddr, nodeCacheEntry](fs.readNode),
+ )
+ }
+
+ nodeEntry := fs.cacheNodes.Acquire(ctx, addr)
+ if nodeEntry.err != nil {
+ err := nodeEntry.err
+ fs.cacheNodes.Release(addr)
+ return nil, err
+ }
+
+ if nodeEntry.node != nil {
+ if err := exp.Check(nodeEntry.node); err != nil {
+ fs.cacheNodes.Release(addr)
+ return nil, fmt.Errorf("btrfstree.ReadNode: node@%v: %w", addr, err) // fmt.Errorf("btrfs.FS.AcquireNode: node@%v: %w", addr, err)
+ }
+ }
+
+ return nodeEntry.node, nil
+}
+
+// ReleaseNode implements btrfstree.NodeSource.
+func (fs *FS) ReleaseNode(node *btrfstree.Node) {
+ if node == nil {
+ return
+ }
+ fs.cacheNodes.Release(node.Head.Addr)
+}
+
+func (fs *FS) readNode(_ context.Context, addr btrfsvol.LogicalAddr, nodeEntry *nodeCacheEntry) {
+ nodeEntry.node.RawFree()
+ nodeEntry.node = nil
+
+ sb, err := fs.Superblock()
+ if err != nil {
+ nodeEntry.err = err
+ return
+ }
+
+ nodeEntry.node, nodeEntry.err = btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, addr)
+}
+
+var _ btrfstree.NodeSource = (*FS)(nil)
+
// btrfstree.TreeOperator //////////////////////////////////////////////////////
// TreeWalk implements btrfstree.TreeOperator.
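
Illustration (not part of the patch): the new fs.cacheNodes field relies on the containers.Cache contract that, on a miss, Acquire calls the SourceFunc to fill the entry in place, and the entry stays pinned until the matching Release. A toy cache showing that contract, assuming only the NewARCache/SourceFunc/Acquire/Release signatures used in this diff (the key and value types here are made up):

package example // hypothetical illustration only

import (
	"context"
	"fmt"

	"git.lukeshu.com/btrfs-progs-ng/lib/containers"
)

func toyCache(ctx context.Context) {
	cache := containers.NewARCache[int, string](
		4, // the patch sizes the node cache at textui.Tunable(4*(btrfstree.MaxLevel+1))
		containers.SourceFunc[int, string](func(_ context.Context, k int, v *string) {
			// Called on a cache miss to (re)fill the entry in place.
			*v = fmt.Sprintf("value-for-%d", k)
		}),
	)
	v := cache.Acquire(ctx, 42) // pins entry 42, filling it on a miss
	fmt.Println(*v)
	cache.Release(42) // unpin; the entry may now be evicted if space is needed
}
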
diff --git a/lib/btrfsutil/graph.go b/lib/btrfsutil/graph.go
index 39e1cf2..ea33be7 100644
--- a/lib/btrfsutil/graph.go
+++ b/lib/btrfsutil/graph.go
@@ -17,7 +17,6 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
"git.lukeshu.com/btrfs-progs-ng/lib/slices"
"git.lukeshu.com/btrfs-progs-ng/lib/textui"
@@ -199,7 +198,7 @@ func (g Graph) InsertNode(node *btrfstree.Node) {
}
}
-func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) error {
+func (g Graph) FinalCheck(ctx context.Context, fs btrfstree.NodeSource) error {
var stats textui.Portion[int]
dlog.Info(ctx, "Checking keypointers for dead-ends...")
@@ -208,9 +207,10 @@ func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAd
progressWriter.Set(stats)
for laddr := range g.EdgesTo {
if _, ok := g.Nodes[laddr]; !ok {
- _, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, sb, laddr, btrfstree.NodeExpectations{
+ node, err := fs.AcquireNode(ctx, laddr, btrfstree.NodeExpectations{
LAddr: containers.OptionalValue(laddr),
})
+ fs.ReleaseNode(node)
if err == nil {
progressWriter.Done()
return fmt.Errorf("node@%v exists but was not in node scan results", laddr)
diff --git a/lib/btrfsutil/old_rebuilt_forrest.go b/lib/btrfsutil/old_rebuilt_forrest.go
index 5b99892..36abe6f 100644
--- a/lib/btrfsutil/old_rebuilt_forrest.go
+++ b/lib/btrfsutil/old_rebuilt_forrest.go
@@ -231,12 +231,7 @@ func (tree oldRebuiltTree) addErrs(fn func(btrfsprim.Key, uint32) int, err error
}
func (bt *OldRebuiltForrest) readNode(nodeInfo nodeInfo) *btrfstree.Node {
- sb, err := bt.inner.Superblock()
- if err != nil {
- panic(fmt.Errorf("should not happen: i/o error: %w", err))
- }
-
- node, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](bt.inner, *sb, nodeInfo.LAddr, btrfstree.NodeExpectations{
+ node, err := bt.inner.AcquireNode(bt.ctx, nodeInfo.LAddr, btrfstree.NodeExpectations{
LAddr: containers.OptionalValue(nodeInfo.LAddr),
Level: containers.OptionalValue(nodeInfo.Level),
Generation: containers.OptionalValue(nodeInfo.Generation),
@@ -286,7 +281,7 @@ func (tree oldRebuiltTree) treeSearch(_ context.Context, searcher btrfstree.Tree
}
node := tree.forrest.readNode(indexItem.Value.Node)
- defer node.Free()
+ defer tree.forrest.inner.ReleaseNode(node)
item := node.BodyLeaf[indexItem.Value.Slot]
item.Body = item.Body.CloneItem()
@@ -323,12 +318,12 @@ func (tree oldRebuiltTree) treeSubrange(_ context.Context, min int, searcher btr
func(rbNode *containers.RBNode[oldRebuiltTreeValue]) bool {
cnt++
if node == nil || node.Head.Addr != rbNode.Value.Node.LAddr {
- node.Free()
+ tree.forrest.inner.ReleaseNode(node)
node = tree.forrest.readNode(rbNode.Value.Node)
}
return handleFn(node.BodyLeaf[rbNode.Value.Slot])
})
- node.Free()
+ tree.forrest.inner.ReleaseNode(node)
var err error
if cnt < min {
@@ -371,7 +366,7 @@ func (tree oldRebuiltTree) treeWalk(ctx context.Context, cbs btrfstree.TreeWalkH
return false
}
if node == nil || node.Head.Addr != indexItem.Value.Node.LAddr {
- node.Free()
+ tree.forrest.inner.ReleaseNode(node)
node = tree.forrest.readNode(indexItem.Value.Node)
}
item := node.BodyLeaf[indexItem.Value.Slot]
@@ -404,7 +399,7 @@ func (tree oldRebuiltTree) treeWalk(ctx context.Context, cbs btrfstree.TreeWalkH
}
return ctx.Err() == nil
})
- node.Free()
+ tree.forrest.inner.ReleaseNode(node)
}
// Superblock implements btrfs.ReadableFS.
diff --git a/lib/btrfsutil/rebuilt_forrest.go b/lib/btrfsutil/rebuilt_forrest.go
index 811e1ac..60f3010 100644
--- a/lib/btrfsutil/rebuilt_forrest.go
+++ b/lib/btrfsutil/rebuilt_forrest.go
@@ -15,7 +15,6 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
"git.lukeshu.com/btrfs-progs-ng/lib/slices"
"git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
@@ -130,7 +129,7 @@ func (cb noopRebuiltForrestCallbacks) LookupUUID(ctx context.Context, uuid btrfs
// NewRebuiltForrest().
type RebuiltForrest struct {
// static
- file diskio.File[btrfsvol.LogicalAddr]
+ file btrfstree.NodeSource
sb btrfstree.Superblock
graph Graph
cb RebuiltForrestCallbacks
@@ -142,13 +141,11 @@ type RebuiltForrest struct {
leafs containers.Cache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]
incItems containers.Cache[btrfsprim.ObjID, itemIndex]
excItems containers.Cache[btrfsprim.ObjID, itemIndex]
-
- nodes containers.Cache[btrfsvol.LogicalAddr, btrfstree.Node]
}
// NewRebuiltForrest returns a new RebuiltForrest instance. The
// RebuiltForrestCallbacks may be nil.
-func NewRebuiltForrest(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock, graph Graph, cb RebuiltForrestCallbacks) *RebuiltForrest {
+func NewRebuiltForrest(file btrfstree.NodeSource, sb btrfstree.Superblock, graph Graph, cb RebuiltForrestCallbacks) *RebuiltForrest {
ret := &RebuiltForrest{
file: file,
sb: sb,
@@ -171,9 +168,6 @@ func NewRebuiltForrest(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Supe
containers.SourceFunc[btrfsprim.ObjID, itemIndex](func(ctx context.Context, treeID btrfsprim.ObjID, excItems *itemIndex) {
*excItems = ret.trees[treeID].uncachedExcItems(ctx)
}))
- ret.nodes = containers.NewARCache[btrfsvol.LogicalAddr, btrfstree.Node](textui.Tunable(8),
- containers.SourceFunc[btrfsvol.LogicalAddr, btrfstree.Node](ret.readNode))
-
if ret.cb == nil {
ret.cb = noopRebuiltForrestCallbacks{
forrest: ret,
diff --git a/lib/btrfsutil/rebuilt_readitem.go b/lib/btrfsutil/rebuilt_readitem.go
index d3a2253..73dc01e 100644
--- a/lib/btrfsutil/rebuilt_readitem.go
+++ b/lib/btrfsutil/rebuilt_readitem.go
@@ -8,8 +8,6 @@ import (
"context"
"fmt"
- "github.com/datawire/dlib/dlog"
-
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
@@ -26,16 +24,20 @@ func (ptr ItemPtr) String() string {
return fmt.Sprintf("node@%v[%v]", ptr.Node, ptr.Slot)
}
-func (ts *RebuiltForrest) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr, out *btrfstree.Node) {
- dlog.Debugf(ctx, "cache-miss node@%v, reading...", laddr)
-
- graphInfo, ok := ts.graph.Nodes[laddr]
+func (ts *RebuiltForrest) readItem(ctx context.Context, ptr ItemPtr) btrfsitem.Item {
+ graphInfo, ok := ts.graph.Nodes[ptr.Node]
if !ok {
- panic(fmt.Errorf("should not happen: node@%v is not mentioned in the in-memory graph", laddr))
+ panic(fmt.Errorf("should not happen: btrfsutil.RebuiltForrest.readItem called for node@%v not mentioned in the in-memory graph", ptr.Node))
+ }
+ if graphInfo.Level != 0 {
+ panic(fmt.Errorf("should not happen: btrfsutil.RebuiltForrest.readItem called for non-leaf node@%v", ptr.Node))
+ }
+ if ptr.Slot < 0 {
+ panic(fmt.Errorf("should not happen: btrfsutil.RebuiltForrest.readItem called for negative item slot: %v", ptr.Slot))
}
- node, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](ts.file, ts.sb, laddr, btrfstree.NodeExpectations{
- LAddr: containers.OptionalValue(laddr),
+ node, err := ts.file.AcquireNode(ctx, ptr.Node, btrfstree.NodeExpectations{
+ LAddr: containers.OptionalValue(ptr.Node),
Level: containers.OptionalValue(graphInfo.Level),
Generation: containers.OptionalValue(graphInfo.Generation),
Owner: func(treeID btrfsprim.ObjID, gen btrfsprim.Generation) error {
@@ -49,22 +51,12 @@ func (ts *RebuiltForrest) readNode(ctx context.Context, laddr btrfsvol.LogicalAd
MinItem: containers.OptionalValue(graphInfo.MinItem()),
MaxItem: containers.OptionalValue(graphInfo.MaxItem()),
})
+ defer ts.file.ReleaseNode(node)
if err != nil {
panic(fmt.Errorf("should not happen: i/o error: %w", err))
}
- *out = *node
-}
-
-func (ts *RebuiltForrest) readItem(ctx context.Context, ptr ItemPtr) btrfsitem.Item {
- if ts.graph.Nodes[ptr.Node].Level != 0 {
- panic(fmt.Errorf("should not happen: btrfsutil.RebuiltForrest.readItem called for non-leaf node@%v", ptr.Node))
- }
- if ptr.Slot < 0 {
- panic(fmt.Errorf("should not happen: btrfsutil.RebuiltForrest.readItem called for negative item slot: %v", ptr.Slot))
- }
- items := ts.nodes.Acquire(ctx, ptr.Node).BodyLeaf
- defer ts.nodes.Release(ptr.Node)
+ items := node.BodyLeaf
if ptr.Slot >= len(items) {
panic(fmt.Errorf("should not happen: btrfsutil.RebuiltForrest.readItem called for out-of-bounds item slot: slot=%v len=%v",
diff --git a/lib/btrfsutil/scan.go b/lib/btrfsutil/scan.go
index 05b27d5..0e268e5 100644
--- a/lib/btrfsutil/scan.go
+++ b/lib/btrfsutil/scan.go
@@ -122,7 +122,7 @@ func ScanOneDevice[Stats comparable, Result any](ctx context.Context, dev *btrfs
}
if checkForNode {
- node, err := btrfstree.ReadNode[btrfsvol.PhysicalAddr](dev, *sb, pos, btrfstree.NodeExpectations{})
+ node, err := btrfstree.ReadNode[btrfsvol.PhysicalAddr](dev, *sb, pos)
if err != nil {
if !errors.Is(err, btrfstree.ErrNotANode) {
dlog.Errorf(ctx, "error: %v", err)
@@ -134,7 +134,7 @@ func ScanOneDevice[Stats comparable, Result any](ctx context.Context, dev *btrfs
}
minNextNode = pos + btrfsvol.PhysicalAddr(sb.NodeSize)
}
- node.Free()
+ node.RawFree()
}
}
diff --git a/lib/containers/arcache.go b/lib/containers/arcache.go
index 3f15f67..29ec030 100644
--- a/lib/containers/arcache.go
+++ b/lib/containers/arcache.go
@@ -230,7 +230,7 @@ type arCache[K comparable, V any] struct {
// Before getting to the meaty ARC stuff, let's get some
// boring/simple synchronization/blocking primitives out of the way:
-// waitForEval is called before storing something into the cache.
+// waitForAvail is called before storing something into the cache.
// This is necessary because if the cache is full and all entries are
// pinned, then we won't be able to store the entry until something gets
// unpinned ("Release()d").
@@ -243,19 +243,24 @@ func (c *arCache[K, V]) waitForAvail() {
ch := make(chan struct{})
c.waiters.Store(&LinkedListEntry[chan struct{}]{Value: ch})
c.mu.Unlock()
- <-ch
- c.mu.Lock()
+ <-ch // receive the lock from .Release()
+ if c.recentLive.IsEmpty() && c.frequentLive.IsEmpty() && c.unusedLive.IsEmpty() {
+ panic(fmt.Errorf("should not happen: waitForAvail is returning, but nothing is available"))
+ }
}
-// notifyAvail is called when an entry gets unpinned ("Release()d"),
-// and wakes up the highest-priority .waitForAvail() waiter (if there
-// is one).
-func (c *arCache[K, V]) notifyAvail() {
+// unlockAndNotifyAvail is called when an entry gets unpinned
+// ("Release()d"), and wakes up the highest-priority .waitForAvail()
+// waiter (if there is one).
+func (c *arCache[K, V]) unlockAndNotifyAvail() {
waiter := c.waiters.Oldest
if waiter == nil {
+ c.mu.Unlock()
return
}
c.waiters.Delete(waiter)
+ // We don't actually unlock, we're "transferring" the lock to
+ // the waiter.
close(waiter.Value)
}
@@ -419,7 +424,8 @@ func (c *arCache[K, V]) arcReplace(ghostEntry *LinkedListEntry[arcGhostEntry[K]]
// order to support "delete"; because without "delete",
// arcReplace wouldn't ever be called when the cache isn't
// full.)
- if entry := c.unusedLive.Oldest; entry != nil && !forceEviction {
+ if !c.unusedLive.IsEmpty() && !forceEviction {
+ entry := c.unusedLive.Oldest
c.unusedLive.Delete(entry)
return entry
}
@@ -446,15 +452,16 @@ func (c *arCache[K, V]) arcReplace(ghostEntry *LinkedListEntry[arcGhostEntry[K]]
// Make the decision.
recentLive := c.recentPinned.Len + c.recentLive.Len
switch { // NB: Also check .IsEmpty() in order to support pinning.
- case recentLive > c.recentLiveTarget, c.frequentLive.IsEmpty():
+ case recentLive > c.recentLiveTarget && !c.recentLive.IsEmpty():
evictFrom, evictTo = &c.recentLive, &c.recentGhost
- case recentLive < c.recentLiveTarget, c.recentLive.IsEmpty():
+ case recentLive < c.recentLiveTarget && !c.frequentLive.IsEmpty():
evictFrom, evictTo = &c.frequentLive, &c.frequentGhost
- case recentLive == c.recentLiveTarget:
+ default: // case recentLive == c.recentLiveTarget || the_normal_list_was_empty:
+
// The original paper says "The last replacement
// decision is somewhat arbitrary, and can be made
// differently if desired."
- if arbitrary && c.recentLive.Len > 0 {
+ if arbitrary && !c.recentLive.IsEmpty() {
evictFrom, evictTo = &c.recentLive, &c.recentGhost
} else {
evictFrom, evictTo = &c.frequentLive, &c.frequentGhost
@@ -562,8 +569,8 @@ func (c *arCache[K, V]) Delete(k K) {
c.unusedGhost.Store(entry)
}
- // No need to call c.notifyAvail(); if we were able to delete
- // it, it was already available.
+ // No need to call c.unlockAndNotifyAvail(); if we were able
+ // to delete it, it was already available.
c.mu.Unlock()
}
@@ -571,7 +578,6 @@ func (c *arCache[K, V]) Delete(k K) {
// Release implements the 'Cache' interface.
func (c *arCache[K, V]) Release(k K) {
c.mu.Lock()
- defer c.mu.Unlock()
entry := c.liveByName[k]
if entry == nil || entry.Value.refs <= 0 {
@@ -592,8 +598,12 @@ func (c *arCache[K, V]) Release(k K) {
case entry.List == &c.frequentPinned:
c.frequentPinned.Delete(entry)
c.frequentLive.Store(entry)
+ default:
+ panic(fmt.Errorf("should not happen: entry is not pending deletion, and is not in a pinned list"))
}
- c.notifyAvail()
+ c.unlockAndNotifyAvail()
+ } else {
+ c.mu.Unlock()
}
}
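
Illustration (not part of the patch): the waitForAvail / unlockAndNotifyAvail rename makes the hand-off explicit: the releasing goroutine keeps the mutex locked and closing the waiter's channel "transfers" ownership, so the waiter resumes as though it never dropped the lock and no other goroutine can steal the freed entry in between. A minimal standalone sketch of that idiom (not the library's code; standard library only); the same hand-off is applied to lrucache.go in the next file:

package main // hypothetical illustration only

import (
	"fmt"
	"sync"
)

// lockHandoff is a toy version of the waitForAvail /
// unlockAndNotifyAvail pair: signal() does NOT unlock the mutex when a
// waiter exists; closing the channel hands the locked mutex to the
// waiter, which resumes as if it had never let go.
type lockHandoff struct {
	mu     sync.Mutex
	waiter chan struct{}
	avail  int
}

// wait must be called with h.mu held; it returns with h.mu held and
// at least one entry available.
func (h *lockHandoff) wait() {
	if h.avail > 0 {
		return
	}
	ch := make(chan struct{})
	h.waiter = ch
	h.mu.Unlock()
	<-ch // receive the lock from signal()
	if h.avail == 0 {
		panic("should not happen: woken up, but nothing is available")
	}
}

// signal must be called with h.mu held; it either unlocks h.mu or
// hands the still-locked mutex to a waiter.
func (h *lockHandoff) signal() {
	h.avail++
	if h.waiter == nil {
		h.mu.Unlock()
		return
	}
	ch := h.waiter
	h.waiter = nil
	close(ch) // deliberately no Unlock: the lock now belongs to the waiter
}

func main() {
	h := new(lockHandoff)
	done := make(chan struct{})
	go func() {
		h.mu.Lock()
		h.wait() // blocks until signal(); returns still holding h.mu
		h.avail--
		fmt.Println("consumer got an available entry")
		h.mu.Unlock()
		close(done)
	}()
	h.mu.Lock()
	h.signal() // makes one entry available and wakes the waiter, if any
	<-done
}
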
diff --git a/lib/containers/lrucache.go b/lib/containers/lrucache.go
index d2aff41..5c51435 100644
--- a/lib/containers/lrucache.go
+++ b/lib/containers/lrucache.go
@@ -60,30 +60,37 @@ type lruCache[K comparable, V any] struct {
// Blocking primitives /////////////////////////////////////////////////////////
-// Because of pinning, there might not actually be an available entry
-// for us to use/evict. If we need an entry to use or evict, we'll
-// call waitForAvail to block until there is en entry that is either
-// unused or evictable. We'll give waiters FIFO priority.
+// waitForAvail is called before storing something into the cache.
+// This is necessary because if the cache is full and all entries are
+// pinned, then we won't be able to store the entry until something gets
+// unpinned ("Release()d").
func (c *lruCache[K, V]) waitForAvail() {
if !(c.unused.IsEmpty() && c.evictable.IsEmpty()) {
+ // There is already an available `lruEntry` that we
+ // can either use or evict.
return
}
ch := make(chan struct{})
c.waiters.Store(&LinkedListEntry[chan struct{}]{Value: ch})
c.mu.Unlock()
- <-ch
- c.mu.Lock()
+ <-ch // receive the lock from .Release()
+ if c.unused.IsEmpty() && c.evictable.IsEmpty() {
+ panic(fmt.Errorf("should not happen: waitForAvail is returning, but nothing is available"))
+ }
}
-// notifyAvail is called when an entry becomes unused or evictable,
-// and wakes up the highest-priority .waitForAvail() waiter (if there
-// is one).
-func (c *lruCache[K, V]) notifyAvail() {
+// unlockAndNotifyAvail is called when an entry becomes unused or
+// evictable, and wakes up the highest-priority .waitForAvail() waiter
+// (if there is one).
+func (c *lruCache[K, V]) unlockAndNotifyAvail() {
waiter := c.waiters.Oldest
if waiter == nil {
+ c.mu.Unlock()
return
}
c.waiters.Delete(waiter)
+ // We don't actually unlock, we're "transferring" the lock to
+ // the waiter.
close(waiter.Value)
}
@@ -169,8 +176,8 @@ func (c *lruCache[K, V]) Delete(k K) {
c.evictable.Delete(entry)
c.unused.Store(entry)
- // No need to call c.notifyAvail(); if we were able to delete
- // it, it was already available.
+ // No need to call c.unlockAndNotifyAvail(); if we were able
+ // to delete it, it was already available.
c.mu.Unlock()
}
@@ -178,7 +185,6 @@ func (c *lruCache[K, V]) Delete(k K) {
// Release implements the 'Cache' interface.
func (c *lruCache[K, V]) Release(k K) {
c.mu.Lock()
- defer c.mu.Unlock()
entry := c.byName[k]
if entry == nil || entry.Value.refs <= 0 {
@@ -194,7 +200,9 @@ func (c *lruCache[K, V]) Release(k K) {
} else {
c.evictable.Store(entry)
}
- c.notifyAvail()
+ c.unlockAndNotifyAvail()
+ } else {
+ c.mu.Unlock()
}
}