Diffstat (limited to 'lib/btrfsprogs')
-rw-r--r--  lib/btrfsprogs/btrfsinspect/print_tree.go                  | 56
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go | 18
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go    | 14
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go    |  6
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go    | 25
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go        | 10
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go  | 54
-rw-r--r--  lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go           |  5
-rw-r--r--  lib/btrfsprogs/btrfsinspect/scandevices.go                 | 29
-rw-r--r--  lib/btrfsprogs/btrfsutil/broken_btree.go                   |  9
-rw-r--r--  lib/btrfsprogs/btrfsutil/skinny_paths.go                   | 23
11 files changed, 134 insertions(+), 115 deletions(-)
diff --git a/lib/btrfsprogs/btrfsinspect/print_tree.go b/lib/btrfsprogs/btrfsinspect/print_tree.go
index 3807df5..8acf9cc 100644
--- a/lib/btrfsprogs/btrfsinspect/print_tree.go
+++ b/lib/btrfsprogs/btrfsinspect/print_tree.go
@@ -92,6 +92,8 @@ func DumpTrees(ctx context.Context, out io.Writer, fs *btrfs.FS) {
textui.Fprintf(out, "uuid %v\n", superblock.FSUUID)
}
+var nodeHeaderSize = binstruct.StaticSize(btrfstree.NodeHeader{})
+
// printTree mimics btrfs-progs
// kernel-shared/print-tree.c:btrfs_print_tree() and
// kernel-shared/print-tree.c:btrfs_print_leaf()
@@ -100,7 +102,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
handlers := btrfstree.TreeWalkHandler{
Node: func(path btrfstree.TreePath, nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) error {
printHeaderInfo(out, nodeRef.Data)
- itemOffset = nodeRef.Data.Size - uint32(binstruct.StaticSize(btrfstree.NodeHeader{}))
+ itemOffset = nodeRef.Data.Size - uint32(nodeHeaderSize)
return nil
},
PreKeyPointer: func(_ btrfstree.TreePath, item btrfstree.KeyPointer) error {
@@ -121,11 +123,11 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
itemOffset,
itemSize)
switch body := item.Body.(type) {
- case btrfsitem.FreeSpaceHeader:
+ case *btrfsitem.FreeSpaceHeader:
textui.Fprintf(out, "\t\tlocation %v\n", fmtKey(body.Location))
textui.Fprintf(out, "\t\tcache generation %v entries %v bitmaps %v\n",
body.Generation, body.NumEntries, body.NumBitmaps)
- case btrfsitem.Inode:
+ case *btrfsitem.Inode:
textui.Fprintf(out, ""+
"\t\tgeneration %v transid %v size %v nbytes %v\n"+
"\t\tblock group %v mode %o links %v uid %v gid %v rdev %v\n"+
@@ -137,14 +139,14 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
textui.Fprintf(out, "\t\tctime %v\n", fmtTime(body.CTime))
textui.Fprintf(out, "\t\tmtime %v\n", fmtTime(body.MTime))
textui.Fprintf(out, "\t\totime %v\n", fmtTime(body.OTime))
- case btrfsitem.InodeRefs:
- for _, ref := range body {
+ case *btrfsitem.InodeRefs:
+ for _, ref := range body.Refs {
textui.Fprintf(out, "\t\tindex %v namelen %v name: %s\n",
ref.Index, ref.NameLen, ref.Name)
}
// case btrfsitem.INODE_EXTREF_KEY:
// // TODO
- case btrfsitem.DirEntry:
+ case *btrfsitem.DirEntry:
textui.Fprintf(out, "\t\tlocation %v type %v\n",
fmtKey(body.Location), body.Type)
textui.Fprintf(out, "\t\ttransid %v data_len %v name_len %v\n",
@@ -155,7 +157,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
}
// case btrfsitem.DIR_LOG_INDEX_KEY, btrfsitem.DIR_LOG_ITEM_KEY:
// // TODO
- case btrfsitem.Root:
+ case *btrfsitem.Root:
textui.Fprintf(out, "\t\tgeneration %v root_dirid %v bytenr %d byte_limit %v bytes_used %v\n",
body.Generation, body.RootDirID, body.ByteNr, body.ByteLimit, body.BytesUsed)
textui.Fprintf(out, "\t\tlast_snapshot %v flags %v refs %v\n",
@@ -175,7 +177,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
textui.Fprintf(out, "\t\tstime %v\n", fmtTime(body.STime))
textui.Fprintf(out, "\t\trtime %v\n", fmtTime(body.RTime))
}
- case btrfsitem.RootRef:
+ case *btrfsitem.RootRef:
var tag string
switch item.Key.ItemType {
case btrfsitem.ROOT_REF_KEY:
@@ -187,7 +189,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
}
textui.Fprintf(out, "\t\troot %v key dirid %v sequence %v name %s\n",
tag, body.DirID, body.Sequence, body.Name)
- case btrfsitem.Extent:
+ case *btrfsitem.Extent:
textui.Fprintf(out, "\t\trefs %v gen %v flags %v\n",
body.Head.Refs, body.Head.Generation, body.Head.Flags)
if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) {
@@ -195,7 +197,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
fmtKey(body.Info.Key), body.Info.Level)
}
printExtentInlineRefs(out, body.Refs)
- case btrfsitem.Metadata:
+ case *btrfsitem.Metadata:
textui.Fprintf(out, "\t\trefs %v gen %v flags %v\n",
body.Head.Refs, body.Head.Generation, body.Head.Flags)
textui.Fprintf(out, "\t\ttree block skinny level %v\n", item.Key.Offset)
@@ -204,7 +206,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
// // TODO
// case btrfsitem.SHARED_DATA_REF_KEY:
// // TODO
- case btrfsitem.ExtentCSum:
+ case *btrfsitem.ExtentCSum:
start := btrfsvol.LogicalAddr(item.Key.Offset)
textui.Fprintf(out, "\t\trange start %d end %d length %d",
start, start.Add(body.Size()), body.Size())
@@ -222,7 +224,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
return nil
})
textui.Fprintf(out, "\n")
- case btrfsitem.FileExtent:
+ case *btrfsitem.FileExtent:
textui.Fprintf(out, "\t\tgeneration %v type %v\n",
body.Generation, body.Type)
switch body.Type {
@@ -249,15 +251,15 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
default:
textui.Fprintf(out, "\t\t(error) unknown file extent type %v", body.Type)
}
- case btrfsitem.BlockGroup:
+ case *btrfsitem.BlockGroup:
textui.Fprintf(out, "\t\tblock group used %v chunk_objectid %v flags %v\n",
body.Used, body.ChunkObjectID, body.Flags)
- case btrfsitem.FreeSpaceInfo:
+ case *btrfsitem.FreeSpaceInfo:
textui.Fprintf(out, "\t\tfree space info extent count %v flags %d\n",
body.ExtentCount, body.Flags)
- case btrfsitem.FreeSpaceBitmap:
+ case *btrfsitem.FreeSpaceBitmap:
textui.Fprintf(out, "\t\tfree space bitmap\n")
- case btrfsitem.Chunk:
+ case *btrfsitem.Chunk:
textui.Fprintf(out, "\t\tlength %d owner %d stripe_len %v type %v\n",
body.Head.Size, body.Head.Owner, body.Head.StripeLen, body.Head.Type)
textui.Fprintf(out, "\t\tio_align %v io_width %v sector_size %v\n",
@@ -270,7 +272,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
textui.Fprintf(out, "\t\t\tdev_uuid %v\n",
stripe.DeviceUUID)
}
- case btrfsitem.Dev:
+ case *btrfsitem.Dev:
textui.Fprintf(out, ""+
"\t\tdevid %d total_bytes %v bytes_used %v\n"+
"\t\tio_align %v io_width %v sector_size %v type %v\n"+
@@ -284,18 +286,18 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
body.SeekSpeed, body.Bandwidth,
body.DevUUID,
body.FSUUID)
- case btrfsitem.DevExtent:
+ case *btrfsitem.DevExtent:
textui.Fprintf(out, ""+
"\t\tdev extent chunk_tree %d\n"+
"\t\tchunk_objectid %v chunk_offset %d length %d\n"+
"\t\tchunk_tree_uuid %v\n",
body.ChunkTree, body.ChunkObjectID, body.ChunkOffset, body.Length,
body.ChunkTreeUUID)
- case btrfsitem.QGroupStatus:
+ case *btrfsitem.QGroupStatus:
textui.Fprintf(out, ""+
"\t\tversion %v generation %v flags %v scan %d\n",
body.Version, body.Generation, body.Flags, body.RescanProgress)
- case btrfsitem.QGroupInfo:
+ case *btrfsitem.QGroupInfo:
textui.Fprintf(out, ""+
"\t\tgeneration %v\n"+
"\t\treferenced %d referenced_compressed %d\n"+
@@ -303,7 +305,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
body.Generation,
body.ReferencedBytes, body.ReferencedBytesCompressed,
body.ExclusiveBytes, body.ExclusiveBytesCompressed)
- case btrfsitem.QGroupLimit:
+ case *btrfsitem.QGroupLimit:
textui.Fprintf(out, ""+
"\t\tflags %x\n"+
"\t\tmax_referenced %d max_exclusive %d\n"+
@@ -311,11 +313,11 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
uint64(body.Flags),
body.MaxReferenced, body.MaxExclusive,
body.RsvReferenced, body.RsvExclusive)
- case btrfsitem.UUIDMap:
+ case *btrfsitem.UUIDMap:
textui.Fprintf(out, "\t\tsubvol_id %d\n", body.ObjID)
// case btrfsitem.STRING_ITEM_KEY:
// // TODO
- case btrfsitem.DevStats:
+ case *btrfsitem.DevStats:
textui.Fprintf(out, "\t\tpersistent item objectid %v offset %v\n",
item.Key.ObjectID.Format(item.Key.ItemType), item.Key.Offset)
switch item.Key.ObjectID {
@@ -332,7 +334,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
}
// case btrfsitem.TEMPORARY_ITEM_KEY:
// // TODO
- case btrfsitem.Empty:
+ case *btrfsitem.Empty:
switch item.Key.ItemType {
case btrfsitem.ORPHAN_ITEM_KEY: // 48
textui.Fprintf(out, "\t\torphan item\n")
@@ -351,7 +353,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
default:
textui.Fprintf(out, "\t\t(error) unhandled empty item type: %v\n", item.Key.ItemType)
}
- case btrfsitem.Error:
+ case *btrfsitem.Error:
textui.Fprintf(out, "\t\t(error) error item: %v\n", body.Err)
default:
textui.Fprintf(out, "\t\t(error) unhandled item type: %T\n", body)
@@ -423,10 +425,10 @@ func printExtentInlineRefs(out io.Writer, refs []btrfsitem.ExtentInlineRef) {
default:
textui.Fprintf(out, "\t\t(error) unexpected empty sub-item type: %v\n", ref.Type)
}
- case btrfsitem.ExtentDataRef:
+ case *btrfsitem.ExtentDataRef:
textui.Fprintf(out, "\t\textent data backref root %v objectid %v offset %v count %v\n",
subitem.Root, subitem.ObjectID, subitem.Offset, subitem.Count)
- case btrfsitem.SharedDataRef:
+ case *btrfsitem.SharedDataRef:
textui.Fprintf(out, "\t\tshared data backref parent %v count %v\n",
ref.Offset, subitem.Count)
default:
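
The bulk of the changes in print_tree.go (and the matching hunks in graph.go and rebuild_graph.go below) follow one mechanical pattern: item bodies are now handed out as pointers, so every case btrfsitem.Foo: becomes case *btrfsitem.Foo:, and InodeRefs is now a struct whose slice sits behind .Refs. A minimal sketch of the new switch shape, using only names that appear in this diff (printItem itself is a hypothetical wrapper; imports omitted):

    // Sketch: type-switching on pointer-typed item bodies, as done above.
    func printItem(out io.Writer, item btrfstree.Item) {
        switch body := item.Body.(type) {
        case *btrfsitem.Inode:
            textui.Fprintf(out, "\t\tgeneration %v\n", body.Generation)
        case *btrfsitem.InodeRefs:
            for _, ref := range body.Refs { // the ref slice now lives behind .Refs
                textui.Fprintf(out, "\t\tindex %v name: %s\n", ref.Index, ref.Name)
            }
        case *btrfsitem.Error:
            textui.Fprintf(out, "\t\t(error) error item: %v\n", body.Err)
        default:
            textui.Fprintf(out, "\t\t(error) unhandled item type: %T\n", body)
        }
    }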
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go
index 45a5bb2..3eeea7f 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/forrest.go
@@ -64,9 +64,9 @@ type RebuiltForrest struct {
// mutable
trees typedsync.Map[btrfsprim.ObjID, *RebuiltTree]
- leafs *containers.LRUCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]
- incItems *containers.LRUCache[btrfsprim.ObjID, *itemIndex]
- excItems *containers.LRUCache[btrfsprim.ObjID, *itemIndex]
+ leafs containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]
+ incItems containers.ARCache[btrfsprim.ObjID, *itemIndex]
+ excItems containers.ARCache[btrfsprim.ObjID, *itemIndex]
}
// NewRebuiltForrest returns a new RebuiltForrest instance. All of
@@ -86,9 +86,15 @@ func NewRebuiltForrest(
cbLookupRoot: cbLookupRoot,
cbLookupUUID: cbLookupUUID,
- leafs: containers.NewLRUCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]](textui.Tunable(8)),
- incItems: containers.NewLRUCache[btrfsprim.ObjID, *itemIndex](textui.Tunable(8)),
- excItems: containers.NewLRUCache[btrfsprim.ObjID, *itemIndex](textui.Tunable(8)),
+ leafs: containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]{
+ MaxLen: textui.Tunable(8),
+ },
+ incItems: containers.ARCache[btrfsprim.ObjID, *itemIndex]{
+ MaxLen: textui.Tunable(8),
+ },
+ excItems: containers.ARCache[btrfsprim.ObjID, *itemIndex]{
+ MaxLen: textui.Tunable(8),
+ },
}
}
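
The cache change above recurs throughout this commit: a heap-allocated *containers.LRUCache built via NewLRUCache(size) is replaced by a containers.ARCache value embedded directly in the struct, with capacity set through its MaxLen field, so the zero value plus a MaxLen is all the construction needed. A sketch of that shape under the same names (the wrapping treeCaches type is hypothetical; imports omitted):

    // Sketch: an ARCache embedded by value; no constructor call needed.
    type treeCaches struct {
        incItems containers.ARCache[btrfsprim.ObjID, *itemIndex]
    }

    func newTreeCaches() *treeCaches {
        return &treeCaches{
            incItems: containers.ARCache[btrfsprim.ObjID, *itemIndex]{
                MaxLen: textui.Tunable(8), // same tunable capacity as above
            },
        }
    }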
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go
index 66cb0fa..c9d0fa4 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees/tree.go
@@ -49,7 +49,7 @@ type RebuiltTree struct {
// leafToRoots returns all leafs (lvl=0) in the filesystem that pass
// .isOwnerOK, whether or not they're in the tree.
func (tree *RebuiltTree) leafToRoots(ctx context.Context) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] {
- return tree.forrest.leafs.GetOrElse(tree.ID, func() map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] {
+ return containers.LoadOrElse[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]](&tree.forrest.leafs, tree.ID, func(btrfsprim.ObjID) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] {
ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-nodes", fmt.Sprintf("tree=%v", tree.ID))
nodeToRoots := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr])
@@ -139,7 +139,7 @@ func (tree *RebuiltTree) isOwnerOK(owner btrfsprim.ObjID, gen btrfsprim.Generati
// RebuiltTree's internal map!
func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] {
ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-inc-items", fmt.Sprintf("tree=%v", tree.ID))
- return tree.items(ctx, tree.forrest.incItems, tree.Roots.HasAny)
+ return tree.items(ctx, &tree.forrest.incItems, tree.Roots.HasAny)
}
// PotentialItems returns a map of items that could be added to this
@@ -149,7 +149,7 @@ func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsp
// RebuiltTree's internal map!
func (tree *RebuiltTree) PotentialItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] {
ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-exc-items", fmt.Sprintf("tree=%v", tree.ID))
- return tree.items(ctx, tree.forrest.excItems,
+ return tree.items(ctx, &tree.forrest.excItems,
func(roots containers.Set[btrfsvol.LogicalAddr]) bool {
return !tree.Roots.HasAny(roots)
})
@@ -168,13 +168,13 @@ func (s itemStats) String() string {
s.Leafs, s.NumItems, s.NumDups)
}
-func (tree *RebuiltTree) items(ctx context.Context, cache *containers.LRUCache[btrfsprim.ObjID, *itemIndex],
+func (tree *RebuiltTree) items(ctx context.Context, cache containers.Map[btrfsprim.ObjID, *itemIndex],
leafFn func(roots containers.Set[btrfsvol.LogicalAddr]) bool,
) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] {
tree.mu.RLock()
defer tree.mu.RUnlock()
- return cache.GetOrElse(tree.ID, func() *itemIndex {
+ return containers.LoadOrElse(cache, tree.ID, func(btrfsprim.ObjID) *itemIndex {
var leafs []btrfsvol.LogicalAddr
for leaf, roots := range tree.leafToRoots(ctx) {
if leafFn(roots) {
@@ -298,8 +298,8 @@ func (tree *RebuiltTree) AddRoot(ctx context.Context, rootNode btrfsvol.LogicalA
progressWriter.Done()
tree.Roots.Insert(rootNode)
- tree.forrest.incItems.Remove(tree.ID) // force re-gen
- tree.forrest.excItems.Remove(tree.ID) // force re-gen
+ tree.forrest.incItems.Delete(tree.ID) // force re-gen
+ tree.forrest.excItems.Delete(tree.ID) // force re-gen
if (tree.ID == btrfsprim.ROOT_TREE_OBJECTID || tree.ID == btrfsprim.UUID_TREE_OBJECTID) && stats.AddedItems > 0 {
tree.forrest.trees.Range(func(otherTreeID btrfsprim.ObjID, otherTree *RebuiltTree) bool {
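
The access pattern changes accordingly: lazy per-key population moves from the cache method GetOrElse(key, fn) to the free function containers.LoadOrElse(cache, key, fn), which accepts any containers.Map, and invalidation moves from Remove to Delete, as in AddRoot above. A hedged sketch of both calls (cachedIndex, invalidate, and rebuildIndex are hypothetical stand-ins):

    // Sketch: lazily (re)build a per-tree index through LoadOrElse, and
    // drop it with Delete when a newly added root invalidates it.
    func cachedIndex(cache containers.Map[btrfsprim.ObjID, *itemIndex], treeID btrfsprim.ObjID) *itemIndex {
        return containers.LoadOrElse(cache, treeID, func(btrfsprim.ObjID) *itemIndex {
            return rebuildIndex(treeID) // hypothetical: the expensive build step
        })
    }

    func invalidate(cache *containers.ARCache[btrfsprim.ObjID, *itemIndex], treeID btrfsprim.ObjID) {
        cache.Delete(treeID) // force re-gen on the next access
    }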
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go
index cf86d74..c04fec0 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph/graph.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -151,7 +151,7 @@ func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.No
if nodeRef.Data.Head.Level == 0 {
cnt := 0
for _, item := range nodeRef.Data.BodyLeaf {
- if _, ok := item.Body.(btrfsitem.Root); ok {
+ if _, ok := item.Body.(*btrfsitem.Root); ok {
cnt++
}
}
@@ -161,7 +161,7 @@ func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.No
g.Nodes[nodeRef.Addr] = nodeData
for i, item := range nodeRef.Data.BodyLeaf {
keys[i] = item.Key
- if itemBody, ok := item.Body.(btrfsitem.Root); ok {
+ if itemBody, ok := item.Body.(*btrfsitem.Root); ok {
kps = append(kps, Edge{
FromRoot: nodeRef.Addr,
FromItem: i,
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
index 64a9828..b4ab645 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio/keyio.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -48,7 +48,7 @@ type Handle struct {
Names map[ItemPtr][]byte // DIR_INDEX
Sizes map[ItemPtr]SizeAndErr // EXTENT_CSUM and EXTENT_DATA
- cache *containers.LRUCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]]
+ cache containers.ARCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]]
}
func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) *Handle {
@@ -60,7 +60,12 @@ func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock)
Names: make(map[ItemPtr][]byte),
Sizes: make(map[ItemPtr]SizeAndErr),
- cache: containers.NewLRUCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]](textui.Tunable(8)),
+ cache: containers.ARCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]]{
+ MaxLen: textui.Tunable(8),
+ OnRemove: func(_ btrfsvol.LogicalAddr, nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) {
+ btrfstree.FreeNodeRef(nodeRef)
+ },
+ },
}
}
@@ -71,27 +76,27 @@ func (o *Handle) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.
Idx: i,
}
switch itemBody := item.Body.(type) {
- case btrfsitem.Inode:
+ case *btrfsitem.Inode:
o.Flags[ptr] = FlagsAndErr{
NoDataSum: itemBody.Flags.Has(btrfsitem.INODE_NODATASUM),
Err: nil,
}
- case btrfsitem.DirEntry:
+ case *btrfsitem.DirEntry:
if item.Key.ItemType == btrfsprim.DIR_INDEX_KEY {
o.Names[ptr] = append([]byte(nil), itemBody.Name...)
}
- case btrfsitem.ExtentCSum:
+ case *btrfsitem.ExtentCSum:
o.Sizes[ptr] = SizeAndErr{
Size: uint64(itemBody.Size()),
Err: nil,
}
- case btrfsitem.FileExtent:
+ case *btrfsitem.FileExtent:
size, err := itemBody.Size()
o.Sizes[ptr] = SizeAndErr{
Size: uint64(size),
Err: err,
}
- case btrfsitem.Error:
+ case *btrfsitem.Error:
switch item.Key.ItemType {
case btrfsprim.INODE_ITEM_KEY:
o.Flags[ptr] = FlagsAndErr{
@@ -113,7 +118,7 @@ func (o *Handle) SetGraph(graph graph.Graph) {
}
func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] {
- if cached, ok := o.cache.Get(laddr); ok {
+ if cached, ok := o.cache.Load(laddr); ok {
dlog.Tracef(ctx, "cache-hit node@%v", laddr)
return cached
}
@@ -142,7 +147,7 @@ func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *disk
panic(fmt.Errorf("should not happen: i/o error: %w", err))
}
- o.cache.Add(laddr, ref)
+ o.cache.Store(laddr, ref)
return ref
}
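
Here the ARCache also gains an OnRemove hook, so node references evicted from the cache are returned through btrfstree.FreeNodeRef rather than leaking, and the old Get/Add calls become Load/Store. A compact sketch of the resulting read-through cache (getNode and readNodeFromDisk are hypothetical; the field literal mirrors the diff):

    // Sketch: a read-through node cache that frees refs on eviction.
    var nodeCache = containers.ARCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]]{
        MaxLen: textui.Tunable(8),
        OnRemove: func(_ btrfsvol.LogicalAddr, ref *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) {
            btrfstree.FreeNodeRef(ref) // evicted refs are released, not leaked
        },
    }

    func getNode(laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] {
        if ref, ok := nodeCache.Load(laddr); ok {
            return ref // cache hit
        }
        ref := readNodeFromDisk(laddr) // hypothetical I/O step
        nodeCache.Store(laddr, ref)
        return ref
    }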
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
index bd29278..624441f 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go
@@ -290,9 +290,9 @@ func (o *rebuilder) cbLookupRoot(ctx context.Context, tree btrfsprim.ObjID) (off
o.ioErr(ctx, fmt.Errorf("could not read previously read item: %v", key))
}
switch itemBody := itemBody.(type) {
- case btrfsitem.Root:
- return btrfsprim.Generation(key.Offset), itemBody, true
- case btrfsitem.Error:
+ case *btrfsitem.Root:
+ return btrfsprim.Generation(key.Offset), *itemBody, true
+ case *btrfsitem.Error:
o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", key, itemBody.Err))
return 0, btrfsitem.Root{}, false
default:
@@ -315,9 +315,9 @@ func (o *rebuilder) cbLookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id b
o.ioErr(ctx, fmt.Errorf("could not read previously read item: %v", key))
}
switch itemBody := itemBody.(type) {
- case btrfsitem.UUIDMap:
+ case *btrfsitem.UUIDMap:
return itemBody.ObjID, true
- case btrfsitem.Error:
+ case *btrfsitem.Error:
o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", key, itemBody.Err))
return 0, false
default:
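
The same pointer-body switch applies here; the wrinkle is that callers which must hand back a value (btrfsitem.Root here, and the Chunk/BlockGroup/DevExtent/ExtentCSum copies in scandevices.go further down) now dereference the pointer so the copy outlives the decoded item. Sketch of that shape (lookupRoot is a hypothetical stand-in for cbLookupRoot's inner switch; imports omitted):

    // Sketch: dereference a pointer-typed body when a value copy must be
    // returned to the caller.
    func lookupRoot(item btrfstree.Item) (btrfsitem.Root, bool) {
        switch itemBody := item.Body.(type) {
        case *btrfsitem.Root:
            return *itemBody, true // copy out of the decoded item
        case *btrfsitem.Error:
            return btrfsitem.Root{}, false // malformed item
        default:
            return btrfsitem.Root{}, false
        }
    }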
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
index 9e40465..710030c 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go
@@ -50,7 +50,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
// https://btrfs.wiki.kernel.org/index.php/File:References.png (from the page
// https://btrfs.wiki.kernel.org/index.php/Data_Structures )
switch body := item.Body.(type) {
- case btrfsitem.BlockGroup:
+ case *btrfsitem.BlockGroup:
o.want(ctx, "Chunk",
btrfsprim.CHUNK_TREE_OBJECTID,
body.ChunkObjectID,
@@ -60,22 +60,22 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
item.Key.ObjectID,
btrfsitem.FREE_SPACE_INFO_KEY,
item.Key.Offset)
- case btrfsitem.Chunk:
+ case *btrfsitem.Chunk:
o.want(ctx, "owning Root",
btrfsprim.ROOT_TREE_OBJECTID,
body.Head.Owner,
btrfsitem.ROOT_ITEM_KEY)
- case btrfsitem.Dev:
+ case *btrfsitem.Dev:
// nothing
- case btrfsitem.DevExtent:
+ case *btrfsitem.DevExtent:
o.wantOff(ctx, "Chunk",
body.ChunkTree,
body.ChunkObjectID,
btrfsitem.CHUNK_ITEM_KEY,
uint64(body.ChunkOffset))
- case btrfsitem.DevStats:
+ case *btrfsitem.DevStats:
// nothing
- case btrfsitem.DirEntry:
+ case *btrfsitem.DirEntry:
// containing-directory
o.wantOff(ctx, "containing dir inode",
treeID,
@@ -126,9 +126,9 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
o.fsErr(ctx, fmt.Errorf("DirEntry: unexpected .Location.ItemType=%v", body.Location.ItemType))
}
}
- case btrfsitem.Empty:
+ case *btrfsitem.Empty:
// nothing
- case btrfsitem.Extent:
+ case *btrfsitem.Extent:
// if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) {
// // Supposedly this flag indicates that
// // body.Info.Key identifies a node by the
@@ -140,7 +140,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
switch refBody := ref.Body.(type) {
case nil:
// nothing
- case btrfsitem.ExtentDataRef:
+ case *btrfsitem.ExtentDataRef:
o.wantOff(ctx, "referencing Inode",
refBody.Root,
refBody.ObjectID,
@@ -151,7 +151,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
refBody.ObjectID,
btrfsitem.EXTENT_DATA_KEY,
uint64(refBody.Offset))
- case btrfsitem.SharedDataRef:
+ case *btrfsitem.SharedDataRef:
// nothing
default:
// This is a panic because the item decoder should not emit a new
@@ -159,9 +159,9 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
panic(fmt.Errorf("should not happen: Extent: unexpected .Refs[%d].Body type %T", i, refBody))
}
}
- case btrfsitem.ExtentCSum:
+ case *btrfsitem.ExtentCSum:
// nothing
- case btrfsitem.ExtentDataRef:
+ case *btrfsitem.ExtentDataRef:
o.want(ctx, "Extent being referenced",
btrfsprim.EXTENT_TREE_OBJECTID,
item.Key.ObjectID,
@@ -176,7 +176,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
body.ObjectID,
btrfsitem.EXTENT_DATA_KEY,
uint64(body.Offset))
- case btrfsitem.FileExtent:
+ case *btrfsitem.FileExtent:
o.wantOff(ctx, "containing Inode",
treeID,
item.Key.ObjectID,
@@ -194,19 +194,19 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
default:
o.fsErr(ctx, fmt.Errorf("FileExtent: unexpected body.Type=%v", body.Type))
}
- case btrfsitem.FreeSpaceBitmap:
+ case *btrfsitem.FreeSpaceBitmap:
o.wantOff(ctx, "FreeSpaceInfo",
treeID,
item.Key.ObjectID,
btrfsitem.FREE_SPACE_INFO_KEY,
item.Key.Offset)
- case btrfsitem.FreeSpaceHeader:
+ case *btrfsitem.FreeSpaceHeader:
o.wantOff(ctx, ".Location",
treeID,
body.Location.ObjectID,
body.Location.ItemType,
body.Location.Offset)
- case btrfsitem.FreeSpaceInfo:
+ case *btrfsitem.FreeSpaceInfo:
if body.Flags.Has(btrfsitem.FREE_SPACE_USING_BITMAPS) {
o.wantOff(ctx, "FreeSpaceBitmap",
treeID,
@@ -214,7 +214,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
btrfsitem.FREE_SPACE_BITMAP_KEY,
item.Key.Offset)
}
- case btrfsitem.Inode:
+ case *btrfsitem.Inode:
o.want(ctx, "backrefs",
treeID, // TODO: validate the number of these against body.NLink
item.Key.ObjectID,
@@ -227,7 +227,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
body.BlockGroup,
btrfsitem.BLOCK_GROUP_ITEM_KEY)
}
- case btrfsitem.InodeRefs:
+ case *btrfsitem.InodeRefs:
o.wantOff(ctx, "child Inode",
treeID,
item.Key.ObjectID,
@@ -238,7 +238,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
btrfsprim.ObjID(item.Key.Offset),
btrfsitem.INODE_ITEM_KEY,
0)
- for _, ref := range body {
+ for _, ref := range body.Refs {
o.wantOff(ctx, "DIR_ITEM",
treeID,
btrfsprim.ObjID(item.Key.Offset),
@@ -250,12 +250,12 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
btrfsitem.DIR_INDEX_KEY,
uint64(ref.Index))
}
- case btrfsitem.Metadata:
+ case *btrfsitem.Metadata:
for i, ref := range body.Refs {
switch refBody := ref.Body.(type) {
case nil:
// nothing
- case btrfsitem.ExtentDataRef:
+ case *btrfsitem.ExtentDataRef:
o.wantOff(ctx, "referencing INode",
refBody.Root,
refBody.ObjectID,
@@ -266,7 +266,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
refBody.ObjectID,
btrfsitem.EXTENT_DATA_KEY,
uint64(refBody.Offset))
- case btrfsitem.SharedDataRef:
+ case *btrfsitem.SharedDataRef:
// nothing
default:
// This is a panic because the item decoder should not emit a new
@@ -274,7 +274,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
panic(fmt.Errorf("should not happen: Metadata: unexpected .Refs[%d].Body type %T", i, refBody))
}
}
- case btrfsitem.Root:
+ case *btrfsitem.Root:
if body.RootDirID != 0 {
o.wantOff(ctx, "root directory",
item.Key.ObjectID,
@@ -298,7 +298,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
key.ItemType,
key.Offset)
}
- case btrfsitem.RootRef:
+ case *btrfsitem.RootRef:
var otherType btrfsprim.ItemType
var parent, child btrfsprim.ObjID
switch item.Key.ItemType {
@@ -347,17 +347,17 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID,
treeID,
child,
btrfsitem.ROOT_ITEM_KEY)
- case btrfsitem.SharedDataRef:
+ case *btrfsitem.SharedDataRef:
o.want(ctx, "Extent",
btrfsprim.EXTENT_TREE_OBJECTID,
item.Key.ObjectID,
btrfsitem.EXTENT_ITEM_KEY)
- case btrfsitem.UUIDMap:
+ case *btrfsitem.UUIDMap:
o.want(ctx, "subvolume Root",
btrfsprim.ROOT_TREE_OBJECTID,
body.ObjID,
btrfsitem.ROOT_ITEM_KEY)
- case btrfsitem.Error:
+ case *btrfsitem.Error:
o.fsErr(ctx, fmt.Errorf("error decoding item: %w", body.Err))
default:
// This is a panic because the item decoder should not emit new types without this
diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
index 7e19802..632ed70 100644
--- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
+++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/scan.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -48,12 +48,15 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca
LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
})
if err != nil {
+ btrfstree.FreeNodeRef(nodeRef)
return btrfstree.Superblock{}, graph.Graph{}, nil, err
}
nodeGraph.InsertNode(nodeRef)
keyIO.InsertNode(nodeRef)
+ btrfstree.FreeNodeRef(nodeRef)
+
stats.N++
progressWriter.Set(stats)
}
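
The FreeNodeRef calls added above set the ownership rule for scanning: once InsertNode has copied what it needs into the graph and the keyio indexes, the node reference is released, on the error path as well. A sketch of that lifecycle, assuming a hypothetical readNode helper in place of the btrfstree.ReadNode call in the loop above (imports omitted):

    // Sketch: index a node's contents, then release the node reference.
    func indexNode(laddr btrfsvol.LogicalAddr, nodeGraph graph.Graph, keyIO *keyio.Handle) error {
        nodeRef, err := readNode(laddr) // hypothetical wrapper around btrfstree.ReadNode
        if err != nil {
            btrfstree.FreeNodeRef(nodeRef) // release even on the error path
            return err
        }
        nodeGraph.InsertNode(nodeRef) // both of these copy what they need,
        keyIO.InsertNode(nodeRef)     // so the ref can be freed right after
        btrfstree.FreeNodeRef(nodeRef)
        return nil
    }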
diff --git a/lib/btrfsprogs/btrfsinspect/scandevices.go b/lib/btrfsprogs/btrfsinspect/scandevices.go
index 9b8360c..d54be71 100644
--- a/lib/btrfsprogs/btrfsinspect/scandevices.go
+++ b/lib/btrfsprogs/btrfsinspect/scandevices.go
@@ -105,6 +105,8 @@ func (s scanStats) String() string {
s.NumFoundExtentCSums)
}
+var sbSize = btrfsvol.PhysicalAddr(binstruct.StaticSize(btrfstree.Superblock{}))
+
// ScanOneDevice mostly mimics btrfs-progs
// cmds/rescue-chunk-recover.c:scan_one_device().
func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblock) (ScanOneDeviceResult, error) {
@@ -114,8 +116,6 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
FoundNodes: make(map[btrfsvol.LogicalAddr][]btrfsvol.PhysicalAddr),
}
- sbSize := btrfsvol.PhysicalAddr(binstruct.StaticSize(btrfstree.Superblock{}))
-
devSize := dev.Size()
if sb.NodeSize < sb.SectorSize {
return result, fmt.Errorf("node_size(%v) < sector_size(%v)",
@@ -183,14 +183,14 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
switch item.Key.ItemType {
case btrfsitem.CHUNK_ITEM_KEY:
switch itemBody := item.Body.(type) {
- case btrfsitem.Chunk:
+ case *btrfsitem.Chunk:
dlog.Tracef(ctx, "node@%v: item %v: found chunk",
nodeRef.Addr, i)
result.FoundChunks = append(result.FoundChunks, btrfstree.SysChunk{
Key: item.Key,
- Chunk: itemBody,
+ Chunk: *itemBody,
})
- case btrfsitem.Error:
+ case *btrfsitem.Error:
dlog.Errorf(ctx, "node@%v: item %v: error: malformed CHUNK_ITEM: %v",
nodeRef.Addr, i, itemBody.Err)
default:
@@ -198,14 +198,14 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
}
case btrfsitem.BLOCK_GROUP_ITEM_KEY:
switch itemBody := item.Body.(type) {
- case btrfsitem.BlockGroup:
+ case *btrfsitem.BlockGroup:
dlog.Tracef(ctx, "node@%v: item %v: found block group",
nodeRef.Addr, i)
result.FoundBlockGroups = append(result.FoundBlockGroups, SysBlockGroup{
Key: item.Key,
- BG: itemBody,
+ BG: *itemBody,
})
- case btrfsitem.Error:
+ case *btrfsitem.Error:
dlog.Errorf(ctx, "node@%v: item %v: error: malformed BLOCK_GROUP_ITEM: %v",
nodeRef.Addr, i, itemBody.Err)
default:
@@ -213,14 +213,14 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
}
case btrfsitem.DEV_EXTENT_KEY:
switch itemBody := item.Body.(type) {
- case btrfsitem.DevExtent:
+ case *btrfsitem.DevExtent:
dlog.Tracef(ctx, "node@%v: item %v: found dev extent",
nodeRef.Addr, i)
result.FoundDevExtents = append(result.FoundDevExtents, SysDevExtent{
Key: item.Key,
- DevExt: itemBody,
+ DevExt: *itemBody,
})
- case btrfsitem.Error:
+ case *btrfsitem.Error:
dlog.Errorf(ctx, "node@%v: item %v: error: malformed DEV_EXTENT: %v",
nodeRef.Addr, i, itemBody.Err)
default:
@@ -228,14 +228,14 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
}
case btrfsitem.EXTENT_CSUM_KEY:
switch itemBody := item.Body.(type) {
- case btrfsitem.ExtentCSum:
+ case *btrfsitem.ExtentCSum:
dlog.Tracef(ctx, "node@%v: item %v: found csums",
nodeRef.Addr, i)
result.FoundExtentCSums = append(result.FoundExtentCSums, SysExtentCSum{
Generation: nodeRef.Data.Head.Generation,
- Sums: itemBody,
+ Sums: *itemBody,
})
- case btrfsitem.Error:
+ case *btrfsitem.Error:
dlog.Errorf(ctx, "node@%v: item %v: error: malformed is EXTENT_CSUM: %v",
nodeRef.Addr, i, itemBody.Err)
default:
@@ -245,6 +245,7 @@ func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblo
}
minNextNode = pos + btrfsvol.PhysicalAddr(sb.NodeSize)
}
+ btrfstree.FreeNodeRef(nodeRef)
}
}
progress(devSize)
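
Besides the pointer-body cases, this file hoists the superblock size out of the per-device function: binstruct.StaticSize of a fixed struct never changes, so it is computed once at package level, matching the nodeHeaderSize hoist in print_tree.go at the top of this diff. The two hoisted vars, as they appear in the diff:

    // Sketch: fixed binstruct sizes hoisted to package-level vars so they
    // are computed once rather than on every call.
    var (
        nodeHeaderSize = binstruct.StaticSize(btrfstree.NodeHeader{})
        sbSize         = btrfsvol.PhysicalAddr(binstruct.StaticSize(btrfstree.Superblock{}))
    )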
diff --git a/lib/btrfsprogs/btrfsutil/broken_btree.go b/lib/btrfsprogs/btrfsutil/broken_btree.go
index 7ea31ce..15641ab 100644
--- a/lib/btrfsprogs/btrfsutil/broken_btree.go
+++ b/lib/btrfsprogs/btrfsutil/broken_btree.go
@@ -237,11 +237,13 @@ func (bt *brokenTrees) TreeSearch(treeID btrfsprim.ObjID, fn func(btrfsprim.Key,
itemPath := bt.arena.Inflate(indexItem.Value.Path)
node, err := bt.inner.ReadNode(itemPath.Parent())
+ defer btrfstree.FreeNodeRef(node)
if err != nil {
return btrfstree.Item{}, bt.addErrs(index, fn, err)
}
item := node.Data.BodyLeaf[itemPath.Node(-1).FromItemIdx]
+ item.Body = item.Body.CloneItem()
// Since we were only asked to return 1 item, it isn't
// necessary to augment this `nil` with bt.addErrs().
@@ -271,13 +273,16 @@ func (bt *brokenTrees) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfsprim.K
itemPath := bt.arena.Inflate(indexItems[i].Path)
if node == nil || node.Addr != itemPath.Node(-2).ToNodeAddr {
var err error
+ btrfstree.FreeNodeRef(node)
node, err = bt.inner.ReadNode(itemPath.Parent())
if err != nil {
+ btrfstree.FreeNodeRef(node)
return nil, bt.addErrs(index, fn, err)
}
}
ret[i] = node.Data.BodyLeaf[itemPath.Node(-1).FromItemIdx]
}
+ btrfstree.FreeNodeRef(node)
return ret, bt.addErrs(index, fn, nil)
}
@@ -306,8 +311,10 @@ func (bt *brokenTrees) TreeWalk(ctx context.Context, treeID btrfsprim.ObjID, err
itemPath := bt.arena.Inflate(indexItem.Value.Path)
if node == nil || node.Addr != itemPath.Node(-2).ToNodeAddr {
var err error
+ btrfstree.FreeNodeRef(node)
node, err = bt.inner.ReadNode(itemPath.Parent())
if err != nil {
+ btrfstree.FreeNodeRef(node)
errHandle(&btrfstree.TreeError{Path: itemPath, Err: err})
return true
}
@@ -319,6 +326,7 @@ func (bt *brokenTrees) TreeWalk(ctx context.Context, treeID btrfsprim.ObjID, err
}
return true
})
+ btrfstree.FreeNodeRef(node)
}
func (bt *brokenTrees) Superblock() (*btrfstree.Superblock, error) {
@@ -339,6 +347,7 @@ func (bt *brokenTrees) Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.Logical
return nil, index.TreeRootErr
}
nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](bt.inner, *sb, nodeAddr, btrfstree.NodeExpectations{})
+ defer btrfstree.FreeNodeRef(nodeRef)
if err != nil {
return nil, err
}
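
With node references now freed eagerly, anything TreeSearch returns has to be detached first: the item's Body is copied out with CloneItem before the deferred FreeNodeRef releases the node, so callers never hold memory backed by a recycled node buffer. A sketch of the clone-then-free shape, assuming the brokenTrees internals shown in the hunk above (searchOne and idx are hypothetical; imports omitted):

    // Sketch: clone the item body out of a node before the node ref is freed.
    func searchOne(bt *brokenTrees, itemPath btrfstree.TreePath, idx int) (btrfstree.Item, error) {
        node, err := bt.inner.ReadNode(itemPath.Parent())
        defer btrfstree.FreeNodeRef(node) // released when we return
        if err != nil {
            return btrfstree.Item{}, err
        }
        item := node.Data.BodyLeaf[idx]
        item.Body = item.Body.CloneItem() // detach from the node's buffer
        return item, nil
    }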
diff --git a/lib/btrfsprogs/btrfsutil/skinny_paths.go b/lib/btrfsprogs/btrfsutil/skinny_paths.go
index 6a51739..1695990 100644
--- a/lib/btrfsprogs/btrfsutil/skinny_paths.go
+++ b/lib/btrfsprogs/btrfsutil/skinny_paths.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -29,21 +29,13 @@ type SkinnyPathArena struct {
SB btrfstree.Superblock
fatRoots map[btrfsvol.LogicalAddr]btrfstree.TreePathElem
- fatItems *containers.LRUCache[skinnyItem, btrfstree.TreePathElem]
+ fatItems containers.ARCache[skinnyItem, btrfstree.TreePathElem]
}
func (a *SkinnyPathArena) init() {
if a.fatRoots == nil {
a.fatRoots = make(map[btrfsvol.LogicalAddr]btrfstree.TreePathElem)
- // This cache size is sorta arbitrary. At first I figured
- // "let's allow 1GB of cached items", and figured 67bytes per
- // item, that's about 16M items. But with overhead of the
- // LRUCache, it's actually a lot higher than that. So then I
- // cut it to .5M, and that cut my total memory use to ~8GB,
- // which is a good number for me. Then I tought it to do a
- // better job of recovering trees, and so the memory grew, and I
- // cut it to 64K. Then to 8K. Then grew it to 128K.
- a.fatItems = containers.NewLRUCache[skinnyItem, btrfstree.TreePathElem](textui.Tunable(128 * 1024))
+ a.fatItems.MaxLen = textui.Tunable(128 * 1024)
}
}
@@ -54,7 +46,7 @@ func (a *SkinnyPathArena) getItem(parent btrfstree.TreePath, itemIdx int) (btrfs
a.init()
- ret, ok := a.fatItems.Get(skinnyItem{
+ ret, ok := a.fatItems.Load(skinnyItem{
Node: parent.Node(-1).ToNodeAddr,
Item: itemIdx,
})
@@ -63,6 +55,7 @@ func (a *SkinnyPathArena) getItem(parent btrfstree.TreePath, itemIdx int) (btrfs
}
node, err := btrfstree.ReadNode(a.FS, a.SB, parent.Node(-1).ToNodeAddr, btrfstree.NodeExpectations{})
+ defer btrfstree.FreeNodeRef(node)
if err != nil {
return btrfstree.TreePathElem{}, err
}
@@ -84,7 +77,7 @@ func (a *SkinnyPathArena) getItem(parent btrfstree.TreePath, itemIdx int) (btrfs
ToKey: item.Key,
ToMaxKey: toMaxKey,
}
- a.fatItems.Add(skinnyItem{Node: parent.Node(-1).ToNodeAddr, Item: i}, elem)
+ a.fatItems.Store(skinnyItem{Node: parent.Node(-1).ToNodeAddr, Item: i}, elem)
if i == itemIdx {
ret = elem
}
@@ -100,7 +93,7 @@ func (a *SkinnyPathArena) getItem(parent btrfstree.TreePath, itemIdx int) (btrfs
ToKey: item.Key,
ToMaxKey: item.Key,
}
- a.fatItems.Add(skinnyItem{Node: parent.Node(-1).ToNodeAddr, Item: i}, elem)
+ a.fatItems.Store(skinnyItem{Node: parent.Node(-1).ToNodeAddr, Item: i}, elem)
if i == itemIdx {
ret = elem
}
@@ -121,7 +114,7 @@ func (a *SkinnyPathArena) Deflate(fat btrfstree.TreePath) SkinnyPath {
a.fatRoots[elem.ToNodeAddr] = elem
ret.Root = elem.ToNodeAddr
} else {
- a.fatItems.Add(skinnyItem{Node: prevNode, Item: elem.FromItemIdx}, elem)
+ a.fatItems.Store(skinnyItem{Node: prevNode, Item: elem.FromItemIdx}, elem)
ret.Items = append(ret.Items, elem.FromItemIdx)
}
prevNode = elem.ToNodeAddr